diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/deepai_test.py b/spaces/101-5/gpt4free/g4f/.v1/testing/deepai_test.py
deleted file mode 100644
index 474f663ecff8e48b17fc1cdae1ea6e0a89f03c0b..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/testing/deepai_test.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from gpt4free import deepai
-
-# Single completion: stream the response to a one-shot prompt
-for chunk in deepai.Completion.create("Write a list of possible vacation destinations:"):
-    print(chunk, end="", flush=True)
-print()
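-
-# A minimal sketch (not part of the original test): each streamed chunk is a
-# plain string, so the whole response can also be collected in one pass.
-full_response = "".join(deepai.Completion.create("Write a list of possible vacation destinations:"))
-print(full_response)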
-
-# Chat completion: stream the reply to a multi-turn conversation
-print("==============")
-messages = [  # example conversation taken from the OpenAI docs
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Who won the world series in 2020?"},
-    {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
-    {"role": "user", "content": "Where was it played?"}
-]
-for chunk in deepai.ChatCompletion.create(messages):
-    print(chunk, end="", flush=True)
-print()
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apowersoft Video Converter Studio 4.9.1 Crack ((EXCLUSIVE)).md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apowersoft Video Converter Studio 4.9.1 Crack ((EXCLUSIVE)).md
deleted file mode 100644
index 20e26fcdeb1ae23f8f8b1fbe8ebff1f3ff292879..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Apowersoft Video Converter Studio 4.9.1 Crack ((EXCLUSIVE)).md	
+++ /dev/null
@@ -1,158 +0,0 @@
-<br />
-<h1>Apowersoft Video Converter Studio 4.9.1 Crack: A Powerful and Easy-to-Use Video Converter</h1>
- <p>If you are looking for a simple yet effective way to convert your video and audio files to various formats, you may want to try Apowersoft Video Converter Studio 4.9.1 Crack. This is a cracked version of a popular video converter program that can help you convert, edit, and enhance your media files with ease.</p>
- <p>In this article, we will show you what Apowersoft Video Converter Studio is, how to download and install it, how to use it, and what its advantages and disadvantages are. By the end of this article, you will have a clear idea of whether this software is suitable for your needs or not.</p>
-<h2>Apowersoft Video Converter Studio 4.9.1 Crack</h2><br /><p><b><b>Download File</b> &#8250; <a href="https://byltly.com/2uKxST">https://byltly.com/2uKxST</a></b></p><br /><br />
- <h2>What is Apowersoft Video Converter Studio?</h2>
- <h3>A brief introduction to the software and its features</h3>
- <p>Apowersoft Video Converter Studio is a powerful video converter program that can convert all types of video and audio formats, such as AVI, MP4, FLV, MKV, MP3, WAV, etc. It can also prepare your media files for various devices and platforms, such as YouTube, DVD, iPhone, Android, etc.</p>
- <p>But that's not all. Apowersoft Video Converter Studio also has a built-in video editor that allows you to trim, crop, rotate, add effects, subtitles, watermarks, etc. You can also adjust the video parameters, such as resolution, bitrate, frame rate, etc.</p>
- <p>Moreover, Apowersoft Video Converter Studio has a useful subtitle editor that lets you load external subtitles and edit them freely. You can change the font size, color, position, etc. You can also merge multiple videos into one file with this software.</p>
- <h3>The benefits of using Apowersoft Video Converter Studio</h3>
- <p>There are many benefits of using Apowersoft Video Converter Studio for your video conversion needs. Here are some of them:</p>
- <ul>
-<li>It can preserve 100% video quality as the original video file.</li>
-<li>It can convert video and audio files at a fast speed.</li>
-<li>It can support multiple languages for subtitles.</li>
-<li>It has a simple and intuitive user interface that is easy to use.</li>
-<li>It has a wide range of output formats and presets for different devices.</li>
-</ul>
- <h2>How to Download and Install Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
- <h3>The steps to download the software from a reliable source</h3>
- <p>If you want to download Apowersoft Video Converter Studio 4.9.1 Crack for free, you need to find a reliable source that offers the cracked version of the software. There are many websites that claim to provide this software for free, but some of them may contain viruses or malware that can harm your computer.</p>
- <p>One of the websites that we recommend is SadeemPC.com, which provides cracked software, games, nulled scripts, WordPress themes and plugins for free. You can download Apowersoft Video Converter Studio 4.9.1 Crack from this website by following these steps:</p>
- <ol>
-<li>Go to <a href="https://www.sadeempc.com/apowersoft-video-converter-studio-crack/">https://www.sadeempc.com/apowersoft-video-converter-studio-crack/</a></li>
-<li>Scroll down until you see the download links section.</li>
-<li>Click on one of the download links (preferably Download Now) and wait for a few seconds.</li>
-<li>You will then be redirected through several more pages; on each one, click the next download link (Download Via Torrent, then Download From UploadRAR, then Free Download, then Create Download Link, and finally Click Here To Download).</li>
-<li>A pop-up window will appear where you need to click on Save File.</li>
-<li>The file will be downloaded to your computer.</li>
-</ol>
- <h3>The steps to install the software and activate it with a serial key</h3>
- <p>After downloading the file from SadeemPC.com, you need to install it on your computer by following these steps:</p>
- <ol>
-<li>Extract the file using WinRAR or any other file extractor.</li>
-<li>Open the extracted folder and run Setup.exe as administrator.</li>
-<li>Follow the installation wizard until it finishes.</li>
-<li>Do not launch the software yet.</li>
-<li>Open the Crack folder and copy all the files inside it.</li>
-<li>Paste them into the installation directory (usually C:\Program Files\Apowersoft\Video Converter Studio).</li>
-<li>Launch the software.</li>
-<li>A pop-up window will appear asking you to register.</li>
-<li>Enter any name and email address.</li>
-<li>Enter one of the serial keys provided in Serial Keys.txt file.</li>
-<li>Click on Register Now.</li>
-<li>The software will be activated successfully.</li>
-</ol>
- <h2>How to Use Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
- <h3>The main interface and functions of the software</h3>
- <p>When you launch Apowersoft Video Converter Studio 4.9.1 Crack for the first time, you will see its main interface, which is divided into four sections.</p>
-<h3>How to convert video and audio files to various formats</h3>
- <p>One of the main functions of Apowersoft Video Converter Studio 4.9.1 Crack is to convert video and audio files to various formats that you need. You can do this by following these simple steps:</p>
- <ol>
-<li>Select the files that you want to convert from the file list.</li>
-<li>Click on the "Profile" button at the bottom of the interface and choose an output format from the drop-down menu.</li>
-<li>You can also click on the "Edit" button next to the "Profile" button to customize the video parameters, such as resolution, bitrate, frame rate, etc.</li>
-<li>Click on the "Convert" button at the lower right corner of the interface and wait for the conversion process to finish.</li>
-<li>You can check the converted files by clicking on the "Open Folder" button at the bottom of the interface.</li>
-</ol>
- <p>You can also use the "Add to batch" button at the bottom of the interface to add multiple files for batch conversion. This will save you time and effort.</p>
- <h3>How to edit your videos with built-in tools</h3>
- <p>Another function of Apowersoft Video Converter Studio 4.9.1 Crack is to edit your videos with built-in tools. You can do this by following these simple steps:</p>
- <ol>
-<li>Select a file that you want to edit from the file list.</li>
-<li>Click on the "Edit" button on the toolbar and a new window will pop up.</li>
-<li>In this window, you can trim, crop, rotate, add effects, subtitles, watermarks, etc. to your video.</li>
-<li>You can preview your changes in real time by using the play button at the bottom of the window.</li>
-<li>When you are satisfied with your editing, click on the "OK" button at the lower right corner of the window.</li>
-<li>You can also click on the "Reset" button at the lower left corner of the window to undo your changes.</li>
-</ol>
- <p>You can also use the "Edit" button next to the "Profile" button at the bottom of the interface to adjust some basic video parameters, such as brightness, contrast, saturation, etc.</p>
- <h3>How to add external subtitles and audio files</h3>
- <p>A third function of Apowersoft Video Converter Studio 4.9.1 Crack is to add external subtitles and audio files to your videos. You can do this by following these simple steps:</p>
- <ol>
-<li>Select a file that you want to add subtitles or audio files to from the file list.</li>
- </ol>
-<h2>What are the Advantages of Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
- <p>There are many advantages of using Apowersoft Video Converter Studio 4.9.1 Crack for your video conversion and editing needs. Here are some of them:</p>
- <ul>
-<li>It can preserve 100% video quality as the original video file.</li>
-<li>It can convert video and audio files at a fast speed.</li>
-<li>It can support multiple video and audio formats and devices.</li>
-<li>It has a simple and intuitive user interface that is easy to use.</li>
-<li>It has a built-in video editor that allows you to trim, crop, rotate, add effects, subtitles, watermarks, etc.</li>
-<li>It has a useful subtitle editor that lets you load external subtitles and edit them freely.</li>
-<li>It has the ability to merge pieces of videos into one.</li>
-<li>It is free to download and use with a serial key.</li>
-</ul>
- <h2>What are the Disadvantages of Apowersoft Video Converter Studio 4.9.1 Crack?</h2>
- <p>However, there are also some disadvantages of using Apowersoft Video Converter Studio 4.9.1 Crack that you should be aware of. Here are some of them:</p>
- <ul>
-<li>It may contain viruses or malware that can harm your computer if you download it from untrusted sources.</li>
-<li>It may violate the copyright and license agreement of the official software if you use it without permission.</li>
-<li>It may not receive updates and technical support from the official site if you use it illegally.</li>
-</ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Apowersoft Video Converter Studio 4.9.1 Crack is a powerful and easy-to-use video converter software that can help you convert, edit, and enhance your video and audio files with ease. It has many advantages, such as high-quality and fast conversion, multiple formats and devices support, built-in video editor and subtitle editor, etc. However, it also has some disadvantages, such as possible risks of downloading cracked software from untrusted sources, legal and ethical issues of using cracked software without permission, lack of updates and technical support from the official site, etc.</p>
- <p>If you want to try this software for free, you can download it from SadeemPC.com and use one of the serial keys provided in Serial Keys.txt file to activate it. However, we recommend you to buy the official version from Apowersoft.com if you want to enjoy its full features and benefits legally and safely.</p>
- <p>We hope this article has helped you understand what Apowersoft Video Converter Studio 4.9.1 Crack is, how to download and install it, how to use it, and what its advantages and disadvantages are. If you have any questions or feedback, please feel free to leave a comment below.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions and answers about Apowersoft Video Converter Studio 4.9.1 Crack:</p>
- <ol>
-<li><b>What is the difference between Apowersoft Video Converter Studio 4.9.1 Crack and Apowersoft Video Converter Studio 4.9.1?</b></li>
-<p>The main difference is that Apowersoft Video Converter Studio 4.9.1 Crack is a cracked version of the official software that can be downloaded for free with a serial key from some websites, while Apowersoft Video Converter Studio 4.9.1 is the official version that can be bought from Apowersoft.com with a license code.</p>
-<li><b>Is Apowersoft Video Converter Studio 4.9.1 Crack safe to use?</b></li>
-<p>It depends on where you download it from. If you download it from a reliable source like SadeemPC.com, it may be safe to use. However, if you download it from an untrusted source, it may contain viruses or malware that can harm your computer.</p>
-<li><b>Is Apowersoft Video Converter Studio 4.9.1 Crack legal to use?</b></li>
-<p>No, it is not legal to use Apowersoft Video Converter Studio 4.9.1 Crack without permission from the official site. It may violate the copyright and license agreement of the official software if you use it illegally.</p>
-<li><b>How can I update Apowersoft Video Converter Studio 4.9.1 Crack?</b></li>
-<p>You cannot update Apowersoft Video Converter Studio 4.9.1 Crack manually or automatically because it is not connected to the official site. If you want to update the software, you need to buy the official version from Apowersoft.com or download a newer cracked version from another source.</p>
-<li><b>How can I get technical support for Apowersoft Video Converter Studio 4.9.1 Crack?</b></li>
-<p>You cannot get technical support for Apowersoft Video Converter Studio 4.9.1 Crack from the official site because it is not recognized by them. If you need technical support, you need to buy the official version from Apowersoft.com or contact the source where you downloaded the cracked version.</p>
-</ol>
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cubase 8 Free Download Full Version Crack Windows 10 Is It Worth It?.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cubase 8 Free Download Full Version Crack Windows 10 Is It Worth It?.md
deleted file mode 100644
index cd847bcbc90710336ba0bfee62a586380941e9c2..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cubase 8 Free Download Full Version Crack Windows 10 Is It Worth It?.md	
+++ /dev/null
@@ -1,21 +0,0 @@
-<br />
-<h1>How to Get Cubase 8 Free Download Full Version Crack Windows 10</h1>
-<p>If you are looking for a powerful and versatile digital audio workstation, you might be interested in Cubase 8. Cubase 8 is a program that allows you to record, edit, mix, and produce music of any genre and style. It has a user-friendly interface, a large collection of instruments and effects, and advanced features such as VST Connect SE, Chord Track and Chord Pads.</p>
-<h2>cubase 8 free download full version crack windows 10</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://byltly.com/2uKz5L">https://byltly.com/2uKz5L</a></b></p><br /><br />
-<p>However, Cubase 8 is not a cheap software. The full version costs $549.99, which might be too expensive for some users. That's why some people look for ways to get Cubase 8 free download full version crack Windows 10. A crack is a modified version of the software that bypasses the activation process and lets you use it without paying.</p>
-<p>But is it really worth it to get Cubase 8 free download full version crack Windows 10? In this article, we will discuss the pros and cons of using a cracked version of Cubase 8, and how to get it safely and legally.</p>
-<h2>The Pros of Using Cubase 8 Free Download Full Version Crack Windows 10</h2>
-<p>The main advantage of using a cracked version of Cubase 8 is that you can save money. You don't have to spend hundreds of dollars to get the full features and functionality of the software. You can enjoy all the benefits of Cubase 8 without breaking the bank.</p>
-<p>Another advantage is that you can access the software anytime and anywhere. You don't have to worry about online activation or registration. You can use Cubase 8 offline and on any computer you want. You can also share it with your friends and colleagues without any restrictions.</p>
-<h2>The Cons of Using Cubase 8 Free Download Full Version Crack Windows 10</h2>
-<p>However, using a cracked version of Cubase 8 also has some drawbacks. The first one is that you are violating the intellectual property rights of the software developer. By using a crack, you are stealing their work and depriving them of their rightful income. This is not only unethical but also illegal. You could face legal consequences if you are caught using or distributing a cracked version of Cubase 8.</p>
-<p>The second drawback is that you are risking your computer's security and performance. A crack is usually created by hackers or malicious users who might insert viruses, malware or spyware into the software. These could harm your computer, steal your personal information, or damage your files. You could also experience crashes, errors, or glitches while using a cracked version of Cubase 8. You might lose your work or compromise your projects.</p>
-<p>The third drawback is that you are missing out on updates and support. A cracked version of Cubase 8 is not compatible with the official updates and patches released by the software developer. These updates are important to fix bugs, improve performance, and add new features and enhancements. You could also miss out on technical support and customer service if you encounter any problems or issues with the software.</p>
-<h2>How to Get Cubase 8 Free Download Full Version Crack Windows 10 Safely and Legally</h2>
-<p>So, how can you get Cubase 8 free download full version crack Windows 10 without risking your computer's security and performance, violating the law, or missing out on updates and support? The answer is simple: you can't.</p>
-<p>There is no safe and legal way to get a cracked version of Cubase 8. The only way to get the full version of Cubase 8 is to buy it from the official website or an authorized dealer. This way, you can ensure that you are getting a genuine and reliable product that will meet your needs and expectations.</p>
-<p>However, if you are still hesitant to spend money on Cubase 8, there are some alternatives that you can try. For example, you can download the trial version of Cubase 8 from the official website. The trial version lets you use the software for 30 days for free. You can test all the features and functions of Cubase 8 and see if it suits your preferences and requirements.</p>
-<p>Another option is to use a free or cheaper digital audio workstation with capabilities similar to Cubase 8. Some examples are Audacity, Reaper, LMMS, Ardour, or GarageBand. These programs are either free or low-cost, but they still offer a range of tools and functions for music production.</p>
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberghost Vpn Serial Giveaway.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberghost Vpn Serial Giveaway.md
deleted file mode 100644
index 7e5a6c6b3d98bb260b940ee51cd33fa16d8fdd5c..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cyberghost Vpn Serial Giveaway.md	
+++ /dev/null
@@ -1,33 +0,0 @@
-<br />
-<h1>How to Get a Free Cyberghost VPN Serial Key for 12 Months</h1>
-<p>If you are looking for a reliable and secure VPN service that can protect your online privacy and unblock geo-restricted content, you might be interested in Cyberghost VPN. Cyberghost VPN is one of the most popular VPNs in the world, with over 30 million users and 3000+ servers in 60+ countries. Cyberghost VPN offers a strict no-logs policy, high-level encryption, Wi-Fi protection, malware blocking, and access to streaming services like Netflix, Hulu, BBC iPlayer, and more.</p>
-<h2>Cyberghost Vpn Serial Giveaway</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://byltly.com/2uKx8W">https://byltly.com/2uKx8W</a></b></p><br /><br />
-<p>But what if you don't want to pay for a VPN subscription? Well, you are in luck because we have an exclusive giveaway for our readers. You can get a free Cyberghost VPN serial key that will activate your premium account for 12 months and allow you to use the service on up to 7 devices simultaneously. That's a value of $69.99 that you can get for free!</p>
-<p>How to participate in the giveaway? It's very simple. Just follow these steps:</p>
-<ol>
-<li>Complete this survey: https://cyberghostvpn.typeform.com/to/cI1IEO</li>
-<li>Enter your name and email address in the widget below and click on "Enter".</li>
-<li>Wait for the giveaway to end on Monday, August 11 at 15:00 UTC (8:00 a.m. PDT).</li>
-<li>Check your email inbox for the Cyberghost VPN serial key and instructions on how to activate it.</li>
-</ol>
-<p>That's it! You have just entered the giveaway and have a chance to win a free Cyberghost VPN serial key. Don't miss this opportunity to enjoy one of the best VPNs in the market for free. Good luck!</p>
-
-<p>Why should you use Cyberghost VPN? There are many benefits of using a VPN service, especially in today's digital world where online threats and censorship are rampant. Here are some of the reasons why you should use Cyberghost VPN:</p>
-<ul>
-<li>You can hide your real IP address and location from prying eyes and hackers. This way, you can surf the web anonymously and securely, without leaving any traces or exposing your personal data.</li>
-<li>You can bypass geo-restrictions and access any website or service that is blocked in your country or region. For example, you can watch Netflix US from anywhere in the world, or access social media platforms like Facebook and Twitter in countries where they are banned.</li>
-<li>You can protect your online transactions and conversations from eavesdroppers and cybercriminals. Cyberghost VPN encrypts your traffic with 256-bit AES encryption, the same standard used by the military and banks. This means that no one can intercept or tamper with your data, even on public Wi-Fi networks.</li>
-<li>You can enjoy faster and smoother online streaming and gaming. Cyberghost VPN has optimized servers for various streaming platforms and games, ensuring that you get the best possible performance and quality. You can also avoid bandwidth throttling and ISP snooping that can slow down your connection.</li>
-</ul>
-<p>How to use Cyberghost VPN? Using Cyberghost VPN is very easy and intuitive. You don't need any technical skills or knowledge to use it. Here's how to use Cyberghost VPN:</p>
-<ol>
-<li>Download and install the Cyberghost VPN app on your device. You can find it on the official website or on the app store of your device.</li>
-<li>Launch the app and log in with your Cyberghost VPN serial key or create a free account if you don't have one.</li>
-<li>Select the VPN profile that suits your needs. You can choose from Surf Anonymously, Unblock Streaming, Network / Wi-Fi Protection, Torrent Anonymously, or Custom.</li>
-<li>Click on the Connect button and wait for the app to establish a secure connection to a VPN server.</li>
-<li>Enjoy your online freedom and privacy with Cyberghost VPN!</li>
-</ol>
-<p>Conclusion: Cyberghost VPN is one of the best VPN services that you can use to protect your online privacy and unblock geo-restricted content. It offers a strict no-logs policy, high-level encryption, Wi-Fi protection, malware blocking, and access to streaming services like Netflix, Hulu, BBC iPlayer, and more. And now, you have a chance to get a free Cyberghost VPN serial key that will activate your premium account for 12 months and allow you to use the service on up to 7 devices simultaneously. Don't miss this opportunity to enjoy one of the best VPNs in the market for free. Enter the giveaway now and good luck!</p>
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizikos Uzdavinynas 10 Kl Pdf 37 _VERIFIED_.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizikos Uzdavinynas 10 Kl Pdf 37 _VERIFIED_.md
deleted file mode 100644
index 0ad5a4a9809325647153e37c529c22446afbd783..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fizikos Uzdavinynas 10 Kl Pdf 37 _VERIFIED_.md	
+++ /dev/null
@@ -1,22 +0,0 @@
-
-<h1>Fizikos Uzdavinynas 10 Kl PDF 37: A Useful Resource for Physics Students</h1>
-<p>If you are a student of physics in the 10th grade, you might be looking for a good problem book to practice your skills and prepare for exams. One of the best options available online is the fizikos uzdavinynas 10 kl pdf 37, which is a collection of physics problems and solutions for the 10th grade curriculum in Lithuania.</p>
-<h2>fizikos uzdavinynas 10 kl pdf 37</h2><br /><p><b><b>DOWNLOAD</b> &#128279; <a href="https://byltly.com/2uKvEI">https://byltly.com/2uKvEI</a></b></p><br /><br />
-<p>The fizikos uzdavinynas 10 kl pdf 37 covers all the main topics of physics, such as mechanics, thermodynamics, electricity, magnetism, optics, and modern physics. It contains over 300 problems of varying difficulty levels, from simple exercises to challenging puzzles. The problems are arranged by topic and subtopic, and each problem has a detailed solution with explanations and diagrams.</p>
-<p>The fizikos uzdavinynas 10 kl pdf 37 is a great resource for physics students who want to improve their understanding of the subject and test their knowledge. It can also help teachers who need some extra material for their classes or homework assignments. The fizikos uzdavinynas 10 kl pdf 37 is available for free download from the website of the Lithuanian Ministry of Education and Science.</p>
-<p>To download the fizikos uzdavinynas 10 kl pdf 37, you just need to follow these simple steps:</p>
-<ol>
-<li>Go to the website of the Lithuanian Ministry of Education and Science at https://www.smm.lt/</li>
-<li>Click on the tab "Mokymo priemonės" (Teaching tools) on the top menu.</li>
-<li>Scroll down to the section "Fizika" (Physics) and click on the link "Fizikos uždavinių rinkinys X klasei" (Physics problem set for the 10th grade).</li>
-<li>You will see a list of files with different formats and languages. Choose the one that says "PDF (Lietuvių)" (PDF in Lithuanian).</li>
-<li>Click on the download icon next to the file name and save it to your device.</li>
-</ol>
-<p>That's it! You now have access to the fizikos uzdavinynas 10 kl pdf 37, a useful resource for physics students. Enjoy solving the problems and learning more about physics!</p>
-
-<p>The fizikos uzdavinynas 10 kl pdf 37 is not only a problem book, but also a learning tool. It can help you review the concepts and formulas of physics, as well as develop your problem-solving skills and logical thinking. The problems are designed to stimulate your curiosity and creativity, and to challenge you to apply your knowledge in different situations.</p>
-<p>The fizikos uzdavinynas 10 kl pdf 37 is also a reliable source of information. It is based on the official physics curriculum for the 10th grade in Lithuania, which follows the international standards and recommendations. The problems and solutions are written by experienced physics teachers and experts, who have checked the accuracy and clarity of the content.</p>
-<p>The fizikos uzdavinynas 10 kl pdf 37 is suitable for students of any level of physics proficiency. Whether you are a beginner or an advanced learner, you will find something useful and interesting in this book. You can use it as a self-study guide, a supplement to your textbook, or a preparation for exams. You can also use it as a fun and educational activity to do with your friends or family.</p>
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Antenna Web Design Studio 6.57 REPACK Crack.md b/spaces/1gistliPinn/ChatGPT4/Examples/Antenna Web Design Studio 6.57 REPACK Crack.md
deleted file mode 100644
index efcd95708cbdce1b35354e5c1f85ff13570161c0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Antenna Web Design Studio 6.57 REPACK Crack.md	
+++ /dev/null
@@ -1,96 +0,0 @@
-<br />
-<h1>Antenna Web Design Studio 6.57 Crack: A Review</h1>
-<p>If you are looking for software that can help you create your own website without coding, you might want to check out Antenna Web Design Studio 6.57 Crack. This is a powerful and easy-to-use web design tool that lets you design your web pages with drag and drop, transparent layers, master pages, and more. In this article, we will review the features, benefits, and drawbacks of Antenna Web Design Studio 6.57 Crack.</p>
-
-<h2>Features of Antenna Web Design Studio 6.57 Crack</h2>
-<p>Antenna Web Design Studio 6.57 Crack has many features that make it a great choice for web designers of all levels. Some of the main features are:</p>
-<h2>Antenna Web Design Studio 6.57 Crack</h2><br /><p><b><b>Download File</b> &#10038;&#10038;&#10038; <a href="https://imgfil.com/2uxZlp">https://imgfil.com/2uxZlp</a></b></p><br /><br />
-<ul>
-<li>No need for technical knowledge of HTML and CSS: You can design your web pages visually with Antenna Web Design Studio 6.57 Crack, without writing any code. You can also edit the HTML and CSS code if you want to customize your pages further.</li>
-<li>Create dual layer layouts for desktop and mobile versions: You can design your website for both desktop and mobile devices with Antenna Web Design Studio 6.57 Crack. You can create two different layouts for each page, and switch between them easily. This way, you can ensure that your website looks good and works well on any device.</li>
-<li>Very high speed in creating pages: You can create your web pages in minutes with Antenna Web Design Studio 6.57 Crack, thanks to its intuitive interface and powerful editor. You can drag and drop your images, text, videos, buttons, and animations from anywhere on your webpage, and adjust them as you like.</li>
-<li>Possibility to create gallery of images: You can also create beautiful photo galleries for your web pages with Antenna Web Design Studio 6.57 Crack. You can choose from different styles and effects for your galleries, and add captions and links to your images.</li>
-<li>Benefit from the very powerful and beautiful graphics capabilities: Antenna Web Design Studio 6.57 Crack also has a built-in graphics editor that lets you create stunning graphics for your website. You can draw shapes, gradients, shadows, textures, and more with Antenna Web Design Studio 6.57 Crack.</li>
-<li>Take advantage of the complete layering system of your web pages: Antenna Web Design Studio 6.57 Crack also allows you to use transparent layers to create complex layouts for your web pages. You can stack multiple layers on top of each other, and change their opacity, position, size, and rotation.</li>
-<li>Ability to design CSS with CSS Styles Editor: Antenna Web Design Studio 6.57 Crack also has a CSS Styles Editor that lets you create and edit CSS styles for your web pages. You can apply different styles to different elements on your page, and preview the results instantly.</li>
-</ul>
-
-<h2>Benefits of Antenna Web Design Studio 6.57 Crack</h2>
-<p>Antenna Web Design Studio 6.57 Crack has many benefits that make it a worthwhile software to use for web design. Some of the main benefits are:</p>
-<ul>
-<li>It is easy to use: Antenna Web Design Studio 6.57 Crack has a user-friendly interface that makes it easy to navigate and use. You can access all the tools and options from the main menu or the toolbar, and get help from the online tutorials and support.</li>
-<li>It is flexible: Antenna Web Design Studio 6.57 Crack gives you full control over your web design, allowing you to customize every aspect of your web pages. You can also import and export your files in various formats, such as HTML, JPG, PNG, GIF, SVG, PDF, etc.</li>
-<li>It is affordable: Antenna Web Design Studio 6.57 Crack is a low-cost software that offers a lot of value for its price. You can download it for free from various websites, or buy it for a reasonable price from the official website.</li>
-<li>It is compatible: Antenna Web Design Studio 6.57 Crack is compatible with Windows 7/8/8.1/10 operating systems, and works well with most browsers and devices.</li>
-</ul>
-
-<h2>Drawbacks of Antenna Web Design Studio 6.57 Crack</h2>
-<p>Antenna Web Design Studio 6.57 Crack is not perfect, and it has some drawbacks that you should be aware of before using it. Some of the main drawbacks are:</p>
-<ul>
-<li>It is illegal: Antenna Web Design Studio 6.57 Crack is a cracked version of the original software, which means that it is not authorized by the developer or the distributor. Using cracked software is illegal and unethical, and it may expose you to legal issues or malware infections.</li>
-<li>It is unstable: Antenna Web Design Studio 6.57 Crack may not work properly or crash frequently due to its cracked nature. It may also have bugs or errors that affect its performance or functionality.</li>
-<li>It is outdated: Antenna Web Design Studio 6.57 Crack is an old version of the software that may not have the latest features or updates that the original software has.</li>
-<li>It is unsupported: Antenna Web Design Studio 6.57 Crack does not have any official support or customer service from the developer or the distributor. If you encounter any problems or issues with the software, you will not be able to get any help or assistance.</li>
-</ul>
-
-<h2>How to Download and Install Antenna Web Design Studio 6.57 Crack</h2>
-<p>If you want to try Antenna Web Design Studio 6.57 Crack for yourself, you can download it from various websites that offer cracked software. However, we do not recommend this option, as it is illegal and risky. You may end up downloading a virus or malware that can harm your computer or steal your personal information.</p>
-
-<p>The better option is to skip the crack and buy Antenna Web Design Studio 6.57 from the official website or a trusted vendor. This way, you can get a legal, stable, updated, and supported version of the software that will work as intended.</p>
-
-<p>To buy Antenna Web Design Studio 6.57, you need to follow these steps:</p>
-<ol>
-<li>Go to the official website of Antenna Web Design Studio at https://www.stormdance.net/</li>
-<li>Click on the "Buy Now" button and choose your preferred payment method.</li>
-<li>After completing the payment, you will receive an email with your license key and a download link.</li>
-<li>Click on the download link and save the setup file on your computer.</li>
-<li>Run the setup file and follow the instructions to install Antenna Web Design Studio 6.57 Crack on your computer.</li>
-<li>Enter your license key when prompted to activate the software.</li>
-<li>Enjoy creating your own website with Antenna Web Design Studio 6.57 Crack.</li>
-</ol>
-
-<h2>Tips and Tricks for Using Antenna Web Design Studio 6.57 Crack</h2>
-<p>Antenna Web Design Studio 6.57 Crack is a versatile and powerful web design software that can help you create amazing websites with ease. However, there are some tips and tricks that can help you make the most out of it and improve your web design skills. Here are some of them:</p>
-<ul>
-<li>Use master pages: Master pages are templates that you can use to create consistent layouts for your web pages. You can define common elements such as headers, footers, menus, logos, etc. on your master pages, and then apply them to your web pages. This way, you can save time and ensure uniformity across your website.</li>
-<li>Use layers: Layers are transparent containers that you can use to arrange your web page elements in different positions and depths. You can stack multiple layers on top of each other, and change their opacity, position, size, and rotation. This way, you can create complex and dynamic layouts for your web pages.</li>
-<li>Use styles: Styles are sets of formatting rules that you can apply to different elements on your web page, such as text, images, links, etc. You can create and edit styles with the CSS Styles Editor in Antenna Web Design Studio 6.57 Crack. This way, you can change the appearance of your web page elements easily and consistently.</li>
-<li>Use graphics: Graphics are visual elements that you can create or import in Antenna Web Design Studio 6.57 Crack. You can use graphics to enhance the look and feel of your website, such as backgrounds, buttons, icons, logos, etc. You can also use the built-in graphics editor in Antenna Web Design Studio 6.57 Crack to create stunning graphics for your website.</li>
-<li>Use galleries: Galleries are collections of images that you can display on your web pages in different ways. You can choose from different styles and effects for your galleries, such as slideshows, lightboxes, carousels, etc. You can also add captions and links to your images in your galleries.</li>
-</ul>
-
-<h2>Conclusion</h2>
-<p>Antenna Web Design Studio 6.57 Crack is a powerful and easy-to-use web design program that lets you create your own website without coding. It has many features, benefits, and drawbacks that you should consider before using it.</p>
-
-<p>If you are looking for a legal, stable, updated, and supported version of the software, you should buy Antenna Web Design Studio from the official website or a trusted vendor.</p>
-
-<p>If you are looking for an alternative web design software that is free and open source, you should try WordPress.org or Wix.com.</p>
-
-<p>If you are looking for a professional web design service that can create a custom website for you at an affordable price, you should contact us today.</p>
-
-<p>We hope this article has helped you learn more about Antenna Web Design Studio 6.57 Crack and how to use it effectively.</p>
-
-<p>Thank you for reading!</p>
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Folder Marker Pro 4.0 UPDATED Crack.md b/spaces/1gistliPinn/ChatGPT4/Examples/Folder Marker Pro 4.0 UPDATED Crack.md
deleted file mode 100644
index d521aa625ce374b61ca68ba1d937438c65338a5a..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Folder Marker Pro 4.0 UPDATED Crack.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Folder Marker Pro 4.0 Crack</h2><br /><p><b><b>Download Zip</b> &#10004; <a href="https://imgfil.com/2uxXNN">https://imgfil.com/2uxXNN</a></b></p><br /><br />
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apex Racing How to Race and Drift with Unlimited Money MOD.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apex Racing How to Race and Drift with Unlimited Money MOD.md
deleted file mode 100644
index 9cd67986e66d6bd817df0c741211c5447436d105..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apex Racing How to Race and Drift with Unlimited Money MOD.md	
+++ /dev/null
@@ -1,117 +0,0 @@
-
-<h1>Apex Racing Mod: A Realistic and Fun Racing Game for Android</h1>
-<p>If you are a fan of racing and drifting games, you might want to check out Apex Racing Mod, a game that offers highly realistic vehicle simulation, multiplayer and single player modes, customizable cars and tracks, leaderboard and achievements, and more. In this article, we will tell you what Apex Racing Mod is, how to download and install it, and how to play it.</p>
-<h2>apex racing mod</h2><br /><p><b><b>Download File</b> &#9675; <a href="https://urlin.us/2uSUk9">https://urlin.us/2uSUk9</a></b></p><br /><br />
- <h2>What is Apex Racing Mod?</h2>
-<p>Apex Racing Mod is a modified version of Apex Racing, a racing and drifting game developed by Mobiplay Games. The modded version gives you unlimited money, which you can use to buy and upgrade your cars, unlock new tracks, and access premium features. The modded version also removes ads and other restrictions from the original game.</p>
- <h3>Features of Apex Racing Mod</h3>
-<p>Apex Racing Mod has many features that make it one of the best racing games for Android. Here are some of them:</p>
- <h4>Realistic vehicle simulation</h4>
-<p>Apex Racing Mod uses advanced physics and graphics to create realistic car movements, collisions, damages, sounds, and effects. You can feel the difference between different car models, engines, tires, suspensions, brakes, and more. You can also customize your car's appearance, performance, and handling to suit your preferences.</p>
- <h4>Multiplayer and single player modes</h4>
-<p>Apex Racing Mod supports both online and offline gameplay. You can race against other players from around the world in real-time multiplayer mode, or challenge yourself in single player mode. You can choose from different game modes, such as race, drift, time trial, elimination, and more. You can also join or create your own racing club and compete with other clubs.</p>
- <h4>Customizable cars and tracks</h4>
-<p>Apex Racing Mod has a variety of cars and tracks to choose from. You can unlock over 50 cars from different categories, such as sports, muscle, classic, supercars, and more. You can also unlock over 20 tracks from different locations, such as city streets, highways, deserts, mountains, snowfields, and more. You can also create your own tracks using the track editor.</p>
- <h4>Leaderboard and achievements</h4>
-<p>Apex Racing Mod has a global leaderboard that shows your ranking among other players based on your performance in multiplayer mode. You can also earn achievements by completing various tasks and challenges in the game. You can share your progress and achievements with your friends on social media.</p>
- <h2>How to download and install Apex Racing Mod?</h2>
-<p>If you want to download and install Apex Racing Mod on your Android device, you need to follow these steps:</p>
- <h3>Requirements for Apex Racing Mod</h3>
-<p>Before you download and install Apex Racing Mod, you need to make sure that your device meets these requirements:</p>
-<ul>
-<li>Your device must have Android 5.0 or higher.</li>
-<li>Your device must have at least 200 MB of free storage space.</li>
-<li>Your device must have a stable internet connection.</li>
-<li>You must enable unknown sources in your device's settings.</li>
-</ul>
- <h3>Steps to download and install Apex Racing Mod</h3>
-<p>After you have checked the requirements, you can follow these steps to download and install Apex Racing Mod:</p>
-<ol>
-<li>Go to this link to download the APK file of Apex Racing Mod: https://apkrace.com/apex-racing-mod-apk/</li>
-<li>Once the download is complete, locate the APK file in your device's file manager and tap on it to install it.</li>
-<li>Wait for the installation to finish and grant the necessary permissions to the app.</li>
-<li>Launch the app and enjoy playing Apex Racing Mod with unlimited money and no ads.</li>
-</ol>
- <h2>How to play Apex Racing Mod?</h2>
-<p>Now that you have downloaded and installed Apex Racing Mod, you might be wondering how to play it. Here are some tips and tricks to help you get started:</p>
- <h3>Controls and gameplay of Apex Racing Mod</h3>
-<p>Apex Racing Mod has simple and intuitive controls that you can customize according to your preference. You can use the on-screen buttons or tilt your device to steer your car. You can also use the brake, accelerator, handbrake, and nitro buttons to control your speed and drift. You can switch between different camera angles to get a better view of the road.</p>
- <p>The gameplay of Apex Racing Mod is fast-paced and exciting. You can choose from different game modes, such as race, drift, time trial, elimination, and more. You can also select your car and track from the available options or create your own. You can earn money by winning races, drifting, performing stunts, and completing challenges. You can use the money to buy and upgrade your cars, unlock new tracks, and access premium features.</p>
- <h3>Tips and tricks for Apex Racing Mod</h3>
-<p>If you want to improve your skills and performance in Apex Racing Mod, you can follow these tips and tricks:</p>
-<ul>
-<li>Practice on different tracks and cars to get familiar with their characteristics and handling.</li>
-<li>Use the nitro wisely. Don't waste it on straight roads or when you are already ahead of your opponents. Save it for tight corners or when you need a boost.</li>
-<li>Drift as much as possible. Drifting not only gives you more money, but also fills up your nitro meter faster.</li>
-<li>Avoid crashing into other cars or obstacles. Crashing will damage your car and slow you down. It will also reduce your money and score.</li>
-<li>Upgrade your car regularly. Upgrading your car will improve its performance, speed, handling, durability, and appearance.</li>
-</ul>
- <h2>Conclusion</h2>
-<p>Apex Racing Mod is a fun and realistic racing game for Android that you can download and play for free. It has many features that make it one of the best racing games for Android, such as realistic vehicle simulation, multiplayer and single player modes, customizable cars and tracks, leaderboard and achievements, and more. It also gives you unlimited money, which you can use to buy and upgrade your cars, unlock new tracks, and access premium features. It also removes ads and other restrictions from the original game.</p>
- <p>If you are looking for a racing game that will challenge your skills and entertain you for hours, you should try Apex Racing Mod. It is easy to download and install, and easy to play. You can race against other players from around the world or challenge yourself in single player mode. You can also create your own tracks using the track editor. You can share your progress and achievements with your friends on social media.</p>
- <p>Apex Racing Mod is a game that will make you feel like a real racer. Download it now and enjoy the thrill of racing!</p>
- <h2>FAQs</h2>
-<p>Here are some frequently asked questions about Apex Racing Mod:</p>
- <ol>
-<li><b>Is Apex Racing Mod safe to download and install?</b></li>
-<p>Yes, Apex Racing Mod is safe to download and install. It does not contain any viruses or malware that could harm your device or data. However, you should always download it from a trusted source and enable unknown sources in your device's settings before installing it.</p>
- <li><b>Do I need to root my device to play Apex Racing Mod?</b></li>
-<p>No, you do not need to root your device to play Apex Racing Mod. The modded version works on both rooted and non-rooted devices.</p>
- <li><b>Can I play Apex Racing Mod offline?</b></li>
-<p>Yes, you can play Apex Racing Mod offline. However, some features may not be available or updated when you play offline, such as multiplayer mode, leaderboard, achievements, etc.</p>
- <li><b>How can I update Apex Racing Mod?</b></li>
-<p>To update Apex Racing Mod, you need to download the latest version of the APK file and install it over the existing one. You do not need to uninstall the previous version or lose your progress.</p>
- <li><b>How can I contact the developer of Apex Racing Mod?</b></li>
-<p>If you have any questions or feedback about Apex Racing Mod, you can contact the developer of the original game (Mobiplay Games) through their email address: mobiplaygames@gmail.com. You can also visit their website (https://mobiplaygames.com/) or follow them on Facebook (https://www.facebook.com/mobiplaygames) and Twitter (https://twitter.com/mobiplaygames) for more updates and news.</p>
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build and Battle with Your Favorite Cookies in Cookie Run Kingdom Online.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build and Battle with Your Favorite Cookies in Cookie Run Kingdom Online.md
deleted file mode 100644
index 1c1a96b5720b78a1e9976f1db54c90059914cf31..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build and Battle with Your Favorite Cookies in Cookie Run Kingdom Online.md	
+++ /dev/null
@@ -1,93 +0,0 @@
-
-<h1>Cookie Run Kingdom: How to Play Online Without Downloading</h1>
-<p>Do you love cookies and RPG games? If so, you might want to try Cookie Run Kingdom, a popular game that lets you build your own kingdom of cookies and fight against evil forces. But what if you don't want to download the game on your device? Don't worry, there are ways to play Cookie Run Kingdom online without downloading. In this article, we will show you how to do that and why you might want to try it.</p>
-<h2>cookie run kingdom no download</h2><br /><p><b><b>Download</b> &#127775; <a href="https://urlin.us/2uSUU0">https://urlin.us/2uSUU0</a></b></p><br /><br />
-<h2>What is Cookie Run Kingdom?</h2>
-<p>Cookie Run Kingdom is a builder and battle RPG game developed by Devsisters Corporation. It was released in January 2021 and has been downloaded over 10 million times on Google Play and App Store. The game is also available on PC, Mac, and browser platforms.</p>
-<h3>A builder and battle RPG game</h3>
-<p>In Cookie Run Kingdom, you can create your own kingdom of cookies by building various structures, such as houses, farms, shops, and factories. You can also recruit different types of cookies, each with their own skills and abilities, to join your team. You can then use your team of cookies to fight against enemies in various modes, such as story mode, tower mode, arena mode, and guild mode. You can also upgrade your cookies and equip them with items to make them stronger.</p>
-<h3>A cookie-themed adventure with a rich story</h3>
-<p>Cookie Run Kingdom is not just a game of building and fighting. It also has a rich story that unfolds as you progress through the game. You will discover the secrets of the cookie world, meet various characters, and face different challenges. The game has over 200 story levels, each with its own cutscenes and dialogues. The game also features voice acting from famous actors, such as Tom Kenny, Tara Strong, Cristina Vee, and more.</p>
-<h3>A multiplayer game with guilds and events</h3>
-<p>Cookie Run Kingdom is also a multiplayer game that lets you interact with other players from around the world. You can join or create a guild with other players and cooperate with them in guild missions, raids, wars, and chat. You can also compete with other players in the arena mode and rank up in the leaderboard. The game also has regular events that offer special rewards and challenges.</p>
-<h2>Why play Cookie Run Kingdom online without downloading?</h2>
-<p>Cookie Run Kingdom is a fun and addictive game that you can enjoy on your device. However, there are some reasons why you might want to play it online without downloading. Here are some of them:</p>
-<h3>Save storage space on your device</h3>
-<p>Cookie Run Kingdom is a large game that requires about 1.5 GB of storage space on your device. If you have limited storage space or want to save it for other apps or files, you might not want to download the game. By playing it online without downloading, you can save storage space on your device and still enjoy the game.</p>
-<h3>Play on any device with a browser</h3>
-<p>Cookie Run Kingdom is compatible with Android, iOS, PC, Mac, and browser platforms. However, if you don't have access to your preferred device or want to switch between devices easily, you might want to play it online without downloading. By playing it online, you can use any device that has a browser and an internet connection. You can also switch between devices without losing your progress or data.</p>
-<h3>Enjoy faster loading and smoother gameplay</h3>
-<p>Cookie Run Kingdom is a high-quality game that requires a lot of resources to run smoothly. If you have a slow or unstable internet connection, or a low-end device, you might experience lagging, crashing, or freezing issues when playing the game. By playing it online without downloading, you can avoid these problems and enjoy faster loading and smoother gameplay. The online platform will handle the processing and rendering of the game for you, so you don't have to worry about your device's performance or connection.</p>
-<h2>How to play Cookie Run Kingdom online without downloading?</h2>
-<p>Now that you know why you might want to play Cookie Run Kingdom online without downloading, you might be wondering how to do it. There are two main ways to play the game online: using now.gg or using YouTube. Here are the steps for each method:</p>
-<h3>Use now.gg to play in your browser</h3>
-<p>Now.gg is a cloud gaming platform that lets you play mobile games in your browser without downloading or installing anything. It is free, fast, and secure. You can use now.gg to play Cookie Run Kingdom online in a few simple steps:</p>
-<h4>Step 1: Go to the now.gg website</h4>
-<p>Open your browser and go to the now.gg website. You will see a list of games that you can play on the platform. You can also use the search bar to find the game you want.</p>
-<h4>Step 2: Search for Cookie Run Kingdom</h4>
-<p>Type "Cookie Run Kingdom" in the search bar and hit enter. You will see the game's icon and name on the screen. Click on it to open the game's page.</p>
-<h4>Step 3: Click on the play button and enjoy</h4>
-<p>On the game's page, you will see a big play button on the top right corner. Click on it to start playing the game in your browser. You will need to sign in with your Google account or create a new one if you don't have one. You will also need to agree to the terms and conditions of the game and the platform. After that, you can enjoy playing Cookie Run Kingdom online without downloading.</p>
-<h3>Use YouTube to watch gameplay videos and guides</h3>
-<p>If you don't want to play Cookie Run Kingdom online but still want to enjoy it without downloading, you can use YouTube to watch gameplay videos and guides. YouTube is a video-sharing platform that has millions of videos on various topics, including games. You can use YouTube to watch Cookie Run Kingdom videos in a few simple steps:</p>
-<h4>Step 1: Go to YouTube and search for Cookie Run Kingdom</h4>
-<p>Open your browser and go to YouTube.com. You will see a search bar on the top of the page. Type "Cookie Run Kingdom" in the search bar and hit enter. You will see a list of videos related to the game.</p>
-<h4>Step 2: Find a video that suits your interest and level</h4>
-<p>You can browse through the videos and find one that suits your interest and level. For example, you can watch videos that show how to build your kingdom, how to fight against enemies, how to recruit cookies, how to upgrade cookies, how to join guilds, how to participate in events, and more. You can also filter the videos by relevance, date, view count, rating, etc.</p>
-<h4>Step 3: Watch and learn from other players</h4>
-<p>Once you find a video that you like, click on it to watch it. You can learn from other players' strategies, tips, tricks, and experiences. You can also leave comments, like, share, or subscribe to the video or channel if you want.</p>
-<h2>Conclusion</h2>
-<p>Cookie Run Kingdom is a fun and addictive game that you can play on your device or online without downloading. Playing it online has some benefits, such as saving storage space, playing on any device with a browser, and enjoying faster loading and smoother gameplay. You can play it online using now.gg or watch it online using YouTube. Either way, you can have a great time with this cookie-themed adventure.</p>
-<h2>FAQs</h2>
- <h3>Q: Is Cookie Run Kingdom free to play?</h3>
-<p>A: Yes, Cookie Run Kingdom is free to play with optional in-app purchases.</p>
- <h3>Q: How do I save my progress when playing Cookie Run Kingdom online?</h3>
-<p>A: You can save your progress by linking your game account with your Google account or Facebook account.</p>
- <h3>Q: Can I play Cookie Run Kingdom offline?</h3>
-<p>A: No, you need an internet connection to play Cookie Run Kingdom.</p>
- <h3>Q: How do I get more cookies in Cookie Run Kingdom?</h3>
-<p>A: You can get more cookies by completing story levels, participating in events, summoning them with crystals or tickets, or buying them with real money.</p>
- <h3>Q: How do I join a guild in Cookie Run Kingdom?</h3>
-<p>A: You can join a guild by tapping on the guild icon on the bottom right corner of the screen. You can then search for a guild that suits your preferences, or create your own guild if you want.</p>
- <h3>Q: How do I contact the developers of Cookie Run Kingdom?</h3>
-<p>A: You can contact the developers of Cookie Run Kingdom by sending an email to support@cookierun.com or visiting their official website, Facebook page, Twitter account, or Discord server.</p>
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - The Best Mobile Game of 2023 - Download Here.md b/spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - The Best Mobile Game of 2023 - Download Here.md
deleted file mode 100644
index 63f9a723969bfbaa96bf4536c3078ea46199225b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/ARK Survival Evolved APK - The Best Mobile Game of 2023 - Download Here.md	
+++ /dev/null
@@ -1,117 +0,0 @@
-
- <h1>Download ARK: Survival Evolved APK Latest Version</h1>
-<p>Do you love dinosaurs? Do you love survival games? Do you love open-world games? If you answered yes to any of these questions, then you should definitely check out <strong>ARK: Survival Evolved</strong>, one of the most popular and immersive games on Android. In this article, we will tell you everything you need to know about this game and how to download its latest version as an APK file.</p>
-<h2>What is ARK: Survival Evolved?</h2>
-<p><strong>ARK: Survival Evolved</strong> is a 3D action-adventure game that lets you experience what it would be like to live in a world full of dinosaurs and other prehistoric creatures. You can explore, hunt, gather, craft, build, tame, breed, and fight your way through a massive open world that is constantly evolving and changing. You can play solo or join a tribe of other players online and cooperate or compete with them in various modes. You can also customize your character, your base, and your dinosaurs to suit your playstyle and preferences.</p>
-<h2>download ark survival evolved apk latest version</h2><br /><p><b><b>Download Zip</b> &rArr;&rArr;&rArr; <a href="https://jinyurl.com/2uNOqk">https://jinyurl.com/2uNOqk</a></b></p><br /><br />
-<h2>Why download ARK: Survival Evolved APK?</h2>
-<p>If you are a fan of ARK: Survival Evolved, you might be wondering why you should download the APK file instead of getting the game from the Google Play Store. Well, there are several reasons why downloading the APK file can be beneficial for you. Here are some of them:</p>
-<ul>
-<li>You can get the latest version of the game before it is officially released on the Play Store. This way, you can enjoy the new features, bug fixes, and improvements as soon as possible.</li>
-<li>You can access the game even if it is not available in your region or country. Some games are geo-restricted or banned in certain areas due to legal or political reasons. By downloading the APK file, you can bypass these restrictions and play the game wherever you are.</li>
-<li>You can save storage space on your device. The APK file is usually smaller than the Play Store version because it does not include additional data or files that are not necessary for running the game. You can also delete the APK file after installing the game to free up more space.</li>
-<li>You can avoid annoying ads and in-app purchases. Some games on the Play Store are filled with ads that interrupt your gameplay or tempt you to spend real money on items or upgrades. By downloading the APK file, you can avoid these annoyances and enjoy the game without any distractions or costs.</li>
-</ul>
-<h2>How to download ARK: Survival Evolved APK?</h2>
-<p>Now that you know why downloading ARK: Survival Evolved APK is a good idea, you might be wondering how to do it. Don't worry, it's very easy and simple. Just follow these steps and you will be playing the game in no time:</p>
-<h3>Step 1: Enable unknown sources</h3>
-<p>Before you can install any APK file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Play Store. To do this, go to your device's settings and look for security or privacy options. Then, find the option that says unknown sources or allow installation from unknown sources and toggle it on. You might see a warning message that says installing apps from unknown sources can harm your device or compromise your data. Don't worry, this is just a precautionary measure and as long as you download the APK file from a reliable and safe source, you have nothing to fear.</p>
-<h3>Step 2: Download the APK file</h3>
-<p>The next step is to download the APK file of ARK: Survival Evolved from a trustworthy and reputable source. There are many websites that offer APK files for various games and apps, but not all of them are safe and secure. Some of them might contain malware, viruses, or spyware that can damage your device or steal your personal information. To avoid this risk, we recommend downloading the APK file from <a href="https://apkpure.com">APKPure.com</a>, one of the most popular and trusted sources for APK files on the internet. To download the APK file from APKPure.com, simply click on this link: <a href="https://apkpure.com/ark-survival-evolved/com.studiowildcard.wardrumstudios.ark/download?from=details">https://apkpure.com/ark-survival-evolved/com.studiowildcard.wardrumstudios.ark/download?from=details</a>. This will take you to the download page where you can see the size, version, and update date of the APK file. Then, click on the download button and wait for the file to be downloaded to your device.</p>
-<h3>Step 3: Install the APK file</h3>
-<p>Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it and tap on it. You might see a pop-up window that asks you if you want to install this application. Tap on install and wait for the installation process to finish. You might also see a pop-up window that asks you if you want to open this application or done. Tap on done and exit the installer.</p>
-<h3>Step 4: Launch the game and enjoy</h3>
-<p>Congratulations! You have successfully installed ARK: Survival Evolved APK on your device. Now, all that's left to do is to launch the game and enjoy it. To do this, go to your app drawer or home screen and look for the ARK: Survival Evolved icon. Tap on it and wait for the game to load. You might see a splash screen that shows the game's logo and some information. Then, you will see the main menu where you can choose to play single player, multiplayer, or settings. Choose the mode you want to play and start your adventure in the world of ARK: Survival Evolved.</p>
-<h2>What are the features of ARK: Survival Evolved APK?</h2>
-<p>ARK: Survival Evolved APK is not just a simple game. It is a rich and immersive experience that offers you countless hours of fun and entertainment. Here are some of the features that make this game so amazing:</p>
-<h3>Feature 1: Explore a massive open world</h3>
-<p>One of the most impressive aspects of ARK: Survival Evolved is its huge and diverse game world. You can explore over 50 square kilometers of land and sea, each with its own biome, climate, terrain, flora, and fauna. You can discover lush jungles, snowy mountains, volcanic islands, swamps, caves, underwater reefs, and more. You can also encounter over 200 different species of animals, from dinosaurs and mammals to insects and fish. Some of them are friendly and can be tamed, while others are hostile and will attack you on sight. You can also find hidden secrets, ancient ruins, artifacts, and loot scattered around the world.</p>
-<h3>Feature 2: Tame and breed over 80 dinosaurs</h3>
-<p>If you love dinosaurs, you will love ARK: Survival Evolved. This game lets you tame and breed over 80 different types of dinosaurs, from the mighty Tyrannosaurus Rex to the adorable Dodo. You can use various methods to capture and tame them, such as knocking them out with tranquilizer darts, feeding them berries or meat, or using special items like kibble or pheromones. Once you have tamed a dinosaur, you can ride it, use it as a mount, a pet, a companion, or a weapon. You can also breed them to create new generations of dinosaurs with improved stats and traits. You can even mutate them to create unique and rare variations.</p>
-<h3>Feature 3: Craft and build your base</h3>
-<p>In order to survive in ARK: Survival Evolved, you need to craft and build your base. You can gather resources from the environment, such as wood, stone, metal, fiber, hide, and more. You can use these resources to craft tools, weapons, armor, clothing, and other items that will help you in your journey. You can also use these resources to build structures, such as walls, floors, roofs, doors, windows, fences, ladders, ramps, and more. You can design your base however you want, from a simple hut to a fortified castle. You can also decorate your base with furniture, paintings, trophies, flags, and more.</p>
-<h3>Feature 4: Join a tribe and cooperate with other players</h3>
-<p>ARK: Survival Evolved is not only a solo game. You can also play online with other players from around the world. You can join a tribe of up to 10 players and cooperate with them in various ways. You can share resources, items, structures, dinosaurs, and more with your tribe members. You can also chat with them, trade with them, fight with them, or raid with them. You can also join or create a server that suits your preferences, such as PvE, PvP, hardcore, casual, modded, or vanilla. You can also customize the server settings, such as the difficulty level, the day and night cycle, the weather, the spawn rates, and more.</p>
-<h3>Feature 5: Survive in a harsh environment</h3>
-<p>ARK: Survival Evolved is not a game for the faint of heart. It is a game that challenges you to survive in a harsh and unforgiving environment. You have to deal with hunger, thirst, temperature, weather, diseases, and predators. You have to eat and drink regularly to maintain your health and stamina. You have to wear appropriate clothing and shelter to protect yourself from the heat or cold. You have to avoid or cure diseases that can affect your performance or even kill you. You have to fight or flee from predators that can attack you at any time. You have to be prepared for anything and everything in ARK: Survival Evolved.</p>
-<h2>What are the requirements for ARK: Survival Evolved APK?</h2>
-<p>ARK: Survival Evolved APK is a game that requires a lot of resources and power to run smoothly on your device. It is not a game that you can play on any device. It is a game that you need to have a decent device to enjoy. Here are the minimum and recommended specifications for running ARK: Survival Evolved APK on your device:</p>
-<table>
-<tr>
-<th>Minimum</th>
-<th>Recommended</th>
-</tr>
-<tr>
-<td>Android 7.0 or higher</td>
-<td>Android 9.0 or higher</td>
-</tr>
-<tr>
-<td>3 GB of RAM</td>
-<td>4 GB of RAM or more</td>
-</tr>
-<tr>
-<td>2.4 GHz quad-core processor</td>
-<td>3.0 GHz octa-core processor or better</td>
-</tr>
-<tr>
-<td>Mali-T760MP4 GPU or equivalent</td>
-<td>Adreno 530 GPU or higher</td>
-</tr>
-<tr>
-<td>2 GB of free storage space</td>
-<td>4 GB of free storage space or more</td>
-</tr>
-</table>
- <p>If your device meets these requirements, you should be able to play ARK: Survival Evolved APK without any major issues. However, if your device does not meet these requirements, you might experience lag, crashes, glitches, or errors while playing the game. In that case, you might want to lower the graphics settings, close other apps running in the background, or upgrade your device.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, ARK: Survival Evolved APK is an amazing game that offers you a unique and thrilling experience of living in a world full of dinosaurs and other prehistoric creatures. You can explore, hunt, gather, craft, build, tame, breed, and fight your way through a massive open world that is constantly evolving and changing. You can play solo or join a tribe of other players online and cooperate or compete with them in various modes. You can also customize your character, your base, and your dinosaurs to suit your playstyle and preferences.</p>
- <p>If you are interested in playing this game, you can download its latest version as an APK file from APKPure.com by following the steps we have provided above. This way, you can enjoy the game before it is officially released on the Play Store, access the game even if it is not available in your region or country, save storage space on your device, avoid annoying ads and in-app purchases, and enjoy the game without any distractions or costs.</p>
- <p>So, what are you waiting for? Download ARK: Survival Evolved APK now and start your adventure in the world of dinosaurs. You won't regret it!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions and answers about ARK: Survival Evolved APK:</p>
- <h3>Q: Is ARK: Survival Evolved APK safe to download and install?</h3>
-<p>A: Yes, as long as you download the APK file from a reliable and safe source like APKPure.com, you should not have any problems with security or privacy. However, you should always be careful when downloading and installing any app from unknown sources and scan them with an antivirus app before opening them.</p>
- <h3>Q: Is ARK: Survival Evolved APK free to play?</h3>
-<p>A: Yes, ARK: Survival Evolved APK is free to play and does not require any subscription or payment to download or play. However, the game does offer some optional in-app purchases that can enhance your gameplay or unlock some premium features. You can choose to buy these items or not depending on your preference.</p>
- <h3>Q: How can I update ARK: Survival Evolved APK?</h3>
-<p>A: To update ARK: Survival Evolved APK, you need to download the latest version of the APK file from APKPure.com and install it over the existing one. You do not need to uninstall the previous version or lose your progress. However, you should always backup your data before updating any app to avoid any potential issues.</p>
- <h3>Q: How can I play ARK: Survival Evolved APK offline?</h3>
-<p>A: You can play ARK: Survival Evolved APK offline by choosing the single player mode from the main menu. This way, you can enjoy the game without an internet connection or other players. However, you will not be able to access some features or modes that require online connectivity, such as multiplayer, leaderboards, or events.</p>
- <h3>Q: How can I contact the developers of ARK: Survival Evolved APK?</h3>
-<p>A: You can contact the developers of ARK: Survival Evolved APK by visiting their official website at <a href="https://www.playark.com/">https://www.playark.com/</a> or their social media pages on Facebook, Twitter, Instagram, YouTube, or Discord. You can also send them an email at <a href="mailto:support@studiowildcard.com">support@studiowildcard.com</a> or use the in-game feedback option.</p>
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download 3D Interior Design Software and Tools for Professional Results.md b/spaces/1phancelerku/anime-remove-background/Download 3D Interior Design Software and Tools for Professional Results.md
deleted file mode 100644
index d09d69c8476342fd6b4ab531de810919eb3345c6..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download 3D Interior Design Software and Tools for Professional Results.md	
+++ /dev/null
@@ -1,108 +0,0 @@
-
-<h1>How to Download 3D Interior Models for Your Projects</h1>
-<p>If you are working on a project that involves interior design, architecture, or visualization, you might want to use 3D interior models to enhance your work. 3D interior models are digital representations of indoor spaces and objects that you can download and use in your software. In this article, we will explain what 3D interior models are, why you need them, where to find them, and how to use them.</p>
-<h2>download 3d interior</h2><br /><p><b><b>Download File</b> &#10038; <a href="https://jinyurl.com/2uNPeY">https://jinyurl.com/2uNPeY</a></b></p><br /><br />
- <h2>What are 3D Interior Models and Why You Need Them</h2>
-<h3>3D Interior Models are digital representations of indoor spaces and objects</h3>
-<p>3D interior models are files that contain the geometry, texture, material, lighting, and other properties of indoor spaces and objects. They can be created using 3D modeling software or scanned from real-life environments. They can range from simple furniture pieces to complex scenes with multiple elements.</p>
-<h3>3D Interior Models can help you visualize, design, and present your ideas</h3>
-<p>Using 3D interior models can have many benefits for your projects. For example, you can:</p>
-<ul>
-<li>Visualize your ideas in a realistic and immersive way</li>
-<li>Design and test different layouts, colors, styles, and lighting effects</li>
-<li>Present your work to clients, collaborators, or audiences in a professional and engaging way</li>
-<li>Save time and money by avoiding mistakes and rework</li>
-<li>Learn from other designers and improve your skills</li>
-</ul>
- <h2>Where to Find and Download Free 3D Interior Models Online</h2>
-<p>There are many websites that offer free 3D interior models for download. You can browse through thousands of models in various categories, formats, and quality levels. Here are some of the most popular ones:</p>
-<h3>Sketchfab</h3>
-<p><a href="https://sketchfab.com">Sketchfab</a> is a platform that lets you upload, view, and download 3D models in your browser. You can find over 18,000 free interior 3D models on Sketchfab, ranging from commercial buildings to cozy bedrooms. You can also interact with the models using VR or AR devices.</p>
-<h3>CGTrader</h3>
-<p><a href="https://www.cgtrader.com">CGTrader</a> is a marketplace that connects 3D designers with buyers. You can find over 18,000 free interior 3D models on CGTrader, covering various styles and themes. You can also buy premium models or request custom ones from the community.</p>
-<h3>3DZIP.ORG</h3>
-<p><a href="https://3dzip.org">3DZIP.ORG</a> is a website that provides free resources for 3D visualization. You can find over 13,000 free interior 3D models on 3DZIP.ORG, mostly in SketchUp format. You can also download textures, materials, scenes, and tutorials.</p>
- <h2>How to Use 3D Interior Models in Your Software</h2>
-<h3>Importing 3D Interior Models into Your Software</h3>
-<p>To use 3D interior models in your software, you need to import them first. Depending on the software you use, you might need to convert the model files into compatible formats. Some of the most common formats for 3D interior models are FBX, OBJ, MAX, C4D, SKP, and BLEND. You can use online converters or plugins to convert the files if needed.</p>
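<p>As a scripted alternative to the online converters mentioned above, a short script can batch-convert between the open formats. The following is a minimal sketch assuming the open-source trimesh Python library and an illustrative chair.obj input file; neither is named in this article, and proprietary formats such as MAX or C4D still require their native applications.</p>

```python
# Convert a 3D model between open formats with trimesh (pip install trimesh).
# "chair.obj" is a placeholder file name for this example.
import trimesh

# The loader infers the input format from the file extension.
model = trimesh.load("chair.obj")

# Export format is likewise inferred from the extension; binary glTF
# (.glb) is widely supported by viewers, browsers, and game engines.
model.export("chair.glb")
```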
-<h3>Editing and Customizing 3D Interior Models</h3>
-<p>Once you have imported the 3D interior models into your software, you can edit and customize them according to your needs. You can modify the geometry, texture, material, lighting, and other properties of the models. You can also add or remove elements, combine or split models, and adjust the scale and orientation of the models. You can use the tools and features of your software to make the changes you want.</p>
-<h3>Rendering and Exporting 3D Interior Models</h3>
-<p>After you have finished editing and customizing the 3D interior models, you can render and export them for your final output. Rendering is the process of generating realistic images or videos from the 3D models. Exporting is the process of saving the 3D models or the rendered images or videos in a file format that you can use for your project. You can use the settings and options of your software to render and export the 3D interior models.</p>
- <h2>Conclusion</h2>
-<p>3D interior models are a great way to enhance your projects that involve interior design, architecture, or visualization. They can help you visualize, design, and present your ideas in a realistic and immersive way. You can find and download free 3D interior models online from various websites, such as Sketchfab, CGTrader, and 3DZIP.ORG. You can also use your software to import, edit, customize, render, and export the 3D interior models for your final output.</p>
- <h2>FAQs</h2>
-<h4>What are some of the best software for 3D interior modeling?</h4>
-<p>Some of the best software for 3D interior modeling are SketchUp, Blender, 3ds Max, Cinema 4D, and Revit. They have powerful tools and features for creating and editing 3D interior models.</p>
-<h4>How can I learn 3D interior modeling?</h4>
-<p>You can learn 3D interior modeling by taking online courses, watching tutorials, reading books, or joining communities. You can also practice by following projects or challenges that involve 3D interior modeling.</p>
-<h4>How much does it cost to download 3D interior models?</h4>
-<p>It depends on the website and the model you want to download. Some websites offer free 3D interior models that you can download without any charge. Some websites offer premium 3D interior models that you have to pay for. Some websites offer both free and premium 3D interior models that you can choose from.</p>
-<h4>What are some of the advantages of using 3D interior models?</h4>
-<p>Some of the advantages of using 3D interior models are:</p>
-<ul>
-<li>You can save time and money by avoiding mistakes and rework</li>
-<li>You can design and test different layouts, colors, styles, and lighting effects</li>
-<li>You can present your work to clients, collaborators, or audiences in a professional and engaging way</li>
-<li>You can learn from other designers and improve your skills</li>
-</ul>
-<h4>What are some of the challenges of using 3D interior models?</h4>
-<p>Some of the challenges of using 3D interior models are:</p>
-<ul>
-<li>You need to have a compatible software and hardware to use them</li>
-<li>You need to have some skills and knowledge to create and edit them</li>
-<li>You need to have a good internet connection to download them</li>
-<li>You need to respect the license and attribution of the models</li>
-</ul></p>
\ No newline at end of file
diff --git a/spaces/2023Liu2023/bingo/src/lib/bots/bing/types.ts b/spaces/2023Liu2023/bingo/src/lib/bots/bing/types.ts
deleted file mode 100644
index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/lib/bots/bing/types.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-export type Author = 'user' | 'system' | 'bot'
-
-export type BotId = 'bing'
-
-export enum BingConversationStyle {
-  Creative = 'Creative',
-  Balanced = 'Balanced',
-  Precise = 'Precise'
-}
-
-export enum ErrorCode {
-  CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
-  BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
-  BING_FORBIDDEN = 'BING_FORBIDDEN',
-  BING_CAPTCHA = 'BING_CAPTCHA',
-  THROTTLE_LIMIT = 'THROTTLE_LIMIT',
-  NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
-  UNKNOWN_ERROR = 'UNKNOWN_ERROR',
-  NETWORK_ERROR = 'NETWORK_ERROR',
-}
-
-export class ChatError extends Error {
-  code: ErrorCode
-  constructor(message: string, code: ErrorCode) {
-    super(message)
-    this.code = code
-  }
-}
-
-export type ChatMessageModel = {
-  id: string
-  author: Author
-  text: string
-  error?: ChatError
-  throttling?: Throttling
-  sourceAttributions?: SourceAttribution[]
-  suggestedResponses?: SuggestedResponse[]
-}
-
-export interface ConversationModel {
-  messages: ChatMessageModel[]
-}
-
-export type Event =
-  | {
-      type: 'UPDATE_ANSWER'
-      data: {
-        text: string
-        spokenText?: string
-        sourceAttributions?: SourceAttribution[]
-        suggestedResponses?: SuggestedResponse[]
-        throttling?: Throttling
-      }
-    }
-  | {
-      type: 'DONE'
-    }
-  | {
-      type: 'ERROR'
-      error: ChatError
-    }
-
-export interface SendMessageParams<T> {
-  prompt: string
-  imageUrl?: string
-  options: T
-  onEvent: (event: Event) => void
-  signal?: AbortSignal
-}
-
-export interface ConversationResponse {
-  conversationId: string
-  clientId: string
-  conversationSignature: string
-  result: {
-    value: string
-    message?: string
-  }
-}
-
-export interface Telemetry {
-  metrics?: null
-  startTime: string
-}
-
-export interface ChatUpdateArgument {
-  messages?: ChatResponseMessage[]
-  throttling?: Throttling
-  requestId: string
-  result: null
-}
-
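-// Discriminated union over the numeric SignalR frame `type`; the values
-// correspond to InvocationEventType below (1 = Invocation, 2 = StreamItem,
-// 3 = Completion, 6 = Ping, 7 = Close).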
-export type ChatUpdateCompleteResponse = {
-  type: 2
-  invocationId: string
-  item: ChatResponseItem
-} | {
-  type: 1
-  target: string
-  arguments: ChatUpdateArgument[]
-} | {
-  type: 3
-  invocationId: string
-} | {
-  type: 6 | 7
-}
-
-export interface ChatRequestResult {
-  value: string
-  serviceVersion: string
-  error?: string
-}
-
-export interface ChatResponseItem {
-  messages: ChatResponseMessage[]
-  firstNewMessageIndex: number
-  suggestedResponses: null
-  conversationId: string
-  requestId: string
-  conversationExpiryTime: string
-  telemetry: Telemetry
-  result: ChatRequestResult
-  throttling: Throttling
-}
-export enum InvocationEventType {
-  Invocation = 1,
-  StreamItem = 2,
-  Completion = 3,
-  StreamInvocation = 4,
-  CancelInvocation = 5,
-  Ping = 6,
-  Close = 7,
-}
-
-// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
-
-export interface ConversationInfo {
-  conversationId: string
-  clientId: string
-  conversationSignature: string
-  invocationId: number
-  conversationStyle: BingConversationStyle
-  prompt: string
-  imageUrl?: string
-}
-
-export interface BingChatResponse {
-  conversationSignature: string
-  conversationId: string
-  clientId: string
-  invocationId: number
-  conversationExpiryTime: Date
-  response: string
-  details: ChatResponseMessage
-}
-
-export interface Throttling {
-  maxNumLongDocSummaryUserMessagesInConversation: number
-  maxNumUserMessagesInConversation: number
-  numLongDocSummaryUserMessagesInConversation: number
-  numUserMessagesInConversation: number
-}
-
-export interface ChatResponseMessage {
-  text: string
-  spokenText?: string
-  author: string
-  createdAt: Date
-  timestamp: Date
-  messageId: string
-  requestId: string
-  offense: string
-  adaptiveCards: AdaptiveCard[]
-  sourceAttributions: SourceAttribution[]
-  feedback: Feedback
-  contentOrigin: string
-  messageType?: string
-  contentType?: string
-  privacy: null
-  suggestedResponses: SuggestedResponse[]
-}
-
-export interface AdaptiveCard {
-  type: string
-  version: string
-  body: Body[]
-}
-
-export interface Body {
-  type: string
-  text: string
-  wrap: boolean
-  size?: string
-}
-
-export interface Feedback {
-  tag: null
-  updatedOn: null
-  type: string
-}
-
-export interface SourceAttribution {
-  providerDisplayName: string
-  seeMoreUrl: string
-  searchQuery: string
-}
-
-export interface SuggestedResponse {
-  text: string
-  author?: Author
-  createdAt?: Date
-  timestamp?: Date
-  messageId?: string
-  messageType?: string
-  offense?: string
-  feedback?: Feedback
-  contentOrigin?: string
-  privacy?: null
-}
-
-export interface KBlobRequest {
-  knowledgeRequest: KnowledgeRequestContext
-  imageBase64?: string
-}
-
-export interface KBlobResponse {
-  blobId: string
-  processedBlobId?: string
-}
-
-export interface KnowledgeRequestContext {
-  imageInfo: ImageInfo
-  knowledgeRequest: KnowledgeRequest
-}
-
-export interface ImageInfo {
-  url?: string
-}
-
-export interface KnowledgeRequest {
-  invokedSkills: string[]
-  subscriptionId: string
-  invokedSkillsRequestData: InvokedSkillsRequestData
-  convoData: ConvoData
-}
-
-export interface ConvoData {
-  convoid: string
-  convotone: BingConversationStyle
-}
-
-export interface InvokedSkillsRequestData {
-  enableFaceBlur: boolean
-}
-
-export interface FileItem {
-  url: string
-  status?: 'loading' | 'error' | 'loaded'
-}
diff --git a/spaces/2kaara/oreo/Dockerfile b/spaces/2kaara/oreo/Dockerfile
deleted file mode 100644
index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000
--- a/spaces/2kaara/oreo/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/data/__init__.py b/spaces/AIConsultant/MusicGen/audiocraft/data/__init__.py
deleted file mode 100644
index 2906ff12bc85a894837579f3137f6f71a0438329..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/data/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Audio loading and writing support. Datasets for raw audio
-or also including some metadata."""
-
-# flake8: noqa
-from . import audio, audio_dataset, info_audio_dataset, music_dataset, sound_dataset
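For orientation, the audio module re-exported above provides the package's read/write helpers. Below is a minimal sketch of round-tripping a file with them, assuming audiocraft is installed and an example.wav exists; the file names are illustrative, not taken from this module:

```python
# Read a waveform and write it back out with loudness normalization.
from audiocraft.data.audio import audio_read, audio_write

# Returns a (torch.Tensor of shape [channels, samples], sample_rate) pair.
wav, sr = audio_read("example.wav")

# The stem name gets its suffix from `format`; this writes example_copy.wav.
audio_write("example_copy", wav, sr, format="wav", strategy="loudness")
```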
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py
deleted file mode 100644
index 0c245621ef6925ae17757ff273f012de3de87360..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py
+++ /dev/null
@@ -1,2861 +0,0 @@
-default_scope = 'mmpose'
-default_hooks = dict(
-    timer=dict(type='IterTimerHook'),
-    logger=dict(type='LoggerHook', interval=50),
-    param_scheduler=dict(type='ParamSchedulerHook'),
-    checkpoint=dict(
-        type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
-    sampler_seed=dict(type='DistSamplerSeedHook'),
-    visualization=dict(type='PoseVisualizationHook', enable=False))
-custom_hooks = [dict(type='SyncBuffersHook')]
-env_cfg = dict(
-    cudnn_benchmark=False,
-    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-    dist_cfg=dict(backend='nccl'))
-vis_backends = [dict(type='LocalVisBackend')]
-visualizer = dict(
-    type='PoseLocalVisualizer',
-    vis_backends=[dict(type='LocalVisBackend'),
-                  dict(type='WandbVisBackend')],
-    name='visualizer')
-log_processor = dict(
-    type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
-log_level = 'INFO'
-load_from = None
-resume = False
-backend_args = dict(backend='local')
-train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10)
-val_cfg = dict()
-test_cfg = dict()
-colors = dict(
-    sss=[255, 128, 0],
-    lss=[255, 0, 128],
-    sso=[128, 0, 255],
-    lso=[0, 128, 255],
-    vest=[0, 128, 128],
-    sling=[0, 0, 128],
-    shorts=[128, 128, 128],
-    trousers=[128, 0, 128],
-    skirt=[64, 128, 128],
-    ssd=[64, 64, 128],
-    lsd=[128, 64, 0],
-    vd=[128, 64, 255],
-    sd=[128, 64, 0])
-dataset_info = dict(
-    dataset_name='deepfashion2',
-    paper_info=dict(
-        author=
-        'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
-        title=
-        'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
-        container=
-        'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
-        year='2019',
-        homepage='https://github.com/switchablenorms/DeepFashion2'),
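-    # Each numbered entry below defines one garment keypoint; `swap` names the
-    # horizontally symmetric keypoint used when images are flipped during
-    # augmentation, and is empty for points on the garment's center line.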
-    keypoint_info=dict({
-        0:
-        dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
-        1:
-        dict(
-            name='sss_kpt2',
-            id=1,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt6'),
-        2:
-        dict(
-            name='sss_kpt3',
-            id=2,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt5'),
-        3:
-        dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
-        4:
-        dict(
-            name='sss_kpt5',
-            id=4,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt3'),
-        5:
-        dict(
-            name='sss_kpt6',
-            id=5,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt2'),
-        6:
-        dict(
-            name='sss_kpt7',
-            id=6,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt25'),
-        7:
-        dict(
-            name='sss_kpt8',
-            id=7,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt24'),
-        8:
-        dict(
-            name='sss_kpt9',
-            id=8,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt23'),
-        9:
-        dict(
-            name='sss_kpt10',
-            id=9,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt22'),
-        10:
-        dict(
-            name='sss_kpt11',
-            id=10,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt21'),
-        11:
-        dict(
-            name='sss_kpt12',
-            id=11,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt20'),
-        12:
-        dict(
-            name='sss_kpt13',
-            id=12,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt19'),
-        13:
-        dict(
-            name='sss_kpt14',
-            id=13,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt18'),
-        14:
-        dict(
-            name='sss_kpt15',
-            id=14,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt17'),
-        15:
-        dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
-        16:
-        dict(
-            name='sss_kpt17',
-            id=16,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt15'),
-        17:
-        dict(
-            name='sss_kpt18',
-            id=17,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt14'),
-        18:
-        dict(
-            name='sss_kpt19',
-            id=18,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt13'),
-        19:
-        dict(
-            name='sss_kpt20',
-            id=19,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt12'),
-        20:
-        dict(
-            name='sss_kpt21',
-            id=20,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt11'),
-        21:
-        dict(
-            name='sss_kpt22',
-            id=21,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt10'),
-        22:
-        dict(
-            name='sss_kpt23',
-            id=22,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt9'),
-        23:
-        dict(
-            name='sss_kpt24',
-            id=23,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt8'),
-        24:
-        dict(
-            name='sss_kpt25',
-            id=24,
-            color=[255, 128, 0],
-            type='',
-            swap='sss_kpt7'),
-        25:
-        dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
-        26:
-        dict(
-            name='lss_kpt2',
-            id=26,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt6'),
-        27:
-        dict(
-            name='lss_kpt3',
-            id=27,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt5'),
-        28:
-        dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
-        29:
-        dict(
-            name='lss_kpt5',
-            id=29,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt3'),
-        30:
-        dict(
-            name='lss_kpt6',
-            id=30,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt2'),
-        31:
-        dict(
-            name='lss_kpt7',
-            id=31,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt33'),
-        32:
-        dict(
-            name='lss_kpt8',
-            id=32,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt32'),
-        33:
-        dict(
-            name='lss_kpt9',
-            id=33,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt31'),
-        34:
-        dict(
-            name='lss_kpt10',
-            id=34,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt30'),
-        35:
-        dict(
-            name='lss_kpt11',
-            id=35,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt29'),
-        36:
-        dict(
-            name='lss_kpt12',
-            id=36,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt28'),
-        37:
-        dict(
-            name='lss_kpt13',
-            id=37,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt27'),
-        38:
-        dict(
-            name='lss_kpt14',
-            id=38,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt26'),
-        39:
-        dict(
-            name='lss_kpt15',
-            id=39,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt25'),
-        40:
-        dict(
-            name='lss_kpt16',
-            id=40,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt24'),
-        41:
-        dict(
-            name='lss_kpt17',
-            id=41,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt23'),
-        42:
-        dict(
-            name='lss_kpt18',
-            id=42,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt22'),
-        43:
-        dict(
-            name='lss_kpt19',
-            id=43,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt21'),
-        44:
-        dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
-        45:
-        dict(
-            name='lss_kpt21',
-            id=45,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt19'),
-        46:
-        dict(
-            name='lss_kpt22',
-            id=46,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt18'),
-        47:
-        dict(
-            name='lss_kpt23',
-            id=47,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt17'),
-        48:
-        dict(
-            name='lss_kpt24',
-            id=48,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt16'),
-        49:
-        dict(
-            name='lss_kpt25',
-            id=49,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt15'),
-        50:
-        dict(
-            name='lss_kpt26',
-            id=50,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt14'),
-        51:
-        dict(
-            name='lss_kpt27',
-            id=51,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt13'),
-        52:
-        dict(
-            name='lss_kpt28',
-            id=52,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt12'),
-        53:
-        dict(
-            name='lss_kpt29',
-            id=53,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt11'),
-        54:
-        dict(
-            name='lss_kpt30',
-            id=54,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt10'),
-        55:
-        dict(
-            name='lss_kpt31',
-            id=55,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt9'),
-        56:
-        dict(
-            name='lss_kpt32',
-            id=56,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt8'),
-        57:
-        dict(
-            name='lss_kpt33',
-            id=57,
-            color=[255, 0, 128],
-            type='',
-            swap='lss_kpt7'),
-        58:
-        dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
-        59:
-        dict(
-            name='sso_kpt2',
-            id=59,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt26'),
-        60:
-        dict(
-            name='sso_kpt3',
-            id=60,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt5'),
-        61:
-        dict(
-            name='sso_kpt4',
-            id=61,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt6'),
-        62:
-        dict(
-            name='sso_kpt5',
-            id=62,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt3'),
-        63:
-        dict(
-            name='sso_kpt6',
-            id=63,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt4'),
-        64:
-        dict(
-            name='sso_kpt7',
-            id=64,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt25'),
-        65:
-        dict(
-            name='sso_kpt8',
-            id=65,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt24'),
-        66:
-        dict(
-            name='sso_kpt9',
-            id=66,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt23'),
-        67:
-        dict(
-            name='sso_kpt10',
-            id=67,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt22'),
-        68:
-        dict(
-            name='sso_kpt11',
-            id=68,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt21'),
-        69:
-        dict(
-            name='sso_kpt12',
-            id=69,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt20'),
-        70:
-        dict(
-            name='sso_kpt13',
-            id=70,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt19'),
-        71:
-        dict(
-            name='sso_kpt14',
-            id=71,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt18'),
-        72:
-        dict(
-            name='sso_kpt15',
-            id=72,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt17'),
-        73:
-        dict(
-            name='sso_kpt16',
-            id=73,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt29'),
-        74:
-        dict(
-            name='sso_kpt17',
-            id=74,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt15'),
-        75:
-        dict(
-            name='sso_kpt18',
-            id=75,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt14'),
-        76:
-        dict(
-            name='sso_kpt19',
-            id=76,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt13'),
-        77:
-        dict(
-            name='sso_kpt20',
-            id=77,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt12'),
-        78:
-        dict(
-            name='sso_kpt21',
-            id=78,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt11'),
-        79:
-        dict(
-            name='sso_kpt22',
-            id=79,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt10'),
-        80:
-        dict(
-            name='sso_kpt23',
-            id=80,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt9'),
-        81:
-        dict(
-            name='sso_kpt24',
-            id=81,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt8'),
-        82:
-        dict(
-            name='sso_kpt25',
-            id=82,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt7'),
-        83:
-        dict(
-            name='sso_kpt26',
-            id=83,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt2'),
-        84:
-        dict(
-            name='sso_kpt27',
-            id=84,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt30'),
-        85:
-        dict(
-            name='sso_kpt28',
-            id=85,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt31'),
-        86:
-        dict(
-            name='sso_kpt29',
-            id=86,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt16'),
-        87:
-        dict(
-            name='sso_kpt30',
-            id=87,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt27'),
-        88:
-        dict(
-            name='sso_kpt31',
-            id=88,
-            color=[128, 0, 255],
-            type='',
-            swap='sso_kpt28'),
-        89:
-        dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
-        90:
-        dict(
-            name='lso_kpt2',
-            id=90,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt6'),
-        91:
-        dict(
-            name='lso_kpt3',
-            id=91,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt5'),
-        92:
-        dict(
-            name='lso_kpt4',
-            id=92,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt34'),
-        93:
-        dict(
-            name='lso_kpt5',
-            id=93,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt3'),
-        94:
-        dict(
-            name='lso_kpt6',
-            id=94,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt2'),
-        95:
-        dict(
-            name='lso_kpt7',
-            id=95,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt33'),
-        96:
-        dict(
-            name='lso_kpt8',
-            id=96,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt32'),
-        97:
-        dict(
-            name='lso_kpt9',
-            id=97,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt31'),
-        98:
-        dict(
-            name='lso_kpt10',
-            id=98,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt30'),
-        99:
-        dict(
-            name='lso_kpt11',
-            id=99,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt29'),
-        100:
-        dict(
-            name='lso_kpt12',
-            id=100,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt28'),
-        101:
-        dict(
-            name='lso_kpt13',
-            id=101,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt27'),
-        102:
-        dict(
-            name='lso_kpt14',
-            id=102,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt26'),
-        103:
-        dict(
-            name='lso_kpt15',
-            id=103,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt25'),
-        104:
-        dict(
-            name='lso_kpt16',
-            id=104,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt24'),
-        105:
-        dict(
-            name='lso_kpt17',
-            id=105,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt23'),
-        106:
-        dict(
-            name='lso_kpt18',
-            id=106,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt22'),
-        107:
-        dict(
-            name='lso_kpt19',
-            id=107,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt21'),
-        108:
-        dict(
-            name='lso_kpt20',
-            id=108,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt37'),
-        109:
-        dict(
-            name='lso_kpt21',
-            id=109,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt19'),
-        110:
-        dict(
-            name='lso_kpt22',
-            id=110,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt18'),
-        111:
-        dict(
-            name='lso_kpt23',
-            id=111,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt17'),
-        112:
-        dict(
-            name='lso_kpt24',
-            id=112,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt16'),
-        113:
-        dict(
-            name='lso_kpt25',
-            id=113,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt15'),
-        114:
-        dict(
-            name='lso_kpt26',
-            id=114,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt14'),
-        115:
-        dict(
-            name='lso_kpt27',
-            id=115,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt13'),
-        116:
-        dict(
-            name='lso_kpt28',
-            id=116,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt12'),
-        117:
-        dict(
-            name='lso_kpt29',
-            id=117,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt11'),
-        118:
-        dict(
-            name='lso_kpt30',
-            id=118,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt10'),
-        119:
-        dict(
-            name='lso_kpt31',
-            id=119,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt9'),
-        120:
-        dict(
-            name='lso_kpt32',
-            id=120,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt8'),
-        121:
-        dict(
-            name='lso_kpt33',
-            id=121,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt7'),
-        122:
-        dict(
-            name='lso_kpt34',
-            id=122,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt4'),
-        123:
-        dict(
-            name='lso_kpt35',
-            id=123,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt38'),
-        124:
-        dict(
-            name='lso_kpt36',
-            id=124,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt39'),
-        125:
-        dict(
-            name='lso_kpt37',
-            id=125,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt20'),
-        126:
-        dict(
-            name='lso_kpt38',
-            id=126,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt35'),
-        127:
-        dict(
-            name='lso_kpt39',
-            id=127,
-            color=[0, 128, 255],
-            type='',
-            swap='lso_kpt36'),
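-        # 'swap' names the horizontally mirrored counterpart of a keypoint,
-        # used when an image is flipped during augmentation; an empty string
-        # marks a point on the garment's axis of symmetry. Name prefixes
-        # follow the DeepFashion2 categories ('vest', 'sling', 'shorts',
-        # 'trousers', 'skirt' below; 'ssd'/'lsd' = short/long sleeve dress,
-        # 'vd'/'sd' = vest/sling dress).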
-        128:
-        dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
-        129:
-        dict(
-            name='vest_kpt2',
-            id=129,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt6'),
-        130:
-        dict(
-            name='vest_kpt3',
-            id=130,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt5'),
-        131:
-        dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
-        132:
-        dict(
-            name='vest_kpt5',
-            id=132,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt3'),
-        133:
-        dict(
-            name='vest_kpt6',
-            id=133,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt2'),
-        134:
-        dict(
-            name='vest_kpt7',
-            id=134,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt15'),
-        135:
-        dict(
-            name='vest_kpt8',
-            id=135,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt14'),
-        136:
-        dict(
-            name='vest_kpt9',
-            id=136,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt13'),
-        137:
-        dict(
-            name='vest_kpt10',
-            id=137,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt12'),
-        138:
-        dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
-        139:
-        dict(
-            name='vest_kpt12',
-            id=139,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt10'),
-        140:
-        dict(
-            name='vest_kpt13',
-            id=140,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt9'),
-        141:
-        dict(
-            name='vest_kpt14',
-            id=141,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt8'),
-        142:
-        dict(
-            name='vest_kpt15',
-            id=142,
-            color=[0, 128, 128],
-            type='',
-            swap='vest_kpt7'),
-        143:
-        dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
-        144:
-        dict(
-            name='sling_kpt2',
-            id=144,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt6'),
-        145:
-        dict(
-            name='sling_kpt3',
-            id=145,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt5'),
-        146:
-        dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
-        147:
-        dict(
-            name='sling_kpt5',
-            id=147,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt3'),
-        148:
-        dict(
-            name='sling_kpt6',
-            id=148,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt2'),
-        149:
-        dict(
-            name='sling_kpt7',
-            id=149,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt15'),
-        150:
-        dict(
-            name='sling_kpt8',
-            id=150,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt14'),
-        151:
-        dict(
-            name='sling_kpt9',
-            id=151,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt13'),
-        152:
-        dict(
-            name='sling_kpt10',
-            id=152,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt12'),
-        153:
-        dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
-        154:
-        dict(
-            name='sling_kpt12',
-            id=154,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt10'),
-        155:
-        dict(
-            name='sling_kpt13',
-            id=155,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt9'),
-        156:
-        dict(
-            name='sling_kpt14',
-            id=156,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt8'),
-        157:
-        dict(
-            name='sling_kpt15',
-            id=157,
-            color=[0, 0, 128],
-            type='',
-            swap='sling_kpt7'),
-        158:
-        dict(
-            name='shorts_kpt1',
-            id=158,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt3'),
-        159:
-        dict(
-            name='shorts_kpt2',
-            id=159,
-            color=[128, 128, 128],
-            type='',
-            swap=''),
-        160:
-        dict(
-            name='shorts_kpt3',
-            id=160,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt1'),
-        161:
-        dict(
-            name='shorts_kpt4',
-            id=161,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt10'),
-        162:
-        dict(
-            name='shorts_kpt5',
-            id=162,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt9'),
-        163:
-        dict(
-            name='shorts_kpt6',
-            id=163,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt8'),
-        164:
-        dict(
-            name='shorts_kpt7',
-            id=164,
-            color=[128, 128, 128],
-            type='',
-            swap=''),
-        165:
-        dict(
-            name='shorts_kpt8',
-            id=165,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt6'),
-        166:
-        dict(
-            name='shorts_kpt9',
-            id=166,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt5'),
-        167:
-        dict(
-            name='shorts_kpt10',
-            id=167,
-            color=[128, 128, 128],
-            type='',
-            swap='shorts_kpt4'),
-        168:
-        dict(
-            name='trousers_kpt1',
-            id=168,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt3'),
-        169:
-        dict(
-            name='trousers_kpt2',
-            id=169,
-            color=[128, 0, 128],
-            type='',
-            swap=''),
-        170:
-        dict(
-            name='trousers_kpt3',
-            id=170,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt1'),
-        171:
-        dict(
-            name='trousers_kpt4',
-            id=171,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt14'),
-        172:
-        dict(
-            name='trousers_kpt5',
-            id=172,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt13'),
-        173:
-        dict(
-            name='trousers_kpt6',
-            id=173,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt12'),
-        174:
-        dict(
-            name='trousers_kpt7',
-            id=174,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt11'),
-        175:
-        dict(
-            name='trousers_kpt8',
-            id=175,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt10'),
-        176:
-        dict(
-            name='trousers_kpt9',
-            id=176,
-            color=[128, 0, 128],
-            type='',
-            swap=''),
-        177:
-        dict(
-            name='trousers_kpt10',
-            id=177,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt8'),
-        178:
-        dict(
-            name='trousers_kpt11',
-            id=178,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt7'),
-        179:
-        dict(
-            name='trousers_kpt12',
-            id=179,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt6'),
-        180:
-        dict(
-            name='trousers_kpt13',
-            id=180,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt5'),
-        181:
-        dict(
-            name='trousers_kpt14',
-            id=181,
-            color=[128, 0, 128],
-            type='',
-            swap='trousers_kpt4'),
-        182:
-        dict(
-            name='skirt_kpt1',
-            id=182,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt3'),
-        183:
-        dict(
-            name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
-        184:
-        dict(
-            name='skirt_kpt3',
-            id=184,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt1'),
-        185:
-        dict(
-            name='skirt_kpt4',
-            id=185,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt8'),
-        186:
-        dict(
-            name='skirt_kpt5',
-            id=186,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt7'),
-        187:
-        dict(
-            name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
-        188:
-        dict(
-            name='skirt_kpt7',
-            id=188,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt5'),
-        189:
-        dict(
-            name='skirt_kpt8',
-            id=189,
-            color=[64, 128, 128],
-            type='',
-            swap='skirt_kpt4'),
-        190:
-        dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
-        191:
-        dict(
-            name='ssd_kpt2',
-            id=191,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt6'),
-        192:
-        dict(
-            name='ssd_kpt3',
-            id=192,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt5'),
-        193:
-        dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
-        194:
-        dict(
-            name='ssd_kpt5',
-            id=194,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt3'),
-        195:
-        dict(
-            name='ssd_kpt6',
-            id=195,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt2'),
-        196:
-        dict(
-            name='ssd_kpt7',
-            id=196,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt29'),
-        197:
-        dict(
-            name='ssd_kpt8',
-            id=197,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt28'),
-        198:
-        dict(
-            name='ssd_kpt9',
-            id=198,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt27'),
-        199:
-        dict(
-            name='ssd_kpt10',
-            id=199,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt26'),
-        200:
-        dict(
-            name='ssd_kpt11',
-            id=200,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt25'),
-        201:
-        dict(
-            name='ssd_kpt12',
-            id=201,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt24'),
-        202:
-        dict(
-            name='ssd_kpt13',
-            id=202,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt23'),
-        203:
-        dict(
-            name='ssd_kpt14',
-            id=203,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt22'),
-        204:
-        dict(
-            name='ssd_kpt15',
-            id=204,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt21'),
-        205:
-        dict(
-            name='ssd_kpt16',
-            id=205,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt20'),
-        206:
-        dict(
-            name='ssd_kpt17',
-            id=206,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt19'),
-        207:
-        dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
-        208:
-        dict(
-            name='ssd_kpt19',
-            id=208,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt17'),
-        209:
-        dict(
-            name='ssd_kpt20',
-            id=209,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt16'),
-        210:
-        dict(
-            name='ssd_kpt21',
-            id=210,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt15'),
-        211:
-        dict(
-            name='ssd_kpt22',
-            id=211,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt14'),
-        212:
-        dict(
-            name='ssd_kpt23',
-            id=212,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt13'),
-        213:
-        dict(
-            name='ssd_kpt24',
-            id=213,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt12'),
-        214:
-        dict(
-            name='ssd_kpt25',
-            id=214,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt11'),
-        215:
-        dict(
-            name='ssd_kpt26',
-            id=215,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt10'),
-        216:
-        dict(
-            name='ssd_kpt27',
-            id=216,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt9'),
-        217:
-        dict(
-            name='ssd_kpt28',
-            id=217,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt8'),
-        218:
-        dict(
-            name='ssd_kpt29',
-            id=218,
-            color=[64, 64, 128],
-            type='',
-            swap='ssd_kpt7'),
-        219:
-        dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
-        220:
-        dict(
-            name='lsd_kpt2',
-            id=220,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt6'),
-        221:
-        dict(
-            name='lsd_kpt3',
-            id=221,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt5'),
-        222:
-        dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
-        223:
-        dict(
-            name='lsd_kpt5',
-            id=223,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt3'),
-        224:
-        dict(
-            name='lsd_kpt6',
-            id=224,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt2'),
-        225:
-        dict(
-            name='lsd_kpt7',
-            id=225,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt37'),
-        226:
-        dict(
-            name='lsd_kpt8',
-            id=226,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt36'),
-        227:
-        dict(
-            name='lsd_kpt9',
-            id=227,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt35'),
-        228:
-        dict(
-            name='lsd_kpt10',
-            id=228,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt34'),
-        229:
-        dict(
-            name='lsd_kpt11',
-            id=229,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt33'),
-        230:
-        dict(
-            name='lsd_kpt12',
-            id=230,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt32'),
-        231:
-        dict(
-            name='lsd_kpt13',
-            id=231,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt31'),
-        232:
-        dict(
-            name='lsd_kpt14',
-            id=232,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt30'),
-        233:
-        dict(
-            name='lsd_kpt15',
-            id=233,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt29'),
-        234:
-        dict(
-            name='lsd_kpt16',
-            id=234,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt28'),
-        235:
-        dict(
-            name='lsd_kpt17',
-            id=235,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt27'),
-        236:
-        dict(
-            name='lsd_kpt18',
-            id=236,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt26'),
-        237:
-        dict(
-            name='lsd_kpt19',
-            id=237,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt25'),
-        238:
-        dict(
-            name='lsd_kpt20',
-            id=238,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt24'),
-        239:
-        dict(
-            name='lsd_kpt21',
-            id=239,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt23'),
-        240:
-        dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
-        241:
-        dict(
-            name='lsd_kpt23',
-            id=241,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt21'),
-        242:
-        dict(
-            name='lsd_kpt24',
-            id=242,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt20'),
-        243:
-        dict(
-            name='lsd_kpt25',
-            id=243,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt19'),
-        244:
-        dict(
-            name='lsd_kpt26',
-            id=244,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt18'),
-        245:
-        dict(
-            name='lsd_kpt27',
-            id=245,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt17'),
-        246:
-        dict(
-            name='lsd_kpt28',
-            id=246,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt16'),
-        247:
-        dict(
-            name='lsd_kpt29',
-            id=247,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt15'),
-        248:
-        dict(
-            name='lsd_kpt30',
-            id=248,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt14'),
-        249:
-        dict(
-            name='lsd_kpt31',
-            id=249,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt13'),
-        250:
-        dict(
-            name='lsd_kpt32',
-            id=250,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt12'),
-        251:
-        dict(
-            name='lsd_kpt33',
-            id=251,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt11'),
-        252:
-        dict(
-            name='lsd_kpt34',
-            id=252,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt10'),
-        253:
-        dict(
-            name='lsd_kpt35',
-            id=253,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt9'),
-        254:
-        dict(
-            name='lsd_kpt36',
-            id=254,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt8'),
-        255:
-        dict(
-            name='lsd_kpt37',
-            id=255,
-            color=[128, 64, 0],
-            type='',
-            swap='lsd_kpt7'),
-        256:
-        dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
-        257:
-        dict(
-            name='vd_kpt2',
-            id=257,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt6'),
-        258:
-        dict(
-            name='vd_kpt3',
-            id=258,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt5'),
-        259:
-        dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
-        260:
-        dict(
-            name='vd_kpt5',
-            id=260,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt3'),
-        261:
-        dict(
-            name='vd_kpt6',
-            id=261,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt2'),
-        262:
-        dict(
-            name='vd_kpt7',
-            id=262,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt19'),
-        263:
-        dict(
-            name='vd_kpt8',
-            id=263,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt18'),
-        264:
-        dict(
-            name='vd_kpt9',
-            id=264,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt17'),
-        265:
-        dict(
-            name='vd_kpt10',
-            id=265,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt16'),
-        266:
-        dict(
-            name='vd_kpt11',
-            id=266,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt15'),
-        267:
-        dict(
-            name='vd_kpt12',
-            id=267,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt14'),
-        268:
-        dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
-        269:
-        dict(
-            name='vd_kpt14',
-            id=269,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt12'),
-        270:
-        dict(
-            name='vd_kpt15',
-            id=270,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt11'),
-        271:
-        dict(
-            name='vd_kpt16',
-            id=271,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt10'),
-        272:
-        dict(
-            name='vd_kpt17',
-            id=272,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt9'),
-        273:
-        dict(
-            name='vd_kpt18',
-            id=273,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt8'),
-        274:
-        dict(
-            name='vd_kpt19',
-            id=274,
-            color=[128, 64, 255],
-            type='',
-            swap='vd_kpt7'),
-        275:
-        dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
-        276:
-        dict(
-            name='sd_kpt2',
-            id=276,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt6'),
-        277:
-        dict(
-            name='sd_kpt3',
-            id=277,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt5'),
-        278:
-        dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
-        279:
-        dict(
-            name='sd_kpt5',
-            id=279,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt3'),
-        280:
-        dict(
-            name='sd_kpt6',
-            id=280,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt2'),
-        281:
-        dict(
-            name='sd_kpt7',
-            id=281,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt19'),
-        282:
-        dict(
-            name='sd_kpt8',
-            id=282,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt18'),
-        283:
-        dict(
-            name='sd_kpt9',
-            id=283,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt17'),
-        284:
-        dict(
-            name='sd_kpt10',
-            id=284,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt16'),
-        285:
-        dict(
-            name='sd_kpt11',
-            id=285,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt15'),
-        286:
-        dict(
-            name='sd_kpt12',
-            id=286,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt14'),
-        287:
-        dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
-        288:
-        dict(
-            name='sd_kpt14',
-            id=288,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt12'),
-        289:
-        dict(
-            name='sd_kpt15',
-            id=289,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt11'),
-        290:
-        dict(
-            name='sd_kpt16',
-            id=290,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt10'),
-        291:
-        dict(
-            name='sd_kpt17',
-            id=291,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt9'),
-        292:
-        dict(
-            name='sd_kpt18',
-            id=292,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt8'),
-        293:
-        dict(
-            name='sd_kpt19',
-            id=293,
-            color=[128, 64, 0],
-            type='',
-            swap='sd_kpt7')
-    }),
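-    # skeleton_info enumerates the links drawn between the keypoints above;
-    # link ids restart from 0 and each link reuses the color of its garment
-    # group.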
-    skeleton_info=dict({
-        0:
-        dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
-        1:
-        dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
-        2:
-        dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
-        3:
-        dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
-        4:
-        dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
-        5:
-        dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
-        6:
-        dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
-        7:
-        dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
-        8:
-        dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
-        9:
-        dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
-        10:
-        dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
-        11:
-        dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
-        12:
-        dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
-        13:
-        dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
-        14:
-        dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
-        15:
-        dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
-        16:
-        dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
-        17:
-        dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
-        18:
-        dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
-        19:
-        dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
-        20:
-        dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
-        21:
-        dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
-        22:
-        dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
-        23:
-        dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
-        24:
-        dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
-        25:
-        dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
-        26:
-        dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
-        27:
-        dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
-        28:
-        dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
-        29:
-        dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
-        30:
-        dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
-        31:
-        dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
-        32:
-        dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
-        33:
-        dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
-        34:
-        dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
-        35:
-        dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
-        36:
-        dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
-        37:
-        dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
-        38:
-        dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
-        39:
-        dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
-        40:
-        dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
-        41:
-        dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
-        42:
-        dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
-        43:
-        dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
-        44:
-        dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
-        45:
-        dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
-        46:
-        dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
-        47:
-        dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
-        48:
-        dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
-        49:
-        dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
-        50:
-        dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
-        51:
-        dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
-        52:
-        dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
-        53:
-        dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
-        54:
-        dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
-        55:
-        dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
-        56:
-        dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
-        57:
-        dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
-        58:
-        dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
-        59:
-        dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
-        60:
-        dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
-        61:
-        dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
-        62:
-        dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
-        63:
-        dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
-        64:
-        dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
-        65:
-        dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
-        66:
-        dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
-        67:
-        dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
-        68:
-        dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
-        69:
-        dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
-        70:
-        dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
-        71:
-        dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
-        72:
-        dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
-        73:
-        dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
-        74:
-        dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
-        75:
-        dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
-        76:
-        dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
-        77:
-        dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
-        78:
-        dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
-        79:
-        dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
-        80:
-        dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
-        81:
-        dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
-        82:
-        dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
-        83:
-        dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
-        84:
-        dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
-        85:
-        dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
-        86:
-        dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
-        87:
-        dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
-        88:
-        dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
-        89:
-        dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
-        90:
-        dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
-        91:
-        dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
-        92:
-        dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
-        93:
-        dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
-        94:
-        dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
-        95:
-        dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
-        96:
-        dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
-        97:
-        dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
-        98:
-        dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
-        99:
-        dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
-        100:
-        dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
-        101:
-        dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
-        102:
-        dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
-        103:
-        dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
-        104:
-        dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
-        105:
-        dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
-        106:
-        dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
-        107:
-        dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
-        108:
-        dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
-        109:
-        dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
-        110:
-        dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
-        111:
-        dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
-        112:
-        dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
-        113:
-        dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
-        114:
-        dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
-        115:
-        dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
-        116:
-        dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
-        117:
-        dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
-        118:
-        dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
-        119:
-        dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
-        120:
-        dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
-        121:
-        dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
-        122:
-        dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
-        123:
-        dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
-        124:
-        dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
-        125:
-        dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
-        126:
-        dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
-        127:
-        dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
-        128:
-        dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
-        129:
-        dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
-        130:
-        dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
-        131:
-        dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
-        132:
-        dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
-        133:
-        dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
-        134:
-        dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
-        135:
-        dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
-        136:
-        dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
-        137:
-        dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
-        138:
-        dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
-        139:
-        dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
-        140:
-        dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
-        141:
-        dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
-        142:
-        dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
-        143:
-        dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
-        144:
-        dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
-        145:
-        dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
-        146:
-        dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
-        147:
-        dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
-        148:
-        dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
-        149:
-        dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
-        150:
-        dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
-        151:
-        dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
-        152:
-        dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
-        153:
-        dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
-        154:
-        dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
-        155:
-        dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
-        156:
-        dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
-        157:
-        dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
-        158:
-        dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
-        159:
-        dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
-        160:
-        dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
-        161:
-        dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
-        162:
-        dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
-        163:
-        dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
-        164:
-        dict(
-            link=('shorts_kpt1', 'shorts_kpt4'),
-            id=164,
-            color=[128, 128, 128]),
-        165:
-        dict(
-            link=('shorts_kpt4', 'shorts_kpt5'),
-            id=165,
-            color=[128, 128, 128]),
-        166:
-        dict(
-            link=('shorts_kpt5', 'shorts_kpt6'),
-            id=166,
-            color=[128, 128, 128]),
-        167:
-        dict(
-            link=('shorts_kpt6', 'shorts_kpt7'),
-            id=167,
-            color=[128, 128, 128]),
-        168:
-        dict(
-            link=('shorts_kpt7', 'shorts_kpt8'),
-            id=168,
-            color=[128, 128, 128]),
-        169:
-        dict(
-            link=('shorts_kpt8', 'shorts_kpt9'),
-            id=169,
-            color=[128, 128, 128]),
-        170:
-        dict(
-            link=('shorts_kpt9', 'shorts_kpt10'),
-            id=170,
-            color=[128, 128, 128]),
-        171:
-        dict(
-            link=('shorts_kpt10', 'shorts_kpt3'),
-            id=171,
-            color=[128, 128, 128]),
-        172:
-        dict(
-            link=('shorts_kpt3', 'shorts_kpt2'),
-            id=172,
-            color=[128, 128, 128]),
-        173:
-        dict(
-            link=('shorts_kpt2', 'shorts_kpt1'),
-            id=173,
-            color=[128, 128, 128]),
-        174:
-        dict(
-            link=('trousers_kpt1', 'trousers_kpt4'),
-            id=174,
-            color=[128, 0, 128]),
-        175:
-        dict(
-            link=('trousers_kpt4', 'trousers_kpt5'),
-            id=175,
-            color=[128, 0, 128]),
-        176:
-        dict(
-            link=('trousers_kpt5', 'trousers_kpt6'),
-            id=176,
-            color=[128, 0, 128]),
-        177:
-        dict(
-            link=('trousers_kpt6', 'trousers_kpt7'),
-            id=177,
-            color=[128, 0, 128]),
-        178:
-        dict(
-            link=('trousers_kpt7', 'trousers_kpt8'),
-            id=178,
-            color=[128, 0, 128]),
-        179:
-        dict(
-            link=('trousers_kpt8', 'trousers_kpt9'),
-            id=179,
-            color=[128, 0, 128]),
-        180:
-        dict(
-            link=('trousers_kpt9', 'trousers_kpt10'),
-            id=180,
-            color=[128, 0, 128]),
-        181:
-        dict(
-            link=('trousers_kpt10', 'trousers_kpt11'),
-            id=181,
-            color=[128, 0, 128]),
-        182:
-        dict(
-            link=('trousers_kpt11', 'trousers_kpt12'),
-            id=182,
-            color=[128, 0, 128]),
-        183:
-        dict(
-            link=('trousers_kpt12', 'trousers_kpt13'),
-            id=183,
-            color=[128, 0, 128]),
-        184:
-        dict(
-            link=('trousers_kpt13', 'trousers_kpt14'),
-            id=184,
-            color=[128, 0, 128]),
-        185:
-        dict(
-            link=('trousers_kpt14', 'trousers_kpt3'),
-            id=185,
-            color=[128, 0, 128]),
-        186:
-        dict(
-            link=('trousers_kpt3', 'trousers_kpt2'),
-            id=186,
-            color=[128, 0, 128]),
-        187:
-        dict(
-            link=('trousers_kpt2', 'trousers_kpt1'),
-            id=187,
-            color=[128, 0, 128]),
-        188:
-        dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
-        189:
-        dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
-        190:
-        dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
-        191:
-        dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
-        192:
-        dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
-        193:
-        dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
-        194:
-        dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
-        195:
-        dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
-        196:
-        dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
-        197:
-        dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
-        198:
-        dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
-        199:
-        dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
-        200:
-        dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
-        201:
-        dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
-        202:
-        dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
-        203:
-        dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
-        204:
-        dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
-        205:
-        dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
-        206:
-        dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
-        207:
-        dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
-        208:
-        dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
-        209:
-        dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
-        210:
-        dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
-        211:
-        dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
-        212:
-        dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
-        213:
-        dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
-        214:
-        dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
-        215:
-        dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
-        216:
-        dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
-        217:
-        dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
-        218:
-        dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
-        219:
-        dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
-        220:
-        dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
-        221:
-        dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
-        222:
-        dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
-        223:
-        dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
-        224:
-        dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
-        225:
-        dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
-        226:
-        dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
-        227:
-        dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
-        228:
-        dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
-        229:
-        dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
-        230:
-        dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
-        231:
-        dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
-        232:
-        dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
-        233:
-        dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
-        234:
-        dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
-        235:
-        dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
-        236:
-        dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
-        237:
-        dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
-        238:
-        dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
-        239:
-        dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
-        240:
-        dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
-        241:
-        dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
-        242:
-        dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
-        243:
-        dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
-        244:
-        dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
-        245:
-        dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
-        246:
-        dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
-        247:
-        dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
-        248:
-        dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
-        249:
-        dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
-        250:
-        dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
-        251:
-        dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
-        252:
-        dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
-        253:
-        dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
-        254:
-        dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
-        255:
-        dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
-        256:
-        dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
-        257:
-        dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
-        258:
-        dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
-        259:
-        dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
-        260:
-        dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
-        261:
-        dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
-        262:
-        dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
-        263:
-        dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
-        264:
-        dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
-        265:
-        dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
-        266:
-        dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
-        267:
-        dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
-        268:
-        dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
-        269:
-        dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
-        270:
-        dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
-        271:
-        dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
-        272:
-        dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
-        273:
-        dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
-        274:
-        dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
-        275:
-        dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
-        276:
-        dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
-        277:
-        dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
-        278:
-        dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
-        279:
-        dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
-        280:
-        dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
-        281:
-        dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
-        282:
-        dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
-        283:
-        dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
-        284:
-        dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
-        285:
-        dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
-        286:
-        dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
-        287:
-        dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
-        288:
-        dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
-        289:
-        dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
-        290:
-        dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
-        291:
-        dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
-        292:
-        dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
-        293:
-        dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
-        294:
-        dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
-        295:
-        dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
-        296:
-        dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
-        297:
-        dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
-        298:
-        dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
-        299:
-        dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
-        300:
-        dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
-        301:
-        dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
-        302:
-        dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
-        303:
-        dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
-    }),
-    joint_weights=[
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
-    ],
-    sigmas=[])
-param_scheduler = [
-    dict(
-        type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
-    dict(
-        type='MultiStepLR',
-        begin=0,
-        end=210,
-        milestones=[100, 160],
-        gamma=0.1,
-        by_epoch=True)
-]
-optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
-auto_scale_lr = dict(base_batch_size=512)
-dataset_type = 'DeepFashion2Dataset'
-data_mode = 'topdown'
-data_root = 'data/deepfashion2/'
-codec = dict(
-    type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
-train_pipeline = [
-    dict(type='LoadImage'),
-    dict(type='GetBBoxCenterScale'),
-    dict(type='RandomFlip', direction='horizontal'),
-    dict(
-        type='RandomBBoxTransform',
-        shift_prob=0,
-        rotate_factor=60,
-        scale_factor=(0.75, 1.25)),
-    dict(type='TopdownAffine', input_size=(192, 256)),
-    dict(
-        type='GenerateTarget',
-        encoder=dict(
-            type='MSRAHeatmap',
-            input_size=(192, 256),
-            heatmap_size=(48, 64),
-            sigma=2)),
-    dict(type='PackPoseInputs')
-]
-val_pipeline = [
-    dict(type='LoadImage', backend_args=dict(backend='local')),
-    dict(type='GetBBoxCenterScale'),
-    dict(type='TopdownAffine', input_size=(192, 256)),
-    dict(type='PackPoseInputs')
-]
-train_dataloader = dict(
-    batch_size=64,
-    num_workers=6,
-    persistent_workers=True,
-    sampler=dict(type='DefaultSampler', shuffle=True),
-    dataset=dict(
-        type='DeepFashion2Dataset',
-        data_root='data/deepfashion2/',
-        data_mode='topdown',
-        ann_file='train/deepfashion2_sling_dress.json',
-        data_prefix=dict(img='train/image/'),
-        pipeline=[
-            dict(type='LoadImage'),
-            dict(type='GetBBoxCenterScale'),
-            dict(type='RandomFlip', direction='horizontal'),
-            dict(
-                type='RandomBBoxTransform',
-                shift_prob=0,
-                rotate_factor=60,
-                scale_factor=(0.75, 1.25)),
-            dict(type='TopdownAffine', input_size=(192, 256)),
-            dict(
-                type='GenerateTarget',
-                encoder=dict(
-                    type='MSRAHeatmap',
-                    input_size=(192, 256),
-                    heatmap_size=(48, 64),
-                    sigma=2)),
-            dict(type='PackPoseInputs')
-        ]))
-val_dataloader = dict(
-    batch_size=32,
-    num_workers=6,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type='DeepFashion2Dataset',
-        data_root='data/deepfashion2/',
-        data_mode='topdown',
-        ann_file='validation/deepfashion2_sling_dress.json',
-        data_prefix=dict(img='validation/image/'),
-        test_mode=True,
-        pipeline=[
-            dict(type='LoadImage', backend_args=dict(backend='local')),
-            dict(type='GetBBoxCenterScale'),
-            dict(type='TopdownAffine', input_size=(192, 256)),
-            dict(type='PackPoseInputs')
-        ]))
-test_dataloader = dict(
-    batch_size=32,
-    num_workers=6,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type='DeepFashion2Dataset',
-        data_root='data/deepfashion2/',
-        data_mode='topdown',
-        ann_file='validation/deepfashion2_sling_dress.json',
-        data_prefix=dict(img='validation/image/'),
-        test_mode=True,
-        pipeline=[
-            dict(type='LoadImage', backend_args=dict(backend='local')),
-            dict(type='GetBBoxCenterScale'),
-            dict(type='TopdownAffine', input_size=(192, 256)),
-            dict(type='PackPoseInputs')
-        ]))
-channel_cfg = dict(
-    num_output_channels=294,
-    dataset_joints=294,
-    dataset_channel=[[
-        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-        38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
-        56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
-        74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
-        92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
-        108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
-        122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
-        136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
-        150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
-        164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
-        178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
-        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
-        206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
-        220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
-        234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
-        248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
-        262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
-        276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
-        290, 291, 292, 293
-    ]],
-    inference_channel=[
-        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-        38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
-        56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
-        74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
-        92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
-        108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
-        122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
-        136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
-        150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
-        164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
-        178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
-        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
-        206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
-        220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
-        234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
-        248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
-        262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
-        276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
-        290, 291, 292, 293
-    ])
-model = dict(
-    type='TopdownPoseEstimator',
-    data_preprocessor=dict(
-        type='PoseDataPreprocessor',
-        mean=[123.675, 116.28, 103.53],
-        std=[58.395, 57.12, 57.375],
-        bgr_to_rgb=True),
-    backbone=dict(
-        type='ResNet',
-        depth=50,
-        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
-    head=dict(
-        type='HeatmapHead',
-        in_channels=2048,
-        out_channels=294,
-        loss=dict(type='KeypointMSELoss', use_target_weight=True),
-        decoder=dict(
-            type='MSRAHeatmap',
-            input_size=(192, 256),
-            heatmap_size=(48, 64),
-            sigma=2)),
-    test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
-val_evaluator = [
-    dict(type='PCKAccuracy', thr=0.2),
-    dict(type='AUC'),
-    dict(type='EPE')
-]
-test_evaluator = [
-    dict(type='PCKAccuracy', thr=0.2),
-    dict(type='AUC'),
-    dict(type='EPE')
-]
-launcher = 'pytorch'
-work_dir = './work_dirs/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192'
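The deleted file above is a complete MMPose/MMEngine training definition (codec, pipelines, dataloaders, model, and evaluators). As a hedged sketch, not part of the original Space, such a config is typically consumed like this; the config path below is a placeholder:

```python
# Minimal sketch of driving an mmengine-style config like the one above.
# The file path is a placeholder, not a path from this repository.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py')
cfg.work_dir = './work_dirs/sling_dress'  # override work_dir if desired

runner = Runner.from_cfg(cfg)  # builds model, dataloaders, and hooks from the dicts
runner.train()                 # follows the LinearLR warm-up + MultiStepLR schedule
```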
diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/search/[id]/$types.d.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/search/[id]/$types.d.ts
deleted file mode 100644
index 2070a00e82345d9d69a8eef3b0529d9ace1adaea..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/search/[id]/$types.d.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import type * as Kit from '@sveltejs/kit';
-
-type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
-type RouteParams = { id: string }
-type RouteId = '/search/[id]';
-
-export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
-export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
-export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
\ No newline at end of file
diff --git a/spaces/AkshayKumarP/AI-ChatBot/app.py b/spaces/AkshayKumarP/AI-ChatBot/app.py
deleted file mode 100644
index 4124a0cbb2350165a9117e156f6e6baf19afe998..0000000000000000000000000000000000000000
--- a/spaces/AkshayKumarP/AI-ChatBot/app.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import gradio as gr
-import torch
-
-
-title = "????AI ChatBot"
-description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
-examples = [["How are you?"]]
-
-
-tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
-model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
-
-
-def predict(input, history=[]):
-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(
-        input + tokenizer.eos_token, return_tensors="pt"
-    )
-
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
-
-    # generate a response
-    history = model.generate(
-        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
-    ).tolist()
-
-    # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    # print('decoded_response-->>'+str(response))
-    response = [
-        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
-    ]  # convert to tuples of list
-    # print('response-->>'+str(response))
-    return response, history
-
-
-gr.Interface(
-    fn=predict,
-    title=title,
-    description=description,
-    examples=examples,
-    inputs=["text", "state"],
-    outputs=["chatbot", "state"],
-    theme="finlaymacklon/boxy_violet",
-).launch()
\ No newline at end of file
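The `predict` function above keeps the entire conversation as one growing token sequence and re-generates from it each turn. For illustration, here is a hedged, Gradio-free sketch of the same DialoGPT pattern; only the model name comes from the file above, the console loop itself is assumed:

```python
# Hypothetical console version of the chat loop above (no Gradio); a sketch only.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

history = None
while True:
    text = input("you> ")
    new_ids = tokenizer.encode(text + tokenizer.eos_token, return_tensors="pt")
    # Append the new user tokens to the running conversation, as predict() does
    bot_input = new_ids if history is None else torch.cat([history, new_ids], dim=-1)
    history = model.generate(
        bot_input, max_length=4000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated reply tokens
    reply = tokenizer.decode(history[0, bot_input.shape[-1]:], skip_special_tokens=True)
    print("bot>", reply)
```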
diff --git a/spaces/AnandSoni2001/StockMarketPrediction/README.md b/spaces/AnandSoni2001/StockMarketPrediction/README.md
deleted file mode 100644
index 23e5954d3f60b557c74d411836b14d682a3b3f14..0000000000000000000000000000000000000000
--- a/spaces/AnandSoni2001/StockMarketPrediction/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: StockMarketPrediction
-emoji: 👀
-colorFrom: green
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet2torchserve.py b/spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet2torchserve.py
deleted file mode 100644
index 2d6f25c41d279ffcc1f4f2182b1559f41042e716..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet2torchserve.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from argparse import ArgumentParser, Namespace
-from pathlib import Path
-from tempfile import TemporaryDirectory
-
-import mmcv
-
-try:
-    from model_archiver.model_packaging import package_model
-    from model_archiver.model_packaging_utils import ModelExportUtils
-except ImportError:
-    package_model = None
-
-
-def mmdet2torchserve(
-    config_file: str,
-    checkpoint_file: str,
-    output_folder: str,
-    model_name: str,
-    model_version: str = '1.0',
-    force: bool = False,
-):
-    """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
-
-    Args:
-        config_file:
-            In MMDetection config format.
-            The contents vary for each task repository.
-        checkpoint_file:
-            In MMDetection checkpoint format.
-            The contents vary for each task repository.
-        output_folder:
-            Folder where `{model_name}.mar` will be created.
-            The file created will be in TorchServe archive format.
-        model_name:
-            If not None, used for naming the `{model_name}.mar` file
-            that will be created under `output_folder`.
-            If None, `{Path(checkpoint_file).stem}` will be used.
-        model_version:
-            Model's version.
-        force:
-            If True, if there is an existing `{model_name}.mar`
-            file under `output_folder` it will be overwritten.
-    """
-    config = mmcv.Config.fromfile(config_file)
-
-    with TemporaryDirectory() as tmpdir:
-        config.dump(f'{tmpdir}/config.py')
-
-        args = Namespace(
-            **{
-                'model_file': f'{tmpdir}/config.py',
-                'serialized_file': checkpoint_file,
-                'handler': f'{Path(__file__).parent}/mmdet_handler.py',
-                'model_name': model_name or Path(checkpoint_file).stem,
-                'version': model_version,
-                'export_path': output_folder,
-                'force': force,
-                'requirements_file': None,
-                'extra_files': None,
-                'runtime': 'python',
-                'archive_format': 'default'
-            })
-        manifest = ModelExportUtils.generate_manifest_json(args)
-        package_model(args, manifest)
-
-
-def parse_args():
-    parser = ArgumentParser(
-        description='Convert MMDetection models to TorchServe `.mar` format.')
-    parser.add_argument('config', type=str, help='config file path')
-    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
-    parser.add_argument(
-        '--output-folder',
-        type=str,
-        required=True,
-        help='Folder where `{model_name}.mar` will be created.')
-    parser.add_argument(
-        '--model-name',
-        type=str,
-        default=None,
-        help='If not None, used for naming the `{model_name}.mar` '
-        'file that will be created under `output_folder`. '
-        'If None, `{Path(checkpoint_file).stem}` will be used.')
-    parser.add_argument(
-        '--model-version',
-        type=str,
-        default='1.0',
-        help='Number used for versioning.')
-    parser.add_argument(
-        '-f',
-        '--force',
-        action='store_true',
-        help='overwrite the existing `{model_name}.mar`')
-    args = parser.parse_args()
-
-    return args
-
-
-if __name__ == '__main__':
-    args = parse_args()
-
-    if package_model is None:
-        raise ImportError('`torch-model-archiver` is required. '
-                          'Try: pip install torch-model-archiver')
-
-    mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
-                     args.model_name, args.model_version, args.force)
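For reference, a hedged example of calling the converter defined above from Python instead of the CLI; every path and name here is a placeholder, not something shipped with the tool:

```python
# Hypothetical programmatic use of mmdet2torchserve(); paths are placeholders.
mmdet2torchserve(
    config_file='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint_file='checkpoints/faster_rcnn_r50_fpn_1x_coco.pth',
    output_folder='model-store',  # `faster_rcnn.mar` will be written here
    model_name='faster_rcnn',
    force=True,                   # overwrite an existing archive of the same name
)
```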
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py
deleted file mode 100644
index 29a9f98a93fedbf9644599203b48aa30a7ad8a28..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './fcn_d6_r50-d16_769x769_40k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
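The two-line config above works through mmcv's `_base_` inheritance: it loads the r50 base file and overrides only the pretrained weights and the backbone depth. A hedged sketch of inspecting the merged result, assuming the file sits at its usual configs path:

```python
# Sketch: mmcv merges `_base_` files at load time; the path is a placeholder.
from mmcv import Config

cfg = Config.fromfile('configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py')
print(cfg.model.backbone.depth)  # 101, the override applied on top of the r50 base
print(cfg.model.pretrained)      # 'open-mmlab://resnet101_v1c'
```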
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py
deleted file mode 100644
index 15c7e68088f019802a59e7ae41cc1fe0c7f28f96..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ...parallel import is_module_wrapper
-from ..hooks.hook import HOOKS, Hook
-
-
-@HOOKS.register_module()
-class EMAHook(Hook):
-    r"""Exponential Moving Average Hook.
-
-    Apply an exponential moving average (EMA) to all model parameters during
-    training. Every parameter has an EMA backup, which is updated by the
-    formula below. EMAHook takes priority over EvalHook and CheckpointSaverHook.
-
-        .. math::
-
-            X_{\text{ema}, t+1} = (1 - \text{momentum}) \times
-            X_{\text{ema}, t} + \text{momentum} \times X_t
-
-    Args:
-        momentum (float): The momentum used for updating ema parameter.
-            Defaults to 0.0002.
-        interval (int): Update ema parameter every interval iteration.
-            Defaults to 1.
-        warm_up (int): During the first warm_up steps, a smaller momentum is
-            used so that the EMA parameters update more slowly. Defaults to 100.
-        resume_from (str): The checkpoint path. Defaults to None.
-    """
-
-    def __init__(self,
-                 momentum=0.0002,
-                 interval=1,
-                 warm_up=100,
-                 resume_from=None):
-        assert isinstance(interval, int) and interval > 0
-        self.warm_up = warm_up
-        self.interval = interval
-        assert momentum > 0 and momentum < 1
-        self.momentum = momentum**interval
-        self.checkpoint = resume_from
-
-    def before_run(self, runner):
-        """To resume model with it's ema parameters more friendly.
-
-        Register ema parameter as ``named_buffer`` to model
-        """
-        model = runner.model
-        if is_module_wrapper(model):
-            model = model.module
-        self.param_ema_buffer = {}
-        self.model_parameters = dict(model.named_parameters(recurse=True))
-        for name, value in self.model_parameters.items():
-            # "." is not allowed in module's buffer name
-            buffer_name = f"ema_{name.replace('.', '_')}"
-            self.param_ema_buffer[name] = buffer_name
-            model.register_buffer(buffer_name, value.data.clone())
-        self.model_buffers = dict(model.named_buffers(recurse=True))
-        if self.checkpoint is not None:
-            runner.resume(self.checkpoint)
-
-    def after_train_iter(self, runner):
-        """Update ema parameter every self.interval iterations."""
-        curr_step = runner.iter
-        # We warm up the momentum considering the instability at beginning
-        momentum = min(self.momentum,
-                       (1 + curr_step) / (self.warm_up + curr_step))
-        if curr_step % self.interval != 0:
-            return
-        for name, parameter in self.model_parameters.items():
-            buffer_name = self.param_ema_buffer[name]
-            buffer_parameter = self.model_buffers[buffer_name]
-            buffer_parameter.mul_(1 - momentum).add_(parameter.data, alpha=momentum)
-
-    def after_train_epoch(self, runner):
-        """We load parameter values from ema backup to model before the
-        EvalHook."""
-        self._swap_ema_parameters()
-
-    def before_train_epoch(self, runner):
-        """We recover model's parameter from ema backup after last epoch's
-        EvalHook."""
-        self._swap_ema_parameters()
-
-    def _swap_ema_parameters(self):
-        """Swap the parameter of model with parameter in ema_buffer."""
-        for name, value in self.model_parameters.items():
-            temp = value.data.clone()
-            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
-            value.data.copy_(ema_buffer.data)
-            ema_buffer.data.copy_(temp)
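To make the update in `after_train_iter` concrete, here is a hedged, plain-Python trace of the same EMA rule with made-up values; it is an illustration, not part of mmcv:

```python
# Illustrative numbers only; mirrors the EMA update performed by the hook above.
momentum, warm_up = 0.0002, 100
ema, param = 0.0, 1.0
for step in range(5):
    m = min(momentum, (1 + step) / (warm_up + step))  # warm-up clamp, as in the hook
    ema = (1 - m) * ema + m * param                   # i.e. mul_(1 - m).add_(param, alpha=m)
    print(step, round(ema, 6))
```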
diff --git a/spaces/Anustup/NS_AI_LABS/src/download.py b/spaces/Anustup/NS_AI_LABS/src/download.py
deleted file mode 100644
index e723e430f0e0f35b0fb9db515420b1fe10961484..0000000000000000000000000000000000000000
--- a/spaces/Anustup/NS_AI_LABS/src/download.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from tempfile import mkdtemp
-from typing import List
-from yt_dlp import YoutubeDL
-
-import yt_dlp
-from yt_dlp.postprocessor import PostProcessor
-
-class FilenameCollectorPP(PostProcessor):
-    def __init__(self):
-        super(FilenameCollectorPP, self).__init__(None)
-        self.filenames = []
-
-    def run(self, information):
-        self.filenames.append(information["filepath"])
-        return [], information
-
-def download_url(url: str, maxDuration: int = None, destinationDirectory: str = None, playlistItems: str = "1") -> List[str]:
-    try:
-        return _perform_download(url, maxDuration=maxDuration, outputTemplate=None, destinationDirectory=destinationDirectory, playlistItems=playlistItems)
-    except yt_dlp.utils.DownloadError as e:
-        # If the OS rejects the file name as too long, retry with a shorter output
-        # template, keeping the same destination directory and playlist selection
-        if e.msg and e.msg.find("[Errno 36] File name too long") >= 0:
-            return _perform_download(url, maxDuration=maxDuration, outputTemplate="%(title).10s %(id)s.%(ext)s",
-                                     destinationDirectory=destinationDirectory, playlistItems=playlistItems)
-        raise
-
-def _perform_download(url: str, maxDuration: int = None, outputTemplate: str = None, destinationDirectory: str = None, playlistItems: str = "1"):
-    # Create a temporary directory to store the downloaded files
-    if destinationDirectory is None:
-        destinationDirectory = mkdtemp()
-
-    ydl_opts = {
-        "format": "bestaudio/best",
-        'paths': {
-            'home': destinationDirectory
-        }
-    }
-    if (playlistItems):
-        ydl_opts['playlist_items'] = playlistItems
-
-    # Add output template if specified
-    if outputTemplate:
-        ydl_opts['outtmpl'] = outputTemplate
-
-    filename_collector = FilenameCollectorPP()
-
-    with YoutubeDL(ydl_opts) as ydl:
-        if maxDuration and maxDuration > 0:
-            info = ydl.extract_info(url, download=False)
-            duration = info['duration']
-
-            if duration >= maxDuration:
-                raise ExceededMaximumDuration(videoDuration=duration, maxDuration=maxDuration, message="Video is too long")
-
-        ydl.add_post_processor(filename_collector)
-        ydl.download([url])
-
-    if len(filename_collector.filenames) <= 0:
-        raise Exception("Cannot download " + url)
-
-    result = []
-
-    for filename in filename_collector.filenames:
-        result.append(filename)
-        print("Downloaded " + filename)
-
-    return result 
-
-class ExceededMaximumDuration(Exception):
-    def __init__(self, videoDuration, maxDuration, message):
-        self.videoDuration = videoDuration
-        self.maxDuration = maxDuration
-        super().__init__(message)
\ No newline at end of file
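A hedged usage sketch for the `download_url` helper above; the URL and duration limit are placeholders:

```python
# Hypothetical call of download_url(); the URL is a placeholder.
try:
    files = download_url("https://www.youtube.com/watch?v=XXXXXXXXXXX",
                         maxDuration=600)  # reject videos longer than 10 minutes
    print("Downloaded audio files:", files)
except ExceededMaximumDuration as e:
    print(f"Video too long: {e.videoDuration}s > {e.maxDuration}s")
```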
diff --git a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/transforms.py b/spaces/ArkanDash/rvc-models-new/lib/infer_pack/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
-    inputs,
-    unnormalized_widths,
-    unnormalized_heights,
-    unnormalized_derivatives,
-    inverse=False,
-    tails=None,
-    tail_bound=1.0,
-    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-    min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
-    inputs,
-    unnormalized_widths,
-    unnormalized_heights,
-    unnormalized_derivatives,
-    inverse=False,
-    tails="linear",
-    tail_bound=1.0,
-    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-    min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == "linear":
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError("{} tails are not implemented.".format(tails))
-
-    (
-        outputs[inside_interval_mask],
-        logabsdet[inside_interval_mask],
-    ) = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound,
-        right=tail_bound,
-        bottom=-tail_bound,
-        top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-    )
-
-    return outputs, logabsdet
-
-
-def rational_quadratic_spline(
-    inputs,
-    unnormalized_widths,
-    unnormalized_heights,
-    unnormalized_derivatives,
-    inverse=False,
-    left=0.0,
-    right=1.0,
-    bottom=0.0,
-    top=1.0,
-    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-    min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError("Input to a transform is not within its domain")
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError("Minimal bin width too large for the number of bins")
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError("Minimal bin height too large for the number of bins")
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (inputs - input_cumheights) * (
-            input_derivatives + input_derivatives_plus_one - 2 * input_delta
-        ) + input_heights * (input_delta - input_derivatives)
-        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
-            input_derivatives + input_derivatives_plus_one - 2 * input_delta
-        )
-        c = -input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + (
-            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-            * theta_one_minus_theta
-        )
-        derivative_numerator = input_delta.pow(2) * (
-            input_derivatives_plus_one * root.pow(2)
-            + 2 * input_delta * theta_one_minus_theta
-            + input_derivatives * (1 - root).pow(2)
-        )
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (
-            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
-        )
-        denominator = input_delta + (
-            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-            * theta_one_minus_theta
-        )
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (
-            input_derivatives_plus_one * theta.pow(2)
-            + 2 * input_delta * theta_one_minus_theta
-            + input_derivatives * (1 - theta).pow(2)
-        )
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, logabsdet
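A quick, hedged smoke test for the spline above. With `tails='linear'`, `unnormalized_derivatives` carries `num_bins - 1` entries per input (the function pads the two boundary derivatives itself); the shapes below follow from that assumption:

```python
# Round-trip check: the inverse spline should undo the forward pass.
import torch

torch.manual_seed(0)
n, num_bins = 5, 8
x = torch.empty(n).uniform_(-0.9, 0.9)  # inside the default tail_bound of 1.0
w = torch.randn(n, num_bins)
h = torch.randn(n, num_bins)
d = torch.randn(n, num_bins - 1)

y, logdet = piecewise_rational_quadratic_transform(x, w, h, d, tails="linear")
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear")

print(torch.allclose(x, x_rec, atol=1e-4))             # True: round trip recovers x
print(torch.allclose(logdet, -inv_logdet, atol=1e-4))  # log-dets are negatives
```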
diff --git a/spaces/AutoGeneralAI/ChatGPT/README_cn.md b/spaces/AutoGeneralAI/ChatGPT/README_cn.md
deleted file mode 100644
index 4c6f8c8051b2efce6630cfb204e13a0f9ac88e02..0000000000000000000000000000000000000000
--- a/spaces/AutoGeneralAI/ChatGPT/README_cn.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# ChatGPT
-
-A GUI for the ChatGPT API. Try it online at https://huggingface.co/spaces/AutoGeneralAI/ChatGPT
-
-## Usage
-Get your own OpenAI API key from https://platform.openai.com/
-and paste it into the key input box; you can then chat with ChatGPT right away.
-
-This project is not the web version of ChatGPT; it is a GUI that calls the official ChatGPT API.
-
-![image](https://user-images.githubusercontent.com/130114082/233837074-a260c2ff-4d93-4efd-ad3f-b97da7db82e7.png)
-
diff --git a/spaces/Bart92/RVC_HF/infer/modules/train/extract_feature_print.py b/spaces/Bart92/RVC_HF/infer/modules/train/extract_feature_print.py
deleted file mode 100644
index f771dd9b8ba92262e6844e7b5781de43c342833a..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/infer/modules/train/extract_feature_print.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import os
-import sys
-import traceback
-
-os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
-
-device = sys.argv[1]
-n_part = int(sys.argv[2])
-i_part = int(sys.argv[3])
-if len(sys.argv) == 6:
-    exp_dir = sys.argv[4]
-    version = sys.argv[5]
-else:
-    i_gpu = sys.argv[4]
-    exp_dir = sys.argv[5]
-    os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
-    version = sys.argv[6]
-import fairseq
-import numpy as np
-import soundfile as sf
-import torch
-import torch.nn.functional as F
-
-if "privateuseone" not in device:
-    device = "cpu"
-    if torch.cuda.is_available():
-        device = "cuda"
-    elif torch.backends.mps.is_available():
-        device = "mps"
-else:
-    import torch_directml
-
-    device = torch_directml.device(torch_directml.default_device())
-
-    def forward_dml(ctx, x, scale):
-        ctx.scale = scale
-        res = x.clone().detach()
-        return res
-
-    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
-
-f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
-
-
-def printt(strr):
-    print(strr)
-    f.write("%s\n" % strr)
-    f.flush()
-
-
-printt(sys.argv)
-model_path = "assets/hubert/hubert_base.pt"
-
-printt(exp_dir)
-wavPath = "%s/1_16k_wavs" % exp_dir
-outPath = (
-    "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
-)
-os.makedirs(outPath, exist_ok=True)
-
-
-# wave must be 16k, hop_size=320
-def readwave(wav_path, normalize=False):
-    wav, sr = sf.read(wav_path)
-    assert sr == 16000
-    feats = torch.from_numpy(wav).float()
-    if feats.dim() == 2:  # double channels
-        feats = feats.mean(-1)
-    assert feats.dim() == 1, feats.dim()
-    if normalize:
-        with torch.no_grad():
-            feats = F.layer_norm(feats, feats.shape)
-    feats = feats.view(1, -1)
-    return feats
-
-
-# HuBERT model
-printt("load model(s) from {}".format(model_path))
-# make sure the hubert model exists
-if not os.path.exists(model_path):
-    printt(
-        "Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main"
-        % model_path
-    )
-    exit(0)
-models, saved_cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
-    [model_path],
-    suffix="",
-)
-model = models[0]
-model = model.to(device)
-printt("move model to %s" % device)
-if device not in ["mps", "cpu"]:
-    model = model.half()
-model.eval()
-
-todo = sorted(list(os.listdir(wavPath)))[i_part::n_part]
-n = max(1, len(todo) // 10)  # print at most ten progress lines
-if len(todo) == 0:
-    printt("no-feature-todo")
-else:
-    printt("all-feature-%s" % len(todo))
-    for idx, file in enumerate(todo):
-        try:
-            if file.endswith(".wav"):
-                wav_path = "%s/%s" % (wavPath, file)
-                out_path = "%s/%s" % (outPath, file.replace("wav", "npy"))
-
-                if os.path.exists(out_path):
-                    continue
-
-                feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
-                padding_mask = torch.BoolTensor(feats.shape).fill_(False)
-                inputs = {
-                    "source": feats.half().to(device)
-                    if device not in ["mps", "cpu"]
-                    else feats.to(device),
-                    "padding_mask": padding_mask.to(device),
-                    "output_layer": 9 if version == "v1" else 12,  # layer 9
-                }
-                with torch.no_grad():
-                    logits = model.extract_features(**inputs)
-                    feats = (
-                        model.final_proj(logits[0]) if version == "v1" else logits[0]
-                    )
-
-                feats = feats.squeeze(0).float().cpu().numpy()
-                if np.isnan(feats).sum() == 0:
-                    np.save(out_path, feats, allow_pickle=False)
-                else:
-                    printt("%s-contains nan" % file)
-                if idx % n == 0:
-                    printt("now-%s,all-%s,%s,%s" % (len(todo), idx, file, feats.shape))
-        except Exception:
-            printt(traceback.format_exc())
-    printt("all-feature-done")
diff --git a/spaces/Benson/text-generation/Examples/Como Hacer Una Hoja De Papel.md b/spaces/Benson/text-generation/Examples/Como Hacer Una Hoja De Papel.md
deleted file mode 100644
index 3313e74580aa6edeb92498dfbe2fc1578c3ff21d..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Como Hacer Una Hoja De Papel.md	
+++ /dev/null
@@ -1,83 +0,0 @@
-
-<h1>How to Download on HBO Go: A Complete Guide</h1>
-<p>If you are a fan of HBO shows and movies, you may want to download them to your phone or tablet and watch them offline. That way, you can enjoy your favorite content without worrying about your internet connection or data usage. But how do you download on HBO Go? And what are the differences between HBO Go, HBO Max, and HBO Now? In this article, we will answer these questions and more. We will also give you some tips and tricks for downloading on HBO Go.</p>
- <h2>What is HBO Go and how does it work?</h2>
-<p>HBO Go is a streaming service that lets you watch all of HBO's programming on your devices. It includes original series, movies, documentaries, specials, and more. You can also access some content from other WarnerMedia properties, such as DC, Cartoon Network, and Turner Classic Movies.</p>
-<h2>how to make a sheet of paper</h2><br /><p><b><b>Download</b> --->>> <a href="https://bltlly.com/2v6Mdv">https://bltlly.com/2v6Mdv</a></b></p><br /><br />
-<p>To use HBO Go, you need an HBO subscription through your TV provider. You can then sign in to the app or website with your TV provider account details. You can also use your account to access HBO Max, an enhanced service that offers more content and features.</p>
- <h3>HBO Go vs HBO Max vs HBO Now</h3>
-<p>HBO Go is not HBO's only streaming service. There are also HBO Max and HBO Now. Here are the main differences between them:</p>
-<ul>
-<li>HBO Max is a standalone service that costs $15 a month. It includes all of HBO's content, plus exclusive originals and a wider selection of movies and shows from other WarnerMedia brands. It also offers 4K streaming, offline viewing, profiles, parental controls, and more.</li>
-<li>HBO Now is a legacy service that was rebranded as "HBO". It costs $15 a month and offers the same content as HBO Go, but without requiring a TV provider subscription. However, it does not include any of HBO Max's additional content or features.</li>
-
-</ul>
-<p>In most cases, if you have an HBO subscription through your TV provider, you can access HBO Max at no extra cost. You can check the HBO Max website to see whether your subscription is eligible.</p>
- <h3>Devices compatible with HBO Go</h3>
-<p>HBO Go is compatible with multiple devices. Supported devices include:</p>
-<ul>
-<li>Mobile phones and tablets (Android and iOS)</li>
-<li>Apple TV</li>
-<li>Google Chromecast</li>
-<li>Amazon Fire TV</li>
-<li>Samsung Smart TV</li>
-<li>PlayStation</li>
-<li>Xbox</li>
-<li>Android TV</li>
-<li>Roku</li>
-<li>TiVo</li>
-</ul>
-<p>To use HBO Go on your device, download the app from the app store or visit HBOGO.com. You also need to activate your device by entering a code at HBOGo.com/Activate.</p>
- <h3>HBO Go subscription plans</h3>
-<p>HBO Go has no separate subscription plans. It is included with your existing HBO subscription through your TV provider. The cost of your subscription may vary depending on your provider and package.</p>
-<p>If you do not have an HBO subscription, you can sign up for HBO Max, which offers the same content as HBO Go, plus more. You can also get a 7-day free trial of HBO Max.</p>
- <h2>How to download shows and movies on HBO Go</h2>
-<p>One of the benefits of HBO Go is that you can download shows and movies to your mobile devices and watch them offline. This is useful when you are traveling, commuting, or somewhere without internet access. Here are the steps to download on HBO Go:</p>
- <h3>Step 1: Open the HBO Go app and sign in</h3>
-<p>The first step is to open the HBO Go app on your phone or tablet. If you do not have the app, you can download it from the app store. Then, sign in with your TV provider account details. You will see the home screen with different categories and recommendations.</p>
- <h3>Step 2: Find the title you want to download</h3>
-
-<p>Not all titles are available for download on HBO Go. You can tell whether a title can be downloaded by looking for a download icon next to it. The icon looks like a downward arrow with a line under it.</p>
- <h3>Step 3: Tap the download icon</h3>
-<p>Once you find the title you want to download, tap it to open its details page. Then, tap the download icon next to the episode or movie you want to download. You will see a progress bar showing the download status.</p>
-<p>You can download up to 15 titles at a time on HBO Go. You can also pause or cancel downloads by tapping the icons next to them.</p>
- <h3>Step 4: Go to My Downloads to watch your downloads</h3>
-<p>After your downloads finish, you can go to My Downloads to watch them offline. To access My Downloads, tap the menu icon in the top left corner of the screen, then tap My Downloads. You will see a list of your downloaded titles, sorted by expiration date.</p>
-<p>You can watch your downloads anytime, anywhere, without an internet connection. However, you need to be signed in to HBO Go and have an active subscription. You also need to renew your downloads every 30 days by connecting to the internet.</p>
- <h2>Tips and tricks for downloading on HBO Go</h2>
-<p>To get the most out of downloading on HBO Go, here are some tips and tricks you should know:</p>
- <h3>How to change the download quality</h3>
-<p>By default, HBO Go downloads titles in standard definition (SD), which uses less storage space and data. If you prefer high definition (HD), you can change it in the settings. To do so, tap the menu icon in the top left corner of the screen, then tap Settings, then Video Options, then Download Quality. You can choose between SD and HD.</p>
-<p>Keep in mind that HD downloads take longer and use more storage space and data than SD downloads.</p>
- <h3>How to renew or delete downloads</h3>
-
-<p>If you want to delete your downloads, tap Edit in the top right corner of My Downloads, select the titles you want to delete, then tap Delete at the bottom of the screen.</p>
- <h3>How to download over a mobile network</h3>
-<p>By default, HBO Go only lets you download over Wi-Fi, which saves your data allowance. If you want to download over a mobile network, you can enable it in the settings. To do so, tap the menu icon in the top left corner of the screen, then tap Settings, then Video Options, then Mobile Network Downloads. You can turn it on or off.</p>
-<p>Keep in mind that downloading over a mobile network uses your data plan and may incur additional charges from your carrier.</p>
- <h2>Conclusion</h2>
-<p>HBO Go is a great way to watch all of HBO's content on your devices. It also lets you download shows and movies and watch them offline. In this article, we showed you how to download on HBO Go and shared some tips and tricks for doing so.</p>
-<p>We hope this article was helpful and informative. Now, let's look at some frequently asked questions about downloading on HBO Go.</p>
- <h2>FAQs</h2>
-<p>Here are some of the most frequently asked questions about downloading on HBO Go:</p>
- <h3>How many titles can I download on HBO Go?</h3>
-<p>You can download up to 15 titles at a time on HBO Go. You can also download the same title on up to three devices.</p>
- <h3>How much storage space do I need for downloads on HBO Go?</h3>
-<p>The storage space you need depends on the quality and length of the titles you download. Generally, SD downloads use about 0.5 GB per hour, while HD downloads use about 1 GB per hour. You can check the size of your downloads by tapping the info icon next to them in My Downloads.</p>
- <h3>Can I watch downloads on other devices or share them with others?</h3>
-
- <h3>Can I download titles that are leaving HBO Go soon?</h3>
-<p>Yes, you can download titles that are leaving HBO Go soon, as long as they are still available on the service. However, you will not be able to watch them after they expire, even if they are downloaded. You will see a notification if any of your downloads are expiring soon.</p>
- <h3>What if I have trouble downloading or watching downloads on HBO Go?</h3>
-<p>If you have trouble downloading or watching downloads on HBO Go, try some of these troubleshooting steps:</p>
-<ul>
-<li>Make sure you have enough storage space and battery life on your device.</li>
-<li>Make sure you have a stable internet connection when downloading or renewing downloads.</li>
-<li>Make sure you have the latest version of the HBO Go app and your device's software.</li>
-<li>Restart your device and the HBO Go app.</li>
-<li>Delete and reinstall the HBO Go app.</li>
-<li>Contact HBO Go customer support.</li>
-</ul>
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Clash Royale En El Ordenador.md b/spaces/Benson/text-generation/Examples/Descargar Clash Royale En El Ordenador.md
deleted file mode 100644
index 631d40753f56da92c6c37414e6f4c36b8741294f..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Clash Royale En El Ordenador.md	
+++ /dev/null
@@ -1,116 +0,0 @@
-<br />
-<h1>How to download Clash Royale on your computer</h1>
-<p>Do you love playing strategy games on your mobile device? Do you want to experience a fast-paced, addictive game that combines card collecting, tower defense, and real-time battles? If you answered yes, then you should definitely try <strong>Clash Royale</strong>, one of the most popular and successful games from Supercell, the creators of <strong>Clash of Clans</strong>.</p>
-<h2>download clash royale on your computer</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://bltlly.com/2v6LXo">https://bltlly.com/2v6LXo</a></b></p><br /><br />
-<p>But what if you want to play Clash Royale on a bigger screen, with better graphics, smoother performance, and more comfortable controls? Well, you can do that too! In this article, we will show you how to download Clash Royale on your computer using an Android emulator and how to play it like a pro. Let's get started!</p>
- <h2>What is Clash Royale?</h2>
-<h3>A brief introduction to the game and its features</h3>
-<p>Clash Royale is a real-time multiplayer game featuring your favorite characters from the Clash universe. You can collect and upgrade dozens of cards with the troops, spells, and defenses from Clash of Clans, as well as the Royales: Princes, Knights, Baby Dragons, and more. You can also unlock new cards as you progress through different arenas.</p>
-<p>The goal of the game is to knock the enemy King and Princesses off their towers using your cards. You can also defend your own towers from enemy attacks. Each match lasts three minutes or until one player destroys the enemy King's tower. You can earn trophies, crowns, chests, gold, gems, and cards by winning matches.</p>
-<p>The game also offers various modes and events for you to enjoy. You can join or create a clan with other players to share cards and fight in clan wars. You can take part in seasonal challenges and tournaments to earn rewards and glory. You can also customize your towers with skins, emotes, and magic items.</p>
- <h3>Why is it popular and fun to play?</h3>
-
-<p>The game also requires you to think fast and act smart in real-time battles against players from around the world. You have to manage your elixir wisely, deploy your cards strategically, counter your enemy's moves, and adapt to changing situations. You never know what will happen in a Clash Royale match!</p>
-<p>On top of that, the game is constantly updated with new features, cards, modes, events, and balance changes that keep it fresh and exciting. There is always something new to discover and master in Clash Royale. Whether you are a casual player or a competitive one, you will find something that suits your taste and skill level in this game.</p>
- <h2>How to download Clash Royale on PC</h2>
-<h3>The benefits of playing Clash Royale on PC</h3>
-<p>Although Clash Royale is designed primarily for mobile devices, you can also play it on your computer using an Android emulator. An Android emulator is software that lets you run Android apps and games on your PC. There are many benefits to playing Clash Royale on PC with an emulator, such as:</p>
-<ul>
-<li>You can enjoy the game on a bigger screen with higher resolution and better graphics.</li>
-<li>You can use your keyboard and mouse to control the game with greater ease and precision.</li>
-<li>You can avoid the battery drain, overheating, and lag issues that can occur on your mobile device.</li>
-<li>You can access the game from your PC at any time without transferring your account or data.</li>
-<li>You can record, stream, or capture footage of your gameplay with ease.</li>
-</ul>
-<p>Of course, you will still need a stable internet connection and a compatible PC to run the emulator and the game smoothly. But if you have those, you are ready to download Clash Royale on your computer!</p>
- <h3>The best emulator to use: Bluestacks</h3>
-<p>There are many Android emulators out there, but the one we recommend for Clash Royale is <strong>Bluestacks</strong>, one of the most popular and trusted Android emulators available, for several reasons:</p>
-<ul>
-<li>It has a fast, easy installation process that does not require any technical skills or knowledge.</li>
-<li>It has a user-friendly interface that lets you access the Google Play Store and download Clash Royale with just a few clicks.</li>
-<li>It has high compatibility and performance that ensure a smooth, seamless gaming experience.</li>
-<li>It has a built-in keymapping tool that lets you customize your keyboard and mouse settings for optimal control and comfort.</li>
-<li>It has a multi-instance feature that lets you run several instances of Clash Royale or other apps simultaneously.</li>
-</ul>
-<p>With Bluestacks, you can enjoy playing Clash Royale on PC like never before. You can also check the official Bluestacks website for more information and support.</p>
- <h3>The steps to download and install Bluestacks and Clash Royale</h3>
-<p>Now that you know why and how to use Bluestacks, let's go through the steps to download and install it and Clash Royale on your PC. Follow these simple steps:</p>
-<ol>
-<li>Go to the official Bluestacks website and click the <strong>Download Bluestacks</strong> button. This will start downloading the installation file to your PC.</li>
-<li>Once the download is complete, open the installation file and follow the on-screen instructions to install Bluestacks on your PC. This may take a few minutes depending on your PC's specifications.</li>
-<li>After installation, launch Bluestacks from your desktop or start menu. You will see the Bluestacks home screen with various icons and options.</li>
-<li>On the home screen, find the <strong>Google Play Store</strong> icon and click it. This will open the Google Play Store app inside Bluestacks.</li>
-<li>In the Google Play Store app, search for <strong>Clash Royale</strong> using the search bar. You will see the game's icon and name in the search results.</li>
-<li>Click the game in the search results and then click <strong>Install</strong>. This will download and install Clash Royale inside Bluestacks.</li>
-<li>Once the installation is complete, you will see the game's icon on the Bluestacks home screen. You can also find it in the <strong>My Games</strong> tab.</li>
-<li>Click the game's icon to launch Clash Royale on your PC. You will see the game's loading screen with the Supercell logo and music.</li>
-<li>If this is your first time playing Clash Royale, you will go through a tutorial that teaches you the basics of the game. You can also sign in with your Google account or Supercell ID to sync your progress from your mobile device.</li>
-<li>If you have played Clash Royale before, you can skip the tutorial and sign in with your Google account or Supercell ID to pick up where you left off.</li>
-</ol>
-<p>Congratulations! You have downloaded and installed Clash Royale on your PC using Bluestacks. Now you can enjoy this amazing game on a bigger screen with better graphics, smoother performance, and more comfortable controls.</p>
- <h2>How to play Clash Royale on PC</h2>
-<h3>The basic gameplay and controls</h3>
-<p>The gameplay of Clash Royale on PC is similar to that on mobile devices. You have to use your cards to attack the enemy towers and defend your own. You have to manage your elixir, which is spent to play cards, and choose the right cards for each situation. You have to win battles to earn trophies, crowns, chests, and other rewards.</p>
-<p>The controls of Clash Royale on PC are different from those on mobile devices. You can use your keyboard and mouse to play instead of tapping and swiping on the screen. Here are the basic controls of Clash Royale on PC using Bluestacks:</p>
-<ul>
-<li>You can use the <strong>mouse</strong> to select cards, drag and drop them onto the battlefield, and interact with the game's menu and interface.</li>
-<li>You can use the <strong>arrow keys</strong> to move the camera and view different parts of the arena.</li>
-
-<li>You can use the <strong>ESC</strong> key to pause the game or leave the match.</li>
-<li>You can use the <strong>F1</strong> key to open the help menu and view the keyboard shortcuts.</li>
-</ul>
-<p>You can also customize your keyboard and mouse settings with the keymapping tool in Bluestacks. You can assign different keys or buttons to different actions, such as playing cards, using emotes, opening chests, etc. You can also adjust your mouse's sensitivity and speed. To access the keymapping tool, click the <strong>keyboard icon</strong> on the right side of the Bluestacks window.</p>
- <h3>Tips and tricks to win battles and progress faster</h3>
-<p>Playing Clash Royale on PC is not just about knowing the basic gameplay and controls. You also need to learn some tips and tricks to win battles and progress faster in the game. Here are some of them:</p>
-<ul>
-<li>Learn the strengths and weaknesses of each card and how the cards interact with one another. For example, you should know which cards can counter or be countered by other cards, which cards deal splash damage or target air units, which cards can push or pull enemies, etc.</li>
-<li>Build a balanced, versatile deck that can handle different situations and opponents. For example, include a mix of low-cost and high-cost cards, offensive and defensive cards, single-target and area-of-effect cards, ground and air units, etc.</li>
-<li>Use your elixir wisely and efficiently. Don't waste it on unnecessary or ineffective moves. Don't let it sit at full capacity for too long. Try to gain an elixir advantage over your opponent by making positive elixir trades.</li>
-<li>Pay attention to your opponent's moves and cards. Try to predict what they will do next and how you can counter them. Try to bait them into making mistakes or wasting their elixir. Exploit their weaknesses and punish their errors.</li>
-
-<li>Be aggressive but not reckless. Don't be afraid to attack when you have an opening or an advantage. Don't be too passive or defensive when you are behind or under pressure. But don't be too greedy or careless either. Know when to retreat or defend as needed.</li>
-<li>Be flexible and adaptable. Don't stick to one strategy or plan all the time. Don't be afraid to change your tactics or cards depending on the situation or the opponent. Experiment with different combinations and synergies. Learn from your wins and losses.</li>
-</ul>
- <h3>The best decks and strategies for different arenas and modes</h3>
-<p>The game also offers various arenas and modes for you to play in. Each arena has a different theme, background, music, and set of unlockable cards. Each mode has different rules, objectives, rewards, and challenges. You need to use different decks and strategies for different arenas and modes to succeed in them.</p>
-<p>Here are some examples of the best decks and strategies for some of the arenas and modes in Clash Royale:</p>
- <table>
-<tr><th>Arena/Mode</th><th>Deck</th><th>Strategy</th></tr>
-<tr><td>Goblin Stadium (Arena 1)</td><td>Giant, Musketeer, Mini P.E.K.K.A., Fireball, Arrows, Knight, Archers, Goblins</td><td>This is a simple but effective deck that can help you win your first battles in Clash Royale. The main idea is to use the Giant as a tank to shield the units behind it, such as the Musketeer or the Mini P.E.K.K.A., which can deal heavy damage to enemy towers or units. Use your spells, such as Fireball or Arrows, to clear the way for your Giant or to finish off low-health enemy towers or units. Use your cheap units, such as the Knight, Archers, or Goblins, to cycle your cards faster or to defend against enemy attacks.</td></tr>
-
-<tr><td>Legendary Arena (Arena 13)</td><td>Golem, Night Witch, Lumberjack, Baby Dragon, Tornado, Lightning, Mega Minion, Zap</td><td>This is a powerful, popular deck that can help you reach the Legendary Arena and beyond. The main idea is to use the Golem as a massive tank that can absorb a lot of damage and bursts into Golemites when destroyed. Use the Night Witch and the Lumberjack as your main support units behind the Golem; they spawn Bats and drop a Rage spell when killed. Use the Baby Dragon and the Mega Minion as secondary support units that can hit both air and ground targets. Use the Tornado spell to pull enemy units together and activate your King Tower, or to set up your Baby Dragon's splash damage. Use the Lightning spell to stun and damage enemy units or towers. Use the Zap spell to reset enemy units or to finish off low-health enemies.</td></tr>
-</table>
- <h2>Conclusion</h2>
-<p>Clash Royale is an amazing game that you can enjoy on your mobile device or on your PC using an Android emulator. By following this guide, you can learn how to download Clash Royale on your computer using Bluestacks and how to play it like a pro. You can also learn some of the best decks and strategies for different arenas and modes in Clash Royale.</p>
-<p>So, what are you waiting for? Download Clash Royale on your PC today and join millions of players in this epic game of strategy, skill, luck, and fun. You won't regret it!</p>
- <h2>Frequently Asked Questions</h2>
-<h3>Q1: Is Clash Royale free to play?</h3>
-<p>A1: Yes, Clash Royale is free to play and download on mobile devices and PC. However, it also offers in-app purchases that can enhance your gaming experience. You can buy gems with real money, which can be used to buy chests, gold, cards, skins, emotes, magic items, etc. You can also buy a Pass Royale subscription that gives you access to exclusive rewards and perks.</p>
- <h3>Q2: Can I play Clash Royale with my friends?</h3>
-
- <h3>Q3: How can I get more cards and gems in Clash Royale?</h3>
-<p>A3: There are many ways to get more cards and gems in Clash Royale without spending real money. You can get cards by opening the chests you earn by winning battles or completing quests. You can also get cards by requesting them from your clanmates or buying them from the shop with gold. You can get gems by completing achievements or events that reward them, and by opening free chests or chests that contain gems.</p>
- <h3>Q4: What are the system requirements for playing Clash Royale on PC?</h3>
-<p>A4: The system requirements for playing Clash Royale on PC using Bluestacks are as follows:</p>
-<ul>
-<li>Operating system: Windows 7 or higher</li>
-<li>Processor: Intel or AMD processor</li>
-<li>RAM: at least 2GB</li>
-<li>HDD: 5GB of free disk space</li>
-<li>Internet: broadband internet connection</li>
-</ul>
-<p>If your PC meets these requirements, you should be able to play Clash Royale on PC without any problems. However, if you run into any issues or errors, you can check the official Bluestacks website for troubleshooting and support.</p>
- <h3>Q5: Where can I find more information and support for Clash Royale?</h3>
-<p>A5: If you want more information and support for Clash Royale, you can visit the following sources:</p>
-<ul>
-<li>The official Clash Royale website, where you can find the latest news, updates, events, and media about the game.</li>
-<li>The official Clash Royale blog, where you can find detailed posts about the game's features, balance changes, tips and tricks, and more.</li>
-<li>The official Clash Royale YouTube channel, where you can watch gameplay videos, tutorials, highlights, and more.</li>
-<li>The official Clash Royale Twitter account, where you can follow the latest tweets and announcements about the game.</li>
-
-<li>The official Clash Royale Reddit community, where you can join discussions and threads with other players and fans of the game.</li>
-<li>The official Clash Royale Discord server, where you can text and voice chat with other players and fans of the game.</li>
-</ul>
\ No newline at end of file
diff --git a/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/imagenet.py b/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/imagenet.py
deleted file mode 100644
index 9a02ec44ba4af9e993f58c91fa43482a4ecbe54c..0000000000000000000000000000000000000000
--- a/spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/imagenet.py
+++ /dev/null
@@ -1,558 +0,0 @@
-import os, tarfile, glob, shutil
-import yaml
-import numpy as np
-from tqdm import tqdm
-from PIL import Image
-import albumentations
-from omegaconf import OmegaConf
-from torch.utils.data import Dataset
-
-from taming.data.base import ImagePaths
-from taming.util import download, retrieve
-import taming.data.utils as bdu
-
-
-def give_synsets_from_indices(indices, path_to_yaml="data/imagenet_idx_to_synset.yaml"):
-    synsets = []
-    with open(path_to_yaml) as f:
-        di2s = yaml.safe_load(f)
-    for idx in indices:
-        synsets.append(str(di2s[idx]))
-    print("Using {} different synsets for construction of Restriced Imagenet.".format(len(synsets)))
-    return synsets
-
-
-def str_to_indices(string):
-    """Expects a string in the format '32-123, 256, 280-321'"""
-    assert not string.endswith(","), "provided string '{}' ends with a comma, please remove it".format(string)
-    subs = string.split(",")
-    indices = []
-    for sub in subs:
-        subsubs = sub.split("-")
-        assert len(subsubs) > 0
-        if len(subsubs) == 1:
-            indices.append(int(subsubs[0]))
-        else:
-            indices.extend(range(int(subsubs[0]), int(subsubs[1])))
-    return sorted(indices)
-
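-# Note (editorial, illustrative): the "a-b" ranges above are half-open, so the
-# upper bound itself is excluded; e.g. str_to_indices("30-32, 40") -> [30, 31, 40].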
-
-class ImageNetBase(Dataset):
-    def __init__(self, config=None):
-        self.config = config or OmegaConf.create()
-        if not isinstance(self.config, dict):
-            self.config = OmegaConf.to_container(self.config)
-        self._prepare()
-        self._prepare_synset_to_human()
-        self._prepare_idx_to_synset()
-        self._load()
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, i):
-        return self.data[i]
-
-    def _prepare(self):
-        raise NotImplementedError()
-
-    def _filter_relpaths(self, relpaths):
-        ignore = set([
-            "n06596364_9591.JPEG",
-        ])
-        relpaths = [rpath for rpath in relpaths if rpath.split("/")[-1] not in ignore]
-        if "sub_indices" in self.config:
-            indices = str_to_indices(self.config["sub_indices"])
-            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
-            files = []
-            for rpath in relpaths:
-                syn = rpath.split("/")[0]
-                if syn in synsets:
-                    files.append(rpath)
-            return files
-        else:
-            return relpaths
-
-    def _prepare_synset_to_human(self):
-        SIZE = 2655750
-        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
-        self.human_dict = os.path.join(self.root, "synset_human.txt")
-        if (not os.path.exists(self.human_dict) or
-                not os.path.getsize(self.human_dict)==SIZE):
-            download(URL, self.human_dict)
-
-    def _prepare_idx_to_synset(self):
-        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
-        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
-        if not os.path.exists(self.idx2syn):
-            download(URL, self.idx2syn)
-
-    def _load(self):
-        with open(self.txt_filelist, "r") as f:
-            self.relpaths = f.read().splitlines()
-            l1 = len(self.relpaths)
-            self.relpaths = self._filter_relpaths(self.relpaths)
-            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
-
-        self.synsets = [p.split("/")[0] for p in self.relpaths]
-        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
-
-        unique_synsets = np.unique(self.synsets)
-        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
-        self.class_labels = [class_dict[s] for s in self.synsets]
-
-        with open(self.human_dict, "r") as f:
-            human_dict = f.read().splitlines()
-            human_dict = dict(line.split(maxsplit=1) for line in human_dict)
-
-        self.human_labels = [human_dict[s] for s in self.synsets]
-
-        labels = {
-            "relpath": np.array(self.relpaths),
-            "synsets": np.array(self.synsets),
-            "class_label": np.array(self.class_labels),
-            "human_label": np.array(self.human_labels),
-        }
-        self.data = ImagePaths(self.abspaths,
-                               labels=labels,
-                               size=retrieve(self.config, "size", default=0),
-                               random_crop=self.random_crop)
-
-
-class ImageNetTrain(ImageNetBase):
-    NAME = "ILSVRC2012_train"
-    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
-    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
-    FILES = [
-        "ILSVRC2012_img_train.tar",
-    ]
-    SIZES = [
-        147897477120,
-    ]
-
-    def _prepare(self):
-        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
-                                    default=True)
-        cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
-        self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
-        self.datadir = os.path.join(self.root, "data")
-        self.txt_filelist = os.path.join(self.root, "filelist.txt")
-        self.expected_length = 1281167
-        if not bdu.is_prepared(self.root):
-            # prep
-            print("Preparing dataset {} in {}".format(self.NAME, self.root))
-
-            datadir = self.datadir
-            if not os.path.exists(datadir):
-                path = os.path.join(self.root, self.FILES[0])
-                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
-                    import academictorrents as at
-                    atpath = at.get(self.AT_HASH, datastore=self.root)
-                    assert atpath == path
-
-                print("Extracting {} to {}".format(path, datadir))
-                os.makedirs(datadir, exist_ok=True)
-                with tarfile.open(path, "r:") as tar:
-                    tar.extractall(path=datadir)
-
-                print("Extracting sub-tars.")
-                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
-                for subpath in tqdm(subpaths):
-                    subdir = subpath[:-len(".tar")]
-                    os.makedirs(subdir, exist_ok=True)
-                    with tarfile.open(subpath, "r:") as tar:
-                        tar.extractall(path=subdir)
-
-
-            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
-            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
-            filelist = sorted(filelist)
-            filelist = "\n".join(filelist)+"\n"
-            with open(self.txt_filelist, "w") as f:
-                f.write(filelist)
-
-            bdu.mark_prepared(self.root)
-
-
-class ImageNetValidation(ImageNetBase):
-    NAME = "ILSVRC2012_validation"
-    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
-    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
-    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
-    FILES = [
-        "ILSVRC2012_img_val.tar",
-        "validation_synset.txt",
-    ]
-    SIZES = [
-        6744924160,
-        1950000,
-    ]
-
-    def _prepare(self):
-        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
-                                    default=False)
-        cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
-        self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
-        self.datadir = os.path.join(self.root, "data")
-        self.txt_filelist = os.path.join(self.root, "filelist.txt")
-        self.expected_length = 50000
-        if not bdu.is_prepared(self.root):
-            # prep
-            print("Preparing dataset {} in {}".format(self.NAME, self.root))
-
-            datadir = self.datadir
-            if not os.path.exists(datadir):
-                path = os.path.join(self.root, self.FILES[0])
-                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
-                    import academictorrents as at
-                    atpath = at.get(self.AT_HASH, datastore=self.root)
-                    assert atpath == path
-
-                print("Extracting {} to {}".format(path, datadir))
-                os.makedirs(datadir, exist_ok=True)
-                with tarfile.open(path, "r:") as tar:
-                    tar.extractall(path=datadir)
-
-                vspath = os.path.join(self.root, self.FILES[1])
-                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
-                    download(self.VS_URL, vspath)
-
-                with open(vspath, "r") as f:
-                    synset_dict = f.read().splitlines()
-                    synset_dict = dict(line.split() for line in synset_dict)
-
-                print("Reorganizing into synset folders")
-                synsets = np.unique(list(synset_dict.values()))
-                for s in synsets:
-                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
-                for k, v in synset_dict.items():
-                    src = os.path.join(datadir, k)
-                    dst = os.path.join(datadir, v)
-                    shutil.move(src, dst)
-
-            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
-            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
-            filelist = sorted(filelist)
-            filelist = "\n".join(filelist)+"\n"
-            with open(self.txt_filelist, "w") as f:
-                f.write(filelist)
-
-            bdu.mark_prepared(self.root)
-
-
-def get_preprocessor(size=None, random_crop=False, additional_targets=None,
-                     crop_size=None):
-    if size is not None and size > 0:
-        transforms = list()
-        rescaler = albumentations.SmallestMaxSize(max_size = size)
-        transforms.append(rescaler)
-        if not random_crop:
-            cropper = albumentations.CenterCrop(height=size,width=size)
-            transforms.append(cropper)
-        else:
-            cropper = albumentations.RandomCrop(height=size,width=size)
-            transforms.append(cropper)
-            flipper = albumentations.HorizontalFlip()
-            transforms.append(flipper)
-        preprocessor = albumentations.Compose(transforms,
-                                              additional_targets=additional_targets)
-    elif crop_size is not None and crop_size > 0:
-        if not random_crop:
-            cropper = albumentations.CenterCrop(height=crop_size,width=crop_size)
-        else:
-            cropper = albumentations.RandomCrop(height=crop_size,width=crop_size)
-        transforms = [cropper]
-        preprocessor = albumentations.Compose(transforms,
-                                              additional_targets=additional_targets)
-    else:
-        preprocessor = lambda **kwargs: kwargs
-    return preprocessor
-
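-# Illustrative usage (not from the original file): assuming `img` is an
-# HxWx3 uint8 array,
-#     get_preprocessor(size=256, random_crop=True)(image=img)["image"]
-# resizes the smaller side to 256, then random-crops (and possibly flips)
-# the result to 256x256.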
-
-def rgba_to_depth(x):
-    assert x.dtype == np.uint8
-    assert len(x.shape) == 3 and x.shape[2] == 4
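-    # The depth map was stored by packing the raw bytes of float32 values into
-    # the 4 uint8 RGBA channels of a PNG; reassigning .dtype below reinterprets
-    # the buffer as float32 in place, and the reshape drops the channel axis.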
-    y = x.copy()
-    y.dtype = np.float32
-    y = y.reshape(x.shape[:2])
-    return np.ascontiguousarray(y)
-
-
-class BaseWithDepth(Dataset):
-    DEFAULT_DEPTH_ROOT="data/imagenet_depth"
-
-    def __init__(self, config=None, size=None, random_crop=False,
-                 crop_size=None, root=None):
-        self.config = config
-        self.base_dset = self.get_base_dset()
-        self.preprocessor = get_preprocessor(
-            size=size,
-            crop_size=crop_size,
-            random_crop=random_crop,
-            additional_targets={"depth": "image"})
-        self.crop_size = crop_size
-        if self.crop_size is not None:
-            self.rescaler = albumentations.Compose(
-                [albumentations.SmallestMaxSize(max_size = self.crop_size)],
-                additional_targets={"depth": "image"})
-        if root is not None:
-            self.DEFAULT_DEPTH_ROOT = root
-
-    def __len__(self):
-        return len(self.base_dset)
-
-    def preprocess_depth(self, path):
-        rgba = np.array(Image.open(path))
-        depth = rgba_to_depth(rgba)
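-        # Min-max normalize to [0, 1] (the max(1e-8, ...) guards against a
-        # constant depth map), then rescale to [-1, 1].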
-        depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
-        depth = 2.0*depth-1.0
-        return depth
-
-    def __getitem__(self, i):
-        e = self.base_dset[i]
-        e["depth"] = self.preprocess_depth(self.get_depth_path(e))
-        # up if necessary
-        h,w,c = e["image"].shape
-        if self.crop_size and min(h,w) < self.crop_size:
-            # have to upscale to be able to crop - this just uses bilinear
-            out = self.rescaler(image=e["image"], depth=e["depth"])
-            e["image"] = out["image"]
-            e["depth"] = out["depth"]
-        transformed = self.preprocessor(image=e["image"], depth=e["depth"])
-        e["image"] = transformed["image"]
-        e["depth"] = transformed["depth"]
-        return e
-
-
-class ImageNetTrainWithDepth(BaseWithDepth):
-    # default to random_crop=True
-    def __init__(self, random_crop=True, sub_indices=None, **kwargs):
-        self.sub_indices = sub_indices
-        super().__init__(random_crop=random_crop, **kwargs)
-
-    def get_base_dset(self):
-        if self.sub_indices is None:
-            return ImageNetTrain()
-        else:
-            return ImageNetTrain({"sub_indices": self.sub_indices})
-
-    def get_depth_path(self, e):
-        fid = os.path.splitext(e["relpath"])[0]+".png"
-        fid = os.path.join(self.DEFAULT_DEPTH_ROOT, "train", fid)
-        return fid
-
-
-class ImageNetValidationWithDepth(BaseWithDepth):
-    def __init__(self, sub_indices=None, **kwargs):
-        self.sub_indices = sub_indices
-        super().__init__(**kwargs)
-
-    def get_base_dset(self):
-        if self.sub_indices is None:
-            return ImageNetValidation()
-        else:
-            return ImageNetValidation({"sub_indices": self.sub_indices})
-
-    def get_depth_path(self, e):
-        fid = os.path.splitext(e["relpath"])[0]+".png"
-        fid = os.path.join(self.DEFAULT_DEPTH_ROOT, "val", fid)
-        return fid
-
-
-class RINTrainWithDepth(ImageNetTrainWithDepth):
-    def __init__(self, config=None, size=None, random_crop=True, crop_size=None):
-        sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
-        super().__init__(config=config, size=size, random_crop=random_crop,
-                         sub_indices=sub_indices, crop_size=crop_size)
-
-
-class RINValidationWithDepth(ImageNetValidationWithDepth):
-    def __init__(self, config=None, size=None, random_crop=False, crop_size=None):
-        sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
-        super().__init__(config=config, size=size, random_crop=random_crop,
-                         sub_indices=sub_indices, crop_size=crop_size)
-
-
-class DRINExamples(Dataset):
-    def __init__(self):
-        self.preprocessor = get_preprocessor(size=256, additional_targets={"depth": "image"})
-        with open("data/drin_examples.txt", "r") as f:
-            relpaths = f.read().splitlines()
-        self.image_paths = [os.path.join("data/drin_images",
-                                         relpath) for relpath in relpaths]
-        self.depth_paths = [os.path.join("data/drin_depth",
-                                         relpath.replace(".JPEG", ".png")) for relpath in relpaths]
-
-    def __len__(self):
-        return len(self.image_paths)
-
-    def preprocess_image(self, image_path):
-        image = Image.open(image_path)
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
-        image = np.array(image).astype(np.uint8)
-        image = self.preprocessor(image=image)["image"]
-        image = (image/127.5 - 1.0).astype(np.float32)
-        return image
-
-    def preprocess_depth(self, path):
-        rgba = np.array(Image.open(path))
-        depth = rgba_to_depth(rgba)
-        depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
-        depth = 2.0*depth-1.0
-        return depth
-
-    def __getitem__(self, i):
-        e = dict()
-        e["image"] = self.preprocess_image(self.image_paths[i])
-        e["depth"] = self.preprocess_depth(self.depth_paths[i])
-        transformed = self.preprocessor(image=e["image"], depth=e["depth"])
-        e["image"] = transformed["image"]
-        e["depth"] = transformed["depth"]
-        return e
-
-
-def imscale(x, factor, keepshapes=False, keepmode="bicubic"):
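-    # Downscale a float image in [-1, 1] by `factor` using bicubic resampling;
-    # if keepshapes, resize back up to the original resolution with `keepmode`,
-    # yielding a blurred low-resolution version of x at the original shape.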
-    if factor is None or factor==1:
-        return x
-
-    dtype = x.dtype
-    assert dtype in [np.float32, np.float64]
-    assert x.min() >= -1
-    assert x.max() <= 1
-
-    keepmode = {"nearest": Image.NEAREST, "bilinear": Image.BILINEAR,
-                "bicubic": Image.BICUBIC}[keepmode]
-
-    lr = (x+1.0)*127.5
-    lr = lr.clip(0,255).astype(np.uint8)
-    lr = Image.fromarray(lr)
-
-    h, w, _ = x.shape
-    nh = h//factor
-    nw = w//factor
-    assert nh > 0 and nw > 0, (nh, nw)
-
-    lr = lr.resize((nw,nh), Image.BICUBIC)
-    if keepshapes:
-        lr = lr.resize((w,h), keepmode)
-    lr = np.array(lr)/127.5-1.0
-    lr = lr.astype(dtype)
-
-    return lr
-
-
-class ImageNetScale(Dataset):
-    def __init__(self, size=None, crop_size=None, random_crop=False,
-                 up_factor=None, hr_factor=None, keep_mode="bicubic"):
-        self.base = self.get_base()
-
-        self.size = size
-        self.crop_size = crop_size if crop_size is not None else self.size
-        self.random_crop = random_crop
-        self.up_factor = up_factor
-        self.hr_factor = hr_factor
-        self.keep_mode = keep_mode
-
-        transforms = list()
-
-        if self.size is not None and self.size > 0:
-            rescaler = albumentations.SmallestMaxSize(max_size = self.size)
-            self.rescaler = rescaler
-            transforms.append(rescaler)
-
-        if self.crop_size is not None and self.crop_size > 0:
-            if len(transforms) == 0:
-                self.rescaler = albumentations.SmallestMaxSize(max_size = self.crop_size)
-
-            if not self.random_crop:
-                cropper = albumentations.CenterCrop(height=self.crop_size,width=self.crop_size)
-            else:
-                cropper = albumentations.RandomCrop(height=self.crop_size,width=self.crop_size)
-            transforms.append(cropper)
-
-        if len(transforms) > 0:
-            if self.up_factor is not None:
-                additional_targets = {"lr": "image"}
-            else:
-                additional_targets = None
-            self.preprocessor = albumentations.Compose(transforms,
-                                                       additional_targets=additional_targets)
-        else:
-            self.preprocessor = lambda **kwargs: kwargs
-
-    def __len__(self):
-        return len(self.base)
-
-    def __getitem__(self, i):
-        example = self.base[i]
-        image = example["image"]
-        # adjust resolution
-        image = imscale(image, self.hr_factor, keepshapes=False)
-        h,w,c = image.shape
-        if self.crop_size and min(h,w) < self.crop_size:
-            # have to upscale to be able to crop - this just uses bilinear
-            image = self.rescaler(image=image)["image"]
-        if self.up_factor is None:
-            image = self.preprocessor(image=image)["image"]
-            example["image"] = image
-        else:
-            lr = imscale(image, self.up_factor, keepshapes=True,
-                         keepmode=self.keep_mode)
-
-            out = self.preprocessor(image=image, lr=lr)
-            example["image"] = out["image"]
-            example["lr"] = out["lr"]
-
-        return example
-
-class ImageNetScaleTrain(ImageNetScale):
-    def __init__(self, random_crop=True, **kwargs):
-        super().__init__(random_crop=random_crop, **kwargs)
-
-    def get_base(self):
-        return ImageNetTrain()
-
-class ImageNetScaleValidation(ImageNetScale):
-    def get_base(self):
-        return ImageNetValidation()
-
-
-from skimage.feature import canny
-from skimage.color import rgb2gray
-
-
-class ImageNetEdges(ImageNetScale):
-    def __init__(self, up_factor=1, **kwargs):
-        # Pass the argument through rather than hard-coding 1, so a caller's
-        # up_factor is not silently ignored.
-        super().__init__(up_factor=up_factor, **kwargs)
-
-    def __getitem__(self, i):
-        example = self.base[i]
-        image = example["image"]
-        h,w,c = image.shape
-        if self.crop_size and min(h,w) < self.crop_size:
-            # have to upscale to be able to crop - this just uses bilinear
-            image = self.rescaler(image=image)["image"]
-
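-        # Build the conditioning input: a binary Canny edge map of the
-        # grayscale image, replicated to 3 channels so it matches `image`.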
-        lr = canny(rgb2gray(image), sigma=2)
-        lr = lr.astype(np.float32)
-        lr = lr[:,:,None][:,:,[0,0,0]]
-
-        out = self.preprocessor(image=image, lr=lr)
-        example["image"] = out["image"]
-        example["lr"] = out["lr"]
-
-        return example
-
-
-class ImageNetEdgesTrain(ImageNetEdges):
-    def __init__(self, random_crop=True, **kwargs):
-        super().__init__(random_crop=random_crop, **kwargs)
-
-    def get_base(self):
-        return ImageNetTrain()
-
-class ImageNetEdgesValidation(ImageNetEdges):
-    def get_base(self):
-        return ImageNetValidation()
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/model.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/model.py
deleted file mode 100644
index ea888ce1bb2a654d89ec16e0586a05c53537a907..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/model.py
+++ /dev/null
@@ -1,946 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-"""Abstractions to interact with service models."""
-from collections import defaultdict
-from typing import NamedTuple, Union
-
-from botocore.compat import OrderedDict
-from botocore.exceptions import (
-    MissingServiceIdError,
-    UndefinedModelAttributeError,
-)
-from botocore.utils import CachedProperty, hyphenize_service_id, instance_cache
-
-NOT_SET = object()
-
-
-class NoShapeFoundError(Exception):
-    pass
-
-
-class InvalidShapeError(Exception):
-    pass
-
-
-class OperationNotFoundError(Exception):
-    pass
-
-
-class InvalidShapeReferenceError(Exception):
-    pass
-
-
-class ServiceId(str):
-    def hyphenize(self):
-        return hyphenize_service_id(self)
-
-
-class Shape:
-    """Object representing a shape from the service model."""
-
-    # To simplify serialization logic, all shape params that are
-    # related to serialization are moved from the top level hash into
-    # a 'serialization' hash.  This list below contains the names of all
-    # the attributes that should be moved.
-    SERIALIZED_ATTRS = [
-        'locationName',
-        'queryName',
-        'flattened',
-        'location',
-        'payload',
-        'streaming',
-        'timestampFormat',
-        'xmlNamespace',
-        'resultWrapper',
-        'xmlAttribute',
-        'eventstream',
-        'event',
-        'eventheader',
-        'eventpayload',
-        'jsonvalue',
-        'hostLabel',
-    ]
-    METADATA_ATTRS = [
-        'required',
-        'min',
-        'max',
-        'pattern',
-        'sensitive',
-        'enum',
-        'idempotencyToken',
-        'error',
-        'exception',
-        'endpointdiscoveryid',
-        'retryable',
-        'document',
-        'union',
-        'contextParam',
-        'clientContextParams',
-    ]
-    MAP_TYPE = OrderedDict
-
-    def __init__(self, shape_name, shape_model, shape_resolver=None):
-        """
-
-        :type shape_name: string
-        :param shape_name: The name of the shape.
-
-        :type shape_model: dict
-        :param shape_model: The shape model.  This would be the value
-            associated with the key in the "shapes" dict of the
-            service model (i.e. ``model['shapes'][shape_name]``)
-
-        :type shape_resolver: botocore.model.ShapeResolver
-        :param shape_resolver: A shape resolver object.  This is used to
-            resolve references to other shapes.  For scalar shape types
-            (string, integer, boolean, etc.), this argument is not
-            required.  If a shape_resolver is not provided for a complex
-            type, then a ``ValueError`` will be raised when an attempt
-            to resolve a shape is made.
-
-        """
-        self.name = shape_name
-        self.type_name = shape_model['type']
-        self.documentation = shape_model.get('documentation', '')
-        self._shape_model = shape_model
-        if shape_resolver is None:
-            # If a shape_resolver is not provided, we create an object
-            # that will throw errors if you attempt to resolve
-            # a shape.  This is actually ok for scalar shapes
-            # because they don't need to resolve shapes and shouldn't
-            # be required to provide an object they won't use.
-            shape_resolver = UnresolvableShapeMap()
-        self._shape_resolver = shape_resolver
-        self._cache = {}
-
-    @CachedProperty
-    def serialization(self):
-        """Serialization information about the shape.
-
-        This contains information that may be needed for input serialization
-        or response parsing.  This can include:
-
-            * name
-            * queryName
-            * flattened
-            * location
-            * payload
-            * streaming
-            * xmlNamespace
-            * resultWrapper
-            * xmlAttribute
-            * jsonvalue
-            * timestampFormat
-
-        :rtype: dict
-        :return: Serialization information about the shape.
-
-        """
-        model = self._shape_model
-        serialization = {}
-        for attr in self.SERIALIZED_ATTRS:
-            if attr in self._shape_model:
-                serialization[attr] = model[attr]
-        # For consistency, locationName is renamed to just 'name'.
-        if 'locationName' in serialization:
-            serialization['name'] = serialization.pop('locationName')
-        return serialization
-
-    @CachedProperty
-    def metadata(self):
-        """Metadata about the shape.
-
-        This contains optional information about the shape, including:
-
-            * min
-            * max
-            * pattern
-            * enum
-            * sensitive
-            * required
-            * idempotencyToken
-            * document
-            * union
-
-        :rtype: dict
-        :return: Metadata about the shape.
-
-        """
-        model = self._shape_model
-        metadata = {}
-        for attr in self.METADATA_ATTRS:
-            if attr in self._shape_model:
-                metadata[attr] = model[attr]
-        return metadata
-
-    @CachedProperty
-    def required_members(self):
-        """A list of members that are required.
-
-        A structure shape can define members that are required.
-        This value will return a list of required members.  If there
-        are no required members an empty list is returned.
-
-        """
-        return self.metadata.get('required', [])
-
-    def _resolve_shape_ref(self, shape_ref):
-        return self._shape_resolver.resolve_shape_ref(shape_ref)
-
-    def __repr__(self):
-        return f"<{self.__class__.__name__}({self.name})>"
-
-    @property
-    def event_stream_name(self):
-        return None
-
-
-class StructureShape(Shape):
-    @CachedProperty
-    def members(self):
-        members = self._shape_model.get('members', self.MAP_TYPE())
-        # The members dict looks like:
-        #    'members': {
-        #        'MemberName': {'shape': 'shapeName'},
-        #        'MemberName2': {'shape': 'shapeName'},
-        #    }
-        # We return a dict of member name to Shape object.
-        shape_members = self.MAP_TYPE()
-        for name, shape_ref in members.items():
-            shape_members[name] = self._resolve_shape_ref(shape_ref)
-        return shape_members
-
-    @CachedProperty
-    def event_stream_name(self):
-        for member_name, member in self.members.items():
-            if member.serialization.get('eventstream'):
-                return member_name
-        return None
-
-    @CachedProperty
-    def error_code(self):
-        if not self.metadata.get('exception', False):
-            return None
-        error_metadata = self.metadata.get("error", {})
-        code = error_metadata.get("code")
-        if code:
-            return code
-        # Use the exception name if there is no explicit code modeled
-        return self.name
-
-    @CachedProperty
-    def is_document_type(self):
-        return self.metadata.get('document', False)
-
-    @CachedProperty
-    def is_tagged_union(self):
-        return self.metadata.get('union', False)
-
-
-class ListShape(Shape):
-    @CachedProperty
-    def member(self):
-        return self._resolve_shape_ref(self._shape_model['member'])
-
-
-class MapShape(Shape):
-    @CachedProperty
-    def key(self):
-        return self._resolve_shape_ref(self._shape_model['key'])
-
-    @CachedProperty
-    def value(self):
-        return self._resolve_shape_ref(self._shape_model['value'])
-
-
-class StringShape(Shape):
-    @CachedProperty
-    def enum(self):
-        return self.metadata.get('enum', [])
-
-
-class StaticContextParameter(NamedTuple):
-    name: str
-    value: Union[bool, str]
-
-
-class ContextParameter(NamedTuple):
-    name: str
-    member_name: str
-
-
-class ClientContextParameter(NamedTuple):
-    name: str
-    type: str
-    documentation: str
-
-
-class ServiceModel:
-    """
-
-    :ivar service_description: The parsed service description dictionary.
-
-    """
-
-    def __init__(self, service_description, service_name=None):
-        """
-
-        :type service_description: dict
-        :param service_description: The service description model.  This value
-            is obtained from a botocore.loader.Loader, or from directly loading
-            the file yourself::
-
-                service_description = json.load(
-                    open('/path/to/service-description-model.json'))
-                model = ServiceModel(service_description)
-
-        :type service_name: str
-        :param service_name: The name of the service.  Normally this is
-            the endpoint prefix defined in the service_description.  However,
-            you can override this value to provide a more convenient name.
-            This is done in a few places in botocore (ses instead of email,
-            emr instead of elasticmapreduce).  If this value is not provided,
-            it will default to the endpointPrefix defined in the model.
-
-        """
-        self._service_description = service_description
-        # We want clients to be able to access metadata directly.
-        self.metadata = service_description.get('metadata', {})
-        self._shape_resolver = ShapeResolver(
-            service_description.get('shapes', {})
-        )
-        self._signature_version = NOT_SET
-        self._service_name = service_name
-        self._instance_cache = {}
-
-    def shape_for(self, shape_name, member_traits=None):
-        return self._shape_resolver.get_shape_by_name(
-            shape_name, member_traits
-        )
-
-    def shape_for_error_code(self, error_code):
-        return self._error_code_cache.get(error_code, None)
-
-    @CachedProperty
-    def _error_code_cache(self):
-        error_code_cache = {}
-        for error_shape in self.error_shapes:
-            code = error_shape.error_code
-            error_code_cache[code] = error_shape
-        return error_code_cache
-
-    def resolve_shape_ref(self, shape_ref):
-        return self._shape_resolver.resolve_shape_ref(shape_ref)
-
-    @CachedProperty
-    def shape_names(self):
-        return list(self._service_description.get('shapes', {}))
-
-    @CachedProperty
-    def error_shapes(self):
-        error_shapes = []
-        for shape_name in self.shape_names:
-            error_shape = self.shape_for(shape_name)
-            if error_shape.metadata.get('exception', False):
-                error_shapes.append(error_shape)
-        return error_shapes
-
-    @instance_cache
-    def operation_model(self, operation_name):
-        try:
-            model = self._service_description['operations'][operation_name]
-        except KeyError:
-            raise OperationNotFoundError(operation_name)
-        return OperationModel(model, self, operation_name)
-
-    @CachedProperty
-    def documentation(self):
-        return self._service_description.get('documentation', '')
-
-    @CachedProperty
-    def operation_names(self):
-        return list(self._service_description.get('operations', []))
-
-    @CachedProperty
-    def service_name(self):
-        """The name of the service.
-
-        This defaults to the endpointPrefix defined in the service model.
-        However, this value can be overridden when a ``ServiceModel`` is
-        created.  If a service_name was not provided when the ``ServiceModel``
-        was created and if there is no endpointPrefix defined in the
-        service model, then an ``UndefinedModelAttributeError`` exception
-        will be raised.
-
-        """
-        if self._service_name is not None:
-            return self._service_name
-        else:
-            return self.endpoint_prefix
-
-    @CachedProperty
-    def service_id(self):
-        try:
-            return ServiceId(self._get_metadata_property('serviceId'))
-        except UndefinedModelAttributeError:
-            raise MissingServiceIdError(service_name=self._service_name)
-
-    @CachedProperty
-    def signing_name(self):
-        """The name to use when computing signatures.
-
-        If the model does not define a signing name, this
-        value will be the endpoint prefix defined in the model.
-        """
-        signing_name = self.metadata.get('signingName')
-        if signing_name is None:
-            signing_name = self.endpoint_prefix
-        return signing_name
-
-    @CachedProperty
-    def api_version(self):
-        return self._get_metadata_property('apiVersion')
-
-    @CachedProperty
-    def protocol(self):
-        return self._get_metadata_property('protocol')
-
-    @CachedProperty
-    def endpoint_prefix(self):
-        return self._get_metadata_property('endpointPrefix')
-
-    @CachedProperty
-    def endpoint_discovery_operation(self):
-        for operation in self.operation_names:
-            model = self.operation_model(operation)
-            if model.is_endpoint_discovery_operation:
-                return model
-
-    @CachedProperty
-    def endpoint_discovery_required(self):
-        for operation in self.operation_names:
-            model = self.operation_model(operation)
-            if (
-                model.endpoint_discovery is not None
-                and model.endpoint_discovery.get('required')
-            ):
-                return True
-        return False
-
-    @CachedProperty
-    def client_context_parameters(self):
-        params = self._service_description.get('clientContextParams', {})
-        return [
-            ClientContextParameter(
-                name=param_name,
-                type=param_val['type'],
-                documentation=param_val['documentation'],
-            )
-            for param_name, param_val in params.items()
-        ]
-
-    def _get_metadata_property(self, name):
-        try:
-            return self.metadata[name]
-        except KeyError:
-            raise UndefinedModelAttributeError(
-                f'"{name}" not defined in the metadata of the model: {self}'
-            )
-
-    # Signature version is one of the rare properties
-    # that can be modified so a CachedProperty is not used here.
-
-    @property
-    def signature_version(self):
-        if self._signature_version is NOT_SET:
-            signature_version = self.metadata.get('signatureVersion')
-            self._signature_version = signature_version
-        return self._signature_version
-
-    @signature_version.setter
-    def signature_version(self, value):
-        self._signature_version = value
-
-    def __repr__(self):
-        return f'{self.__class__.__name__}({self.service_name})'
-
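-# Illustrative usage sketch (editorial, not part of the original module):
-# assuming `service_description` is a loaded service model dict,
-#     model = ServiceModel(service_description, service_name='s3')
-#     op = model.operation_model('GetObject')
-#     op.input_shape.members   # OrderedDict of member name -> Shape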
-
-class OperationModel:
-    def __init__(self, operation_model, service_model, name=None):
-        """
-
-        :type operation_model: dict
-        :param operation_model: The operation model.  This comes from the
-            service model, and is the value associated with the operation
-            name in the service model (i.e. ``model['operations'][op_name]``).
-
-        :type service_model: botocore.model.ServiceModel
-        :param service_model: The service model associated with the operation.
-
-        :type name: string
-        :param name: The operation name.  This is the operation name exposed to
-            the users of this model.  This can potentially be different from
-            the "wire_name", which is the operation name that *must* by
-            provided over the wire.  For example, given::
-
-               "CreateCloudFrontOriginAccessIdentity":{
-                 "name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
-                  ...
-              }
-
-           The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
-           but the ``self.wire_name`` would be
-           ``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
-           value we must send in the corresponding HTTP request.
-
-        """
-        self._operation_model = operation_model
-        self._service_model = service_model
-        self._api_name = name
-        # Clients can access '.name' to get the operation name
-        # and '.metadata' to get the top level metadata of the service.
-        self._wire_name = operation_model.get('name')
-        self.metadata = service_model.metadata
-        self.http = operation_model.get('http', {})
-
-    @CachedProperty
-    def name(self):
-        if self._api_name is not None:
-            return self._api_name
-        else:
-            return self.wire_name
-
-    @property
-    def wire_name(self):
-        """The wire name of the operation.
-
-        In many situations this is the same value as ``name``,
-        but in some services the operation name exposed to the user
-        is different from the operation name we send across the wire
-        (e.g. cloudfront).
-
-        Any serialization code should use ``wire_name``.
-
-        """
-        return self._operation_model.get('name')
-
-    @property
-    def service_model(self):
-        return self._service_model
-
-    @CachedProperty
-    def documentation(self):
-        return self._operation_model.get('documentation', '')
-
-    @CachedProperty
-    def deprecated(self):
-        return self._operation_model.get('deprecated', False)
-
-    @CachedProperty
-    def endpoint_discovery(self):
-        # Explicit None default. An empty dictionary for this trait means it is
-        # enabled but not required to be used.
-        return self._operation_model.get('endpointdiscovery', None)
-
-    @CachedProperty
-    def is_endpoint_discovery_operation(self):
-        return self._operation_model.get('endpointoperation', False)
-
-    @CachedProperty
-    def input_shape(self):
-        if 'input' not in self._operation_model:
-            # Some operations do not accept any input and do not define an
-            # input shape.
-            return None
-        return self._service_model.resolve_shape_ref(
-            self._operation_model['input']
-        )
-
-    @CachedProperty
-    def output_shape(self):
-        if 'output' not in self._operation_model:
-            # Some operations do not define an output shape,
-            # in which case we return None to indicate the
-            # operation has no expected output.
-            return None
-        return self._service_model.resolve_shape_ref(
-            self._operation_model['output']
-        )
-
-    @CachedProperty
-    def idempotent_members(self):
-        input_shape = self.input_shape
-        if not input_shape:
-            return []
-
-        return [
-            name
-            for (name, shape) in input_shape.members.items()
-            if 'idempotencyToken' in shape.metadata
-            and shape.metadata['idempotencyToken']
-        ]
-
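-    # Example (hypothetical model snippet): an input member declared as
-    #   "ClientToken": {"shape": "String", "idempotencyToken": true}
-    # would appear in ``idempotent_members`` as ['ClientToken'].
-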
-    @CachedProperty
-    def static_context_parameters(self):
-        params = self._operation_model.get('staticContextParams', {})
-        return [
-            StaticContextParameter(name=name, value=props.get('value'))
-            for name, props in params.items()
-        ]
-
-    @CachedProperty
-    def context_parameters(self):
-        if not self.input_shape:
-            return []
-
-        return [
-            ContextParameter(
-                name=shape.metadata['contextParam']['name'],
-                member_name=name,
-            )
-            for name, shape in self.input_shape.members.items()
-            if 'contextParam' in shape.metadata
-            and 'name' in shape.metadata['contextParam']
-        ]
-
-    @CachedProperty
-    def auth_type(self):
-        return self._operation_model.get('authtype')
-
-    @CachedProperty
-    def error_shapes(self):
-        shapes = self._operation_model.get("errors", [])
-        return list(self._service_model.resolve_shape_ref(s) for s in shapes)
-
-    @CachedProperty
-    def endpoint(self):
-        return self._operation_model.get('endpoint')
-
-    @CachedProperty
-    def http_checksum_required(self):
-        return self._operation_model.get('httpChecksumRequired', False)
-
-    @CachedProperty
-    def http_checksum(self):
-        return self._operation_model.get('httpChecksum', {})
-
-    @CachedProperty
-    def has_event_stream_input(self):
-        return self.get_event_stream_input() is not None
-
-    @CachedProperty
-    def has_event_stream_output(self):
-        return self.get_event_stream_output() is not None
-
-    def get_event_stream_input(self):
-        return self._get_event_stream(self.input_shape)
-
-    def get_event_stream_output(self):
-        return self._get_event_stream(self.output_shape)
-
-    def _get_event_stream(self, shape):
-        """Returns the event stream member's shape if any or None otherwise."""
-        if shape is None:
-            return None
-        event_name = shape.event_stream_name
-        if event_name:
-            return shape.members[event_name]
-        return None
-
-    @CachedProperty
-    def has_streaming_input(self):
-        return self.get_streaming_input() is not None
-
-    @CachedProperty
-    def has_streaming_output(self):
-        return self.get_streaming_output() is not None
-
-    def get_streaming_input(self):
-        return self._get_streaming_body(self.input_shape)
-
-    def get_streaming_output(self):
-        return self._get_streaming_body(self.output_shape)
-
-    def _get_streaming_body(self, shape):
-        """Returns the streaming member's shape if any; or None otherwise."""
-        if shape is None:
-            return None
-        payload = shape.serialization.get('payload')
-        if payload is not None:
-            payload_shape = shape.members[payload]
-            if payload_shape.type_name == 'blob':
-                return payload_shape
-        return None
-
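-    # Example (hypothetical shape): a structure whose serialization dict is
-    # {'payload': 'Body'} and whose 'Body' member has type_name 'blob' is
-    # treated as a streaming body; get_streaming_input()/get_streaming_output()
-    # then return that member's shape.
-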
-    def __repr__(self):
-        return f'{self.__class__.__name__}(name={self.name})'
-
-
-class ShapeResolver:
-    """Resolves shape references."""
-
-    # Any type not in this mapping will default to the Shape class.
-    SHAPE_CLASSES = {
-        'structure': StructureShape,
-        'list': ListShape,
-        'map': MapShape,
-        'string': StringShape,
-    }
-
-    def __init__(self, shape_map):
-        self._shape_map = shape_map
-        self._shape_cache = {}
-
-    def get_shape_by_name(self, shape_name, member_traits=None):
-        try:
-            shape_model = self._shape_map[shape_name]
-        except KeyError:
-            raise NoShapeFoundError(shape_name)
-        try:
-            shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
-        except KeyError:
-            raise InvalidShapeError(
-                f"Shape is missing required key 'type': {shape_model}"
-            )
-        if member_traits:
-            shape_model = shape_model.copy()
-            shape_model.update(member_traits)
-        result = shape_cls(shape_name, shape_model, self)
-        return result
-
-    def resolve_shape_ref(self, shape_ref):
-        # A shape_ref is a dict that has a 'shape' key that
-        # refers to a shape name as well as any additional
-        # member traits that are then merged over the shape
-        # definition.  For example:
-        # {"shape": "StringType", "locationName": "Foobar"}
-        if len(shape_ref) == 1 and 'shape' in shape_ref:
-            # It's just a shape ref with no member traits, we can avoid
-            # a .copy().  This is the common case so it's specifically
-            # called out here.
-            return self.get_shape_by_name(shape_ref['shape'])
-        else:
-            member_traits = shape_ref.copy()
-            try:
-                shape_name = member_traits.pop('shape')
-            except KeyError:
-                raise InvalidShapeReferenceError(
-                    f"Invalid model, missing shape reference: {shape_ref}"
-                )
-            return self.get_shape_by_name(shape_name, member_traits)
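-
-    # Minimal usage sketch (assumed one-entry shape map), showing how member
-    # traits are merged over the referenced shape definition:
-    #   resolver = ShapeResolver({'StringType': {'type': 'string'}})
-    #   shape = resolver.resolve_shape_ref(
-    #       {'shape': 'StringType', 'locationName': 'Foobar'})
-    #   shape.serialization['name']  # -> 'Foobar'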
-
-
-class UnresolvableShapeMap:
-    """A ShapeResolver that will throw ValueErrors when shapes are resolved."""
-
-    def get_shape_by_name(self, shape_name, member_traits=None):
-        raise ValueError(
-            f"Attempted to lookup shape '{shape_name}', but no shape map was provided."
-        )
-
-    def resolve_shape_ref(self, shape_ref):
-        raise ValueError(
-            f"Attempted to resolve shape '{shape_ref}', but no shape "
-            f"map was provided."
-        )
-
-
-class DenormalizedStructureBuilder:
-    """Build a StructureShape from a denormalized model.
-
-    This is a convenience builder class that makes it easy to construct
-    ``StructureShape``s based on a denormalized model.
-
-    It will handle the details of creating unique shape names and creating
-    the appropriate shape map needed by the ``StructureShape`` class.
-
-    Example usage::
-
-        builder = DenormalizedStructureBuilder()
-        shape = builder.with_members({
-            'A': {
-                'type': 'structure',
-                'members': {
-                    'B': {
-                        'type': 'structure',
-                        'members': {
-                            'C': {
-                                'type': 'string',
-                            }
-                        }
-                    }
-                }
-            }
-        }).build_model()
-        # ``shape`` is now an instance of botocore.model.StructureShape
-
-    :type dict_type: class
-    :param dict_type: The dictionary type to use, allowing you to opt-in
-                      to using OrderedDict or another dict type. This can
-                      be particularly useful for testing when order
-                      matters, such as for documentation.
-
-    """
-
-    SCALAR_TYPES = (
-        'string',
-        'integer',
-        'boolean',
-        'blob',
-        'float',
-        'timestamp',
-        'long',
-        'double',
-        'char',
-    )
-
-    def __init__(self, name=None):
-        # Members are supplied via with_members(); default to an empty
-        # mapping so build_model() works even if it is never called.
-        self._members = OrderedDict()
-        self._name_generator = ShapeNameGenerator()
-        if name is None:
-            name = self._name_generator.new_shape_name('structure')
-        self.name = name
-
-    def with_members(self, members):
-        """
-
-        :type members: dict
-        :param members: The denormalized members.
-
-        :return: self
-
-        """
-        self._members = members
-        return self
-
-    def build_model(self):
-        """Build the model based on the provided members.
-
-        :rtype: botocore.model.StructureShape
-        :return: The built StructureShape object.
-
-        """
-        shapes = OrderedDict()
-        denormalized = {
-            'type': 'structure',
-            'members': self._members,
-        }
-        self._build_model(denormalized, shapes, self.name)
-        resolver = ShapeResolver(shape_map=shapes)
-        return StructureShape(
-            shape_name=self.name,
-            shape_model=shapes[self.name],
-            shape_resolver=resolver,
-        )
-
-    def _build_model(self, model, shapes, shape_name):
-        if model['type'] == 'structure':
-            shapes[shape_name] = self._build_structure(model, shapes)
-        elif model['type'] == 'list':
-            shapes[shape_name] = self._build_list(model, shapes)
-        elif model['type'] == 'map':
-            shapes[shape_name] = self._build_map(model, shapes)
-        elif model['type'] in self.SCALAR_TYPES:
-            shapes[shape_name] = self._build_scalar(model)
-        else:
-            raise InvalidShapeError(f"Unknown shape type: {model['type']}")
-
-    def _build_structure(self, model, shapes):
-        members = OrderedDict()
-        shape = self._build_initial_shape(model)
-        shape['members'] = members
-
-        for name, member_model in model.get('members', OrderedDict()).items():
-            member_shape_name = self._get_shape_name(member_model)
-            members[name] = {'shape': member_shape_name}
-            self._build_model(member_model, shapes, member_shape_name)
-        return shape
-
-    def _build_list(self, model, shapes):
-        member_shape_name = self._get_shape_name(model)
-        shape = self._build_initial_shape(model)
-        shape['member'] = {'shape': member_shape_name}
-        self._build_model(model['member'], shapes, member_shape_name)
-        return shape
-
-    def _build_map(self, model, shapes):
-        key_shape_name = self._get_shape_name(model['key'])
-        value_shape_name = self._get_shape_name(model['value'])
-        shape = self._build_initial_shape(model)
-        shape['key'] = {'shape': key_shape_name}
-        shape['value'] = {'shape': value_shape_name}
-        self._build_model(model['key'], shapes, key_shape_name)
-        self._build_model(model['value'], shapes, value_shape_name)
-        return shape
-
-    def _build_initial_shape(self, model):
-        shape = {
-            'type': model['type'],
-        }
-        if 'documentation' in model:
-            shape['documentation'] = model['documentation']
-        for attr in Shape.METADATA_ATTRS:
-            if attr in model:
-                shape[attr] = model[attr]
-        return shape
-
-    def _build_scalar(self, model):
-        return self._build_initial_shape(model)
-
-    def _get_shape_name(self, model):
-        if 'shape_name' in model:
-            return model['shape_name']
-        else:
-            return self._name_generator.new_shape_name(model['type'])
-
-
-class ShapeNameGenerator:
-    """Generate unique shape names for a type.
-
-    This class can be used in conjunction with the DenormalizedStructureBuilder
-    to generate unique shape names for a given type.
-
-    """
-
-    def __init__(self):
-        self._name_cache = defaultdict(int)
-
-    def new_shape_name(self, type_name):
-        """Generate a unique shape name.
-
-        This method will guarantee a unique shape name each time it is
-        called with the same type.
-
-        ::
-
-            >>> s = ShapeNameGenerator()
-            >>> s.new_shape_name('structure')
-            'StructureType1'
-            >>> s.new_shape_name('structure')
-            'StructureType2'
-            >>> s.new_shape_name('list')
-            'ListType1'
-            >>> s.new_shape_name('list')
-            'ListType2'
-
-
-        :type type_name: string
-        :param type_name: The type name (structure, list, map, string, etc.)
-
-        :rtype: string
-        :return: A unique shape name for the given type
-
-        """
-        self._name_cache[type_name] += 1
-        current_index = self._name_cache[type_name]
-        return f'{type_name.capitalize()}Type{current_index}'
diff --git a/spaces/BilalSardar/Gpt4All/app.py b/spaces/BilalSardar/Gpt4All/app.py
deleted file mode 100644
index 91ee75c82efa79543c12d0895c14e4cc403ed703..0000000000000000000000000000000000000000
--- a/spaces/BilalSardar/Gpt4All/app.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import gradio as gr
-from nomic.gpt4all.gpt4all import GPT4AllGPU
-
-m = GPT4AllGPU()
-m.open()
-
-def chat(input):
-    return m.prompt(input)
-
-demo = gr.Interface(fn=chat,
-                  inputs="text",
-                  outputs="text",
-                  examples=[['write me a story about a lonely computer']],
-                  title="GPT4ALL",
-                  description="Check https://github.com/nomic-ai/gpt4all"
-                 )
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/Branon/TurboKeys/README.md b/spaces/Branon/TurboKeys/README.md
deleted file mode 100644
index afb11a93a52894a3b9ce9ad18b2f6b640f61a050..0000000000000000000000000000000000000000
--- a/spaces/Branon/TurboKeys/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: TurboKeys
-emoji: 😔
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
-duplicated_from: Branon/TempBRICS
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Brasd99/JustClothify/app.py b/spaces/Brasd99/JustClothify/app.py
deleted file mode 100644
index f54be10bd7cd5e271b5711e549330f59cac7d511..0000000000000000000000000000000000000000
--- a/spaces/Brasd99/JustClothify/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import os
-import subprocess
-from typing import Dict
-import json
-import numpy as np
-import wget
-import gradio as gr
-subprocess.call(['pip', 'install', 'git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose'])
-from helpers.processor import TextureProcessor
-
-def image_processing(person_img: np.ndarray, model_img: np.ndarray) -> np.ndarray:
-    print('Attempt to get textured image.')
-    return texture_processor.extract(person_img, model_img)
-
-def load_model(current_path: str, config: Dict) -> None:
-    data_path = os.path.join(current_path, 'data')
-    if not os.path.isdir(data_path):
-        os.mkdir(data_path)
-        for filename, url in config.items():
-            wget.download(url, os.path.join(data_path, filename))
-
-with open("config.json", "r") as f:
-    config = json.load(f)
-
-current_path = os.getcwd()
-load_model(current_path, config)
-densepose_config = os.path.join(current_path, 'data', 'config.yaml')
-densepose_weights = os.path.join(current_path, 'data', 'weights.pkl')
-
-texture_processor = TextureProcessor(densepose_config, densepose_weights)
-
-title = '<h1 style="text-align:center">JustClothify</h1>'
-
-with gr.Blocks(theme='soft', title='JustClothify') as blocks:
-    gr.HTML(title)
-    gr.Markdown('Upload an image of a person and an image of a model with clothes, the system will generate an image of a person wearing these clothes.')
-    with gr.Row():
-        person_image = gr.inputs.Image(label='Person Image', type='numpy')
-        model_image = gr.inputs.Image(label='Model Image (with clothes)', type='numpy')
-    process_button = gr.Button('Process')
-    outputs = gr.outputs.Image(label='Result Image', type='numpy')
-
-    process_button.click(fn=image_processing, inputs=[person_image, model_image], outputs=outputs)
-
-blocks.launch()
\ No newline at end of file
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp
deleted file mode 100644
index faabe0011f33f21fed6486963c91b0dc7c8fa32d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-#include <ATen/TensorUtils.h>
-#include "ROIAlign.h"
-
-namespace {
-
-// implementation taken from Caffe2
-template <typename T>
-struct PreCalc {
-  int pos1;
-  int pos2;
-  int pos3;
-  int pos4;
-  T w1;
-  T w2;
-  T w3;
-  T w4;
-};
-
-template <typename T>
-void pre_calc_for_bilinear_interpolate(
-    const int height,
-    const int width,
-    const int pooled_height,
-    const int pooled_width,
-    const int iy_upper,
-    const int ix_upper,
-    T roi_start_h,
-    T roi_start_w,
-    T bin_size_h,
-    T bin_size_w,
-    int roi_bin_grid_h,
-    int roi_bin_grid_w,
-    std::vector<PreCalc<T>>& pre_calc) {
-  int pre_calc_index = 0;
-  for (int ph = 0; ph < pooled_height; ph++) {
-    for (int pw = 0; pw < pooled_width; pw++) {
-      for (int iy = 0; iy < iy_upper; iy++) {
-        const T yy = roi_start_h + ph * bin_size_h +
-            static_cast<T>(iy + .5f) * bin_size_h /
-                static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
-        for (int ix = 0; ix < ix_upper; ix++) {
-          const T xx = roi_start_w + pw * bin_size_w +
-              static_cast<T>(ix + .5f) * bin_size_w /
-                  static_cast<T>(roi_bin_grid_w);
-
-          T x = xx;
-          T y = yy;
-          // deal with: inverse elements are out of feature map boundary
-          if (y < -1.0 || y > height || x < -1.0 || x > width) {
-            // empty
-            PreCalc<T> pc;
-            pc.pos1 = 0;
-            pc.pos2 = 0;
-            pc.pos3 = 0;
-            pc.pos4 = 0;
-            pc.w1 = 0;
-            pc.w2 = 0;
-            pc.w3 = 0;
-            pc.w4 = 0;
-            pre_calc[pre_calc_index] = pc;
-            pre_calc_index += 1;
-            continue;
-          }
-
-          if (y <= 0) {
-            y = 0;
-          }
-          if (x <= 0) {
-            x = 0;
-          }
-
-          int y_low = (int)y;
-          int x_low = (int)x;
-          int y_high;
-          int x_high;
-
-          if (y_low >= height - 1) {
-            y_high = y_low = height - 1;
-            y = (T)y_low;
-          } else {
-            y_high = y_low + 1;
-          }
-
-          if (x_low >= width - 1) {
-            x_high = x_low = width - 1;
-            x = (T)x_low;
-          } else {
-            x_high = x_low + 1;
-          }
-
-          T ly = y - y_low;
-          T lx = x - x_low;
-          T hy = 1. - ly, hx = 1. - lx;
-          T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
-
-          // save weights and indices
-          PreCalc<T> pc;
-          pc.pos1 = y_low * width + x_low;
-          pc.pos2 = y_low * width + x_high;
-          pc.pos3 = y_high * width + x_low;
-          pc.pos4 = y_high * width + x_high;
-          pc.w1 = w1;
-          pc.w2 = w2;
-          pc.w3 = w3;
-          pc.w4 = w4;
-          pre_calc[pre_calc_index] = pc;
-
-          pre_calc_index += 1;
-        }
-      }
-    }
-  }
-}
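-
-// Worked example of the bilinear weights above (illustration only): for a
-// sample point (y, x) = (0.6, 0.3) we get y_low = 0, x_low = 0, ly = 0.6,
-// lx = 0.3, hy = 0.4, hx = 0.7, hence
-//   w1 = hy * hx = 0.28, w2 = hy * lx = 0.12,
-//   w3 = ly * hx = 0.42, w4 = ly * lx = 0.18,
-// and the four weights sum to 1 for any in-bounds sample point.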
-
-template <typename T>
-void ROIAlignForward(
-    const int nthreads,
-    const T* input,
-    const T& spatial_scale,
-    const int channels,
-    const int height,
-    const int width,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio,
-    const T* rois,
-    T* output,
-    bool aligned) {
-  int n_rois = nthreads / channels / pooled_width / pooled_height;
-  // (n, c, ph, pw) is an element in the pooled output
-  // can be parallelized using omp
-  // #pragma omp parallel for num_threads(32)
-  for (int n = 0; n < n_rois; n++) {
-    int index_n = n * channels * pooled_width * pooled_height;
-
-    const T* offset_rois = rois + n * 5;
-    int roi_batch_ind = offset_rois[0];
-
-    // Do not use rounding; this implementation detail is critical
-    T offset = aligned ? (T)0.5 : (T)0.0;
-    T roi_start_w = offset_rois[1] * spatial_scale - offset;
-    T roi_start_h = offset_rois[2] * spatial_scale - offset;
-    T roi_end_w = offset_rois[3] * spatial_scale - offset;
-    T roi_end_h = offset_rois[4] * spatial_scale - offset;
-
-    T roi_width = roi_end_w - roi_start_w;
-    T roi_height = roi_end_h - roi_start_h;
-    if (aligned) {
-      AT_ASSERTM(
-          roi_width >= 0 && roi_height >= 0,
-          "ROIs in ROIAlign cannot have non-negative size!");
-    } else { // for backward-compatibility only
-      roi_width = std::max(roi_width, (T)1.);
-      roi_height = std::max(roi_height, (T)1.);
-    }
-    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
-    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
-
-    // We use roi_bin_grid to sample the grid and mimic integral
-    int roi_bin_grid_h = (sampling_ratio > 0)
-        ? sampling_ratio
-        : ceil(roi_height / pooled_height); // e.g., = 2
-    int roi_bin_grid_w =
-        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
-
-    // We do average (integral) pooling inside a bin
-    // When the grid is empty, output zeros == 0/1, instead of NaN.
-    const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
-
-    // we want to precalculate indices and weights shared by all channels,
-    // this is the key point of optimization
-    std::vector<PreCalc<T>> pre_calc(
-        roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
-    pre_calc_for_bilinear_interpolate(
-        height,
-        width,
-        pooled_height,
-        pooled_width,
-        roi_bin_grid_h,
-        roi_bin_grid_w,
-        roi_start_h,
-        roi_start_w,
-        bin_size_h,
-        bin_size_w,
-        roi_bin_grid_h,
-        roi_bin_grid_w,
-        pre_calc);
-
-    for (int c = 0; c < channels; c++) {
-      int index_n_c = index_n + c * pooled_width * pooled_height;
-      const T* offset_input =
-          input + (roi_batch_ind * channels + c) * height * width;
-      int pre_calc_index = 0;
-
-      for (int ph = 0; ph < pooled_height; ph++) {
-        for (int pw = 0; pw < pooled_width; pw++) {
-          int index = index_n_c + ph * pooled_width + pw;
-
-          T output_val = 0.;
-          for (int iy = 0; iy < roi_bin_grid_h; iy++) {
-            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
-              PreCalc<T> pc = pre_calc[pre_calc_index];
-              output_val += pc.w1 * offset_input[pc.pos1] +
-                  pc.w2 * offset_input[pc.pos2] +
-                  pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4];
-
-              pre_calc_index += 1;
-            }
-          }
-          output_val /= count;
-
-          output[index] = output_val;
-        } // for pw
-      } // for ph
-    } // for c
-  } // for n
-}
-
-template <typename T>
-void bilinear_interpolate_gradient(
-    const int height,
-    const int width,
-    T y,
-    T x,
-    T& w1,
-    T& w2,
-    T& w3,
-    T& w4,
-    int& x_low,
-    int& x_high,
-    int& y_low,
-    int& y_high,
-    const int index /* index for debug only*/) {
-  // deal with cases that inverse elements are out of feature map boundary
-  if (y < -1.0 || y > height || x < -1.0 || x > width) {
-    // empty
-    w1 = w2 = w3 = w4 = 0.;
-    x_low = x_high = y_low = y_high = -1;
-    return;
-  }
-
-  if (y <= 0)
-    y = 0;
-  if (x <= 0)
-    x = 0;
-
-  y_low = (int)y;
-  x_low = (int)x;
-
-  if (y_low >= height - 1) {
-    y_high = y_low = height - 1;
-    y = (T)y_low;
-  } else {
-    y_high = y_low + 1;
-  }
-
-  if (x_low >= width - 1) {
-    x_high = x_low = width - 1;
-    x = (T)x_low;
-  } else {
-    x_high = x_low + 1;
-  }
-
-  T ly = y - y_low;
-  T lx = x - x_low;
-  T hy = 1. - ly, hx = 1. - lx;
-
-  // reference in forward
-  // T v1 = input[y_low * width + x_low];
-  // T v2 = input[y_low * width + x_high];
-  // T v3 = input[y_high * width + x_low];
-  // T v4 = input[y_high * width + x_high];
-  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
-
-  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
-
-  return;
-}
-
-template <class T>
-inline void add(T* address, const T& val) {
-  *address += val;
-}
-
-template <typename T>
-void ROIAlignBackward(
-    const int nthreads,
-    const T* grad_output,
-    const T& spatial_scale,
-    const int channels,
-    const int height,
-    const int width,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio,
-    T* grad_input,
-    const T* rois,
-    const int n_stride,
-    const int c_stride,
-    const int h_stride,
-    const int w_stride,
-    bool aligned) {
-  for (int index = 0; index < nthreads; index++) {
-    // (n, c, ph, pw) is an element in the pooled output
-    int pw = index % pooled_width;
-    int ph = (index / pooled_width) % pooled_height;
-    int c = (index / pooled_width / pooled_height) % channels;
-    int n = index / pooled_width / pooled_height / channels;
-
-    const T* offset_rois = rois + n * 5;
-    int roi_batch_ind = offset_rois[0];
-
-    // Do not use rounding; this implementation detail is critical
-    T offset = aligned ? (T)0.5 : (T)0.0;
-    T roi_start_w = offset_rois[1] * spatial_scale - offset;
-    T roi_start_h = offset_rois[2] * spatial_scale - offset;
-    T roi_end_w = offset_rois[3] * spatial_scale - offset;
-    T roi_end_h = offset_rois[4] * spatial_scale - offset;
-
-    T roi_width = roi_end_w - roi_start_w;
-    T roi_height = roi_end_h - roi_start_h;
-    if (aligned) {
-      AT_ASSERTM(
-          roi_width >= 0 && roi_height >= 0,
-          "ROIs in ROIAlign do not have non-negative size!");
-    } else { // for backward-compatibility only
-      roi_width = std::max(roi_width, (T)1.);
-      roi_height = std::max(roi_height, (T)1.);
-    }
-    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
-    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
-
-    T* offset_grad_input =
-        grad_input + ((roi_batch_ind * channels + c) * height * width);
-
-    int output_offset = n * n_stride + c * c_stride;
-    const T* offset_grad_output = grad_output + output_offset;
-    const T grad_output_this_bin =
-        offset_grad_output[ph * h_stride + pw * w_stride];
-
-    // We use roi_bin_grid to sample the grid and mimic integral
-    int roi_bin_grid_h = (sampling_ratio > 0)
-        ? sampling_ratio
-        : ceil(roi_height / pooled_height); // e.g., = 2
-    int roi_bin_grid_w =
-        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
-
-    // We do average (integral) pooling inside a bin
-    const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
-
-    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
-      const T y = roi_start_h + ph * bin_size_h +
-          static_cast<T>(iy + .5f) * bin_size_h /
-              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
-      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
-        const T x = roi_start_w + pw * bin_size_w +
-            static_cast<T>(ix + .5f) * bin_size_w /
-                static_cast<T>(roi_bin_grid_w);
-
-        T w1, w2, w3, w4;
-        int x_low, x_high, y_low, y_high;
-
-        bilinear_interpolate_gradient(
-            height,
-            width,
-            y,
-            x,
-            w1,
-            w2,
-            w3,
-            w4,
-            x_low,
-            x_high,
-            y_low,
-            y_high,
-            index);
-
-        T g1 = grad_output_this_bin * w1 / count;
-        T g2 = grad_output_this_bin * w2 / count;
-        T g3 = grad_output_this_bin * w3 / count;
-        T g4 = grad_output_this_bin * w4 / count;
-
-        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
-          // atomic add is not needed for now since it is single threaded
-          add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
-          add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
-          add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
-          add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
-        } // if
-      } // ix
-    } // iy
-  } // for
-} // ROIAlignBackward
-
-} // namespace
-
-namespace detectron2 {
-
-at::Tensor ROIAlign_forward_cpu(
-    const at::Tensor& input,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio,
-    bool aligned) {
-  AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor");
-  AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
-
-  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
-
-  at::CheckedFrom c = "ROIAlign_forward_cpu";
-  at::checkAllSameType(c, {input_t, rois_t});
-
-  auto num_rois = rois.size(0);
-  auto channels = input.size(1);
-  auto height = input.size(2);
-  auto width = input.size(3);
-
-  at::Tensor output = at::zeros(
-      {num_rois, channels, pooled_height, pooled_width}, input.options());
-
-  auto output_size = num_rois * pooled_height * pooled_width * channels;
-
-  if (output.numel() == 0)
-    return output;
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
-    ROIAlignForward<scalar_t>(
-        output_size,
-        input.contiguous().data_ptr<scalar_t>(),
-        spatial_scale,
-        channels,
-        height,
-        width,
-        pooled_height,
-        pooled_width,
-        sampling_ratio,
-        rois.contiguous().data_ptr<scalar_t>(),
-        output.data_ptr<scalar_t>(),
-        aligned);
-  });
-  return output;
-}
-
-at::Tensor ROIAlign_backward_cpu(
-    const at::Tensor& grad,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int batch_size,
-    const int channels,
-    const int height,
-    const int width,
-    const int sampling_ratio,
-    bool aligned) {
-  AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor");
-  AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
-
-  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
-
-  at::CheckedFrom c = "ROIAlign_backward_cpu";
-  at::checkAllSameType(c, {grad_t, rois_t});
-
-  at::Tensor grad_input =
-      at::zeros({batch_size, channels, height, width}, grad.options());
-
-  // handle possibly empty gradients
-  if (grad.numel() == 0) {
-    return grad_input;
-  }
-
-  // get stride values to ensure indexing into gradients is correct.
-  int n_stride = grad.stride(0);
-  int c_stride = grad.stride(1);
-  int h_stride = grad.stride(2);
-  int w_stride = grad.stride(3);
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
-    ROIAlignBackward<scalar_t>(
-        grad.numel(),
-        grad.contiguous().data_ptr<scalar_t>(),
-        spatial_scale,
-        channels,
-        height,
-        width,
-        pooled_height,
-        pooled_width,
-        sampling_ratio,
-        grad_input.data_ptr<scalar_t>(),
-        rois.contiguous().data_ptr<scalar_t>(),
-        n_stride,
-        c_stride,
-        h_stride,
-        w_stride,
-        aligned);
-  });
-  return grad_input;
-}
-
-} // namespace detectron2
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/for_each.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/for_each.h
deleted file mode 100644
index 542dcf754e752866324d38b630364a8d44a7b75f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/for_each.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-
-#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
-#include <iterator>
-#include <thrust/system/cuda/config.h>
-
-#include <thrust/system/cuda/detail/util.h>
-#include <thrust/system/cuda/detail/parallel_for.h>
-#include <thrust/detail/function.h>
-#include <thrust/distance.h>
-
-namespace thrust
-{
-
-namespace cuda_cub {
-
-  // for_each functor
-  template <class Input, class UnaryOp>
-  struct for_each_f
-  {
-    Input input;
-    UnaryOp op;
-
-    THRUST_FUNCTION
-    for_each_f(Input input, UnaryOp op)
-        : input(input), op(op) {}
-
-    template <class Size>
-    THRUST_DEVICE_FUNCTION void operator()(Size idx)
-    {
-      op(raw_reference_cast(input[idx]));
-    }
-  };
-
-  //-------------------------
-  // Thrust API entry points
-  //-------------------------
-
-  // for_each_n
-  template <class Derived,
-            class Input,
-            class Size,
-            class UnaryOp>
-  Input THRUST_FUNCTION
-  for_each_n(execution_policy<Derived> &policy,
-             Input                      first,
-             Size                       count,
-             UnaryOp                    op)
-  {
-    typedef thrust::detail::wrapped_function<UnaryOp, void> wrapped_t;
-    wrapped_t wrapped_op(op);
-
-    cuda_cub::parallel_for(policy,
-                           for_each_f<Input, wrapped_t>(first, wrapped_op),
-                           count);
-
-    cuda_cub::throw_on_error(
-      cuda_cub::synchronize(policy)
-    , "for_each: failed to synchronize"
-    );
-
-    return first + count;
-  }
-
-  // for_each
-  template <class Derived,
-            class Input,
-            class UnaryOp>
-  Input THRUST_FUNCTION
-  for_each(execution_policy<Derived> &policy,
-           Input                      first,
-           Input                      last,
-           UnaryOp                    op)
-  {
-    typedef typename iterator_traits<Input>::difference_type size_type;
-    size_type count = static_cast<size_type>(thrust::distance(first,last));
-    return cuda_cub::for_each_n(policy, first,  count, op);
-  }
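-
-  // Minimal usage sketch (illustration only, not part of this header);
-  // thrust::for_each on device_vector iterators dispatches to the
-  // overload above under the CUDA system:
-  //
-  //   #include <thrust/for_each.h>
-  //   #include <thrust/device_vector.h>
-  //   struct doubler { __device__ void operator()(int& x) { x *= 2; } };
-  //   thrust::device_vector<int> v(10, 1);
-  //   thrust::for_each(v.begin(), v.end(), doubler{});  // every element == 2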
-}    // namespace cuda_cub
-
-} // end namespace thrust
-#endif
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/generate.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/generate.h
deleted file mode 100644
index f907b6acc079577642c446d6f0736073defc44b8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/generate.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits generate
-#include <thrust/system/cpp/detail/generate.h>
-
diff --git a/spaces/CVPR/regionclip-demo/datasets/prepare_cocofied_lvis.py b/spaces/CVPR/regionclip-demo/datasets/prepare_cocofied_lvis.py
deleted file mode 100644
index 245c88482a9e2405e5a912b5c560aed78a614a13..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/datasets/prepare_cocofied_lvis.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import json
-import os
-from collections import defaultdict
-
-# This mapping is extracted from the official LVIS mapping:
-# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
-COCO_SYNSET_CATEGORIES = [
-    {"synset": "person.n.01", "coco_cat_id": 1},
-    {"synset": "bicycle.n.01", "coco_cat_id": 2},
-    {"synset": "car.n.01", "coco_cat_id": 3},
-    {"synset": "motorcycle.n.01", "coco_cat_id": 4},
-    {"synset": "airplane.n.01", "coco_cat_id": 5},
-    {"synset": "bus.n.01", "coco_cat_id": 6},
-    {"synset": "train.n.01", "coco_cat_id": 7},
-    {"synset": "truck.n.01", "coco_cat_id": 8},
-    {"synset": "boat.n.01", "coco_cat_id": 9},
-    {"synset": "traffic_light.n.01", "coco_cat_id": 10},
-    {"synset": "fireplug.n.01", "coco_cat_id": 11},
-    {"synset": "stop_sign.n.01", "coco_cat_id": 13},
-    {"synset": "parking_meter.n.01", "coco_cat_id": 14},
-    {"synset": "bench.n.01", "coco_cat_id": 15},
-    {"synset": "bird.n.01", "coco_cat_id": 16},
-    {"synset": "cat.n.01", "coco_cat_id": 17},
-    {"synset": "dog.n.01", "coco_cat_id": 18},
-    {"synset": "horse.n.01", "coco_cat_id": 19},
-    {"synset": "sheep.n.01", "coco_cat_id": 20},
-    {"synset": "beef.n.01", "coco_cat_id": 21},
-    {"synset": "elephant.n.01", "coco_cat_id": 22},
-    {"synset": "bear.n.01", "coco_cat_id": 23},
-    {"synset": "zebra.n.01", "coco_cat_id": 24},
-    {"synset": "giraffe.n.01", "coco_cat_id": 25},
-    {"synset": "backpack.n.01", "coco_cat_id": 27},
-    {"synset": "umbrella.n.01", "coco_cat_id": 28},
-    {"synset": "bag.n.04", "coco_cat_id": 31},
-    {"synset": "necktie.n.01", "coco_cat_id": 32},
-    {"synset": "bag.n.06", "coco_cat_id": 33},
-    {"synset": "frisbee.n.01", "coco_cat_id": 34},
-    {"synset": "ski.n.01", "coco_cat_id": 35},
-    {"synset": "snowboard.n.01", "coco_cat_id": 36},
-    {"synset": "ball.n.06", "coco_cat_id": 37},
-    {"synset": "kite.n.03", "coco_cat_id": 38},
-    {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
-    {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
-    {"synset": "skateboard.n.01", "coco_cat_id": 41},
-    {"synset": "surfboard.n.01", "coco_cat_id": 42},
-    {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
-    {"synset": "bottle.n.01", "coco_cat_id": 44},
-    {"synset": "wineglass.n.01", "coco_cat_id": 46},
-    {"synset": "cup.n.01", "coco_cat_id": 47},
-    {"synset": "fork.n.01", "coco_cat_id": 48},
-    {"synset": "knife.n.01", "coco_cat_id": 49},
-    {"synset": "spoon.n.01", "coco_cat_id": 50},
-    {"synset": "bowl.n.03", "coco_cat_id": 51},
-    {"synset": "banana.n.02", "coco_cat_id": 52},
-    {"synset": "apple.n.01", "coco_cat_id": 53},
-    {"synset": "sandwich.n.01", "coco_cat_id": 54},
-    {"synset": "orange.n.01", "coco_cat_id": 55},
-    {"synset": "broccoli.n.01", "coco_cat_id": 56},
-    {"synset": "carrot.n.01", "coco_cat_id": 57},
-    {"synset": "frank.n.02", "coco_cat_id": 58},
-    {"synset": "pizza.n.01", "coco_cat_id": 59},
-    {"synset": "doughnut.n.02", "coco_cat_id": 60},
-    {"synset": "cake.n.03", "coco_cat_id": 61},
-    {"synset": "chair.n.01", "coco_cat_id": 62},
-    {"synset": "sofa.n.01", "coco_cat_id": 63},
-    {"synset": "pot.n.04", "coco_cat_id": 64},
-    {"synset": "bed.n.01", "coco_cat_id": 65},
-    {"synset": "dining_table.n.01", "coco_cat_id": 67},
-    {"synset": "toilet.n.02", "coco_cat_id": 70},
-    {"synset": "television_receiver.n.01", "coco_cat_id": 72},
-    {"synset": "laptop.n.01", "coco_cat_id": 73},
-    {"synset": "mouse.n.04", "coco_cat_id": 74},
-    {"synset": "remote_control.n.01", "coco_cat_id": 75},
-    {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
-    {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
-    {"synset": "microwave.n.02", "coco_cat_id": 78},
-    {"synset": "oven.n.01", "coco_cat_id": 79},
-    {"synset": "toaster.n.02", "coco_cat_id": 80},
-    {"synset": "sink.n.01", "coco_cat_id": 81},
-    {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
-    {"synset": "book.n.01", "coco_cat_id": 84},
-    {"synset": "clock.n.01", "coco_cat_id": 85},
-    {"synset": "vase.n.01", "coco_cat_id": 86},
-    {"synset": "scissors.n.01", "coco_cat_id": 87},
-    {"synset": "teddy.n.01", "coco_cat_id": 88},
-    {"synset": "hand_blower.n.01", "coco_cat_id": 89},
-    {"synset": "toothbrush.n.01", "coco_cat_id": 90},
-]
-
-
-def cocofy_lvis(input_filename, output_filename):
-    """
-    Filter LVIS instance segmentation annotations to remove all categories that are not included in
-    COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
-    the output json are the non-contiguous COCO dataset ids.
-
-    Args:
-        input_filename (str): path to the LVIS json file.
-        output_filename (str): path to the COCOfied json file.
-    """
-
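-    # Example of the end-to-end relabeling (values from the table above): an
-    # LVIS category whose synset is "person.n.01" becomes COCO category id 1
-    # in the COCOfied output.
-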
-    with open(input_filename, "r") as f:
-        lvis_json = json.load(f)
-
-    lvis_annos = lvis_json.pop("annotations")
-    cocofied_lvis = copy.deepcopy(lvis_json)
-    lvis_json["annotations"] = lvis_annos
-
-    # Mapping from lvis cat id to coco cat id via synset
-    lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
-    synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
-    # Synsets that we will keep in the dataset
-    synsets_to_keep = set(synset_to_coco_cat_id.keys())
-    coco_cat_id_with_instances = defaultdict(int)
-
-    new_annos = []
-    ann_id = 1
-    for ann in lvis_annos:
-        lvis_cat_id = ann["category_id"]
-        synset = lvis_cat_id_to_synset[lvis_cat_id]
-        if synset not in synsets_to_keep:
-            continue
-        coco_cat_id = synset_to_coco_cat_id[synset]
-        new_ann = copy.deepcopy(ann)
-        new_ann["category_id"] = coco_cat_id
-        new_ann["id"] = ann_id
-        ann_id += 1
-        new_annos.append(new_ann)
-        coco_cat_id_with_instances[coco_cat_id] += 1
-    cocofied_lvis["annotations"] = new_annos
-
-    for image in cocofied_lvis["images"]:
-        for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
-            new_category_list = []
-            for lvis_cat_id in image[key]:
-                synset = lvis_cat_id_to_synset[lvis_cat_id]
-                if synset not in synsets_to_keep:
-                    continue
-                coco_cat_id = synset_to_coco_cat_id[synset]
-                new_category_list.append(coco_cat_id)
-                coco_cat_id_with_instances[coco_cat_id] += 1
-            image[key] = new_category_list
-
-    coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
-
-    new_categories = []
-    for cat in lvis_json["categories"]:
-        synset = cat["synset"]
-        if synset not in synsets_to_keep:
-            continue
-        coco_cat_id = synset_to_coco_cat_id[synset]
-        if coco_cat_id not in coco_cat_id_with_instances:
-            continue
-        new_cat = copy.deepcopy(cat)
-        new_cat["id"] = coco_cat_id
-        new_categories.append(new_cat)
-    cocofied_lvis["categories"] = new_categories
-
-    with open(output_filename, "w") as f:
-        json.dump(cocofied_lvis, f)
-    print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
-
-
-if __name__ == "__main__":
-    dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
-    for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
-        print("Start COCOfing {}.".format(s))
-        cocofy_lvis(
-            os.path.join(dataset_dir, "{}.json".format(s)),
-            os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
-        )
diff --git a/spaces/Chirag4579/prakalpa-image-comparator/app.py b/spaces/Chirag4579/prakalpa-image-comparator/app.py
deleted file mode 100644
index 5f30db23694f1cc683dc7579bf61cab58afd6ff8..0000000000000000000000000000000000000000
--- a/spaces/Chirag4579/prakalpa-image-comparator/app.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import streamlit as st
-from streamlit_image_comparison import image_comparison
-
-st.set_page_config(page_title="Prakalpa", page_icon="🔭", layout="centered")
-# title_container = st.container()
-# col1, col2 = st.columns([1, 20])
-# image = Image.open('C:/Users/Chirag Chauhan/Desktop/100409820-removebg-preview.png')
-# with title_container:
-#     with col1:
-#         st.sidebar.image(image, width=300)
-    # with col2:
-    #     st.sidebar.markdown('<h1 style="color: blue;">Prakalpa</h1>',
-    #     unsafe_allow_html=True)
-st.sidebar.markdown("# *Hubble vs James Webb*")
-
-
-def main():
-    html_temp = """
-    <style>
-    #MainMenu {visibility:hidden;}
-    tbody th {display:none}
-    .blank {display:none}
-    h1 {
-    text-align: center;
-    }
-    [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
-        width: 450px;
-    }
-    [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
-        width: 450px;
-    }
-    div.block-container{top:-20px;}
-    </style>
-    """
-
-    st.markdown(html_temp, unsafe_allow_html=True)
-
-    selection = st.sidebar.multiselect('Select comparison image:', ['Southern Nebula', "Stephan's Quintet",
-                                                                    'Galaxy Cluster SMACS 0723'])
-
-    if 'Southern Nebula' in selection:
-        with st.expander('Southern Nebula'):
-            image_comparison(
-                img1="https://www.webbcompare.com/img/hubble/southern_nebula_700.jpg",
-                img2="https://www.webbcompare.com/img/webb/southern_nebula_700.jpg",
-                label1="Hubble",
-                label2="Webb",
-                width=660,
-                make_responsive=True
-            )
-
-    if "Stephan's Quintet" in selection:
-        with st.expander("Stephan's Quintet"):
-            image_comparison(
-                img1="https://www.webbcompare.com/img/hubble/stephans_quintet_1400.jpg",
-                img2="https://www.webbcompare.com/img/webb/stephans_quintet_1400.jpg",
-                label1="Hubble",
-                label2="Webb",
-                width=660,
-                make_responsive=True
-            )
-
-    if "Galaxy Cluster SMACS 0723" in selection:
-        with st.expander('Galaxy Cluster SMACS 0723'):
-            image_comparison(
-                img1="https://www.webbcompare.com/img/hubble/deep_field_700.jpg",
-                img2="https://www.webbcompare.com/img/webb/deep_field_700.jpg",
-                label1="Hubble",
-                label2="Webb",
-                width=660,
-                make_responsive=True
-            )
-
-    # if "Carina Nebula" in selection:
-    #     with st.expander('Carina Nebula'):
-    #         image_comparison(
-    #             img1="https://www.webbcompare.com/img/hubble/carina_1400.png",
-    #             img2="https://www.webbcompare.com/img/webb/carina_1400.jpg",
-    #             label1="Hubble",
-    #             label2="Webb",
-    #             width=660,
-    #             make_responsive=True,
-    #             starting_position=50
-    #         )
-
-
-main()
\ No newline at end of file
diff --git a/spaces/CognitiveAIForHealth/README/README.md b/spaces/CognitiveAIForHealth/README/README.md
deleted file mode 100644
index 76e2f7de910354014950cb360461f1021e7bf3d3..0000000000000000000000000000000000000000
--- a/spaces/CognitiveAIForHealth/README/README.md
+++ /dev/null
@@ -1,143 +0,0 @@
-
-# Classroom Examples for Today: 🚀[Examples](https://huggingface.co/spaces/awacke1/AIZTH-03-09-2023)
-
-# 👋 Two easy ways to turbo boost your AI learning journey! 💻
-# 🌐 AI Pair Programming 
-## Open 2 Browsers to:
-1. __🌐 ChatGPT__ [URL](https://chat.openai.com/chat) or [URL2](https://platform.openai.com/playground) and 
-2. __🌐 Huggingface__ [URL](https://huggingface.co/awacke1)  in separate browser windows.
-1. 🤖 Use prompts to generate a streamlit program on Huggingface or locally to test it.
-2. 🔧 For advanced work, add Python 3.10 and VSCode locally, and debug as gradio or streamlit apps.
-3. 🚀 Use these two superpower processes to reduce the time it takes you to make a new AI program! ⏱️
-
-Example Starter Prompt:
-
-```
-Write a streamlit program that demonstrates Data synthesis.
-Synthesize data from multiple sources to create new datasets.
-Use two datasets and demonstrate pandas dataframe query merge and join
-with two datasets in python list dictionaries:
-List of Hospitals that are over 1000 bed count by city and state, and
-State population size and square miles.
-Perform a calculated function on the merged dataset.
-```
-
-# 🎥 YouTube University Method:
-1. 🏋️‍♀️ Plan two hours each weekday to exercise your body and brain.
-2. 🎬 Make a playlist of videos you want to learn from on YouTube. Save the links to edit later.
-3. 🚀 Try watching the videos at a faster speed while exercising, and sample the first five minutes of each video.
-4. 📜 Reorder the playlist so the most useful videos are at the front, and take breaks to exercise.
-5. 📝 Practice note-taking in markdown to instantly save what you want to remember. Share your notes with others!
-6. 👥 AI Pair Programming Using Long Answer Language Models with Human Feedback:
-## 🎥 2023 AI/ML Advanced Learning Playlists:
-1. [2023 QA Models and Long Form Question Answering NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFovrkkx8HMTLNgYdjCMNYmX_)
-2. [FHIR Bioinformatics Development Using AI/ML and Python, Streamlit, and Gradio - 2022](https://www.youtube.com/playlist?list=PLHgX2IExbFovoMUC3hYXeFegpk_Y0Lz0Q)
-3. [2023 ChatGPT for Coding Assistant Streamlit, Gradio and Python Apps](https://www.youtube.com/playlist?list=PLHgX2IExbFouOEnppexiKZVdz_k5b0pvI)
-4. [2023 BigScience Bloom - Large Language Model for AI Systems and NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFouqnsIqziThlPCX_miiDq14)
-5. [2023 Streamlit Pro Tips for AI UI UX for Data Science, Engineering, and Mathematics](https://www.youtube.com/playlist?list=PLHgX2IExbFou3cP19hHO9Xb-cN8uwr5RM)
-6. [2023 Fun, New and Interesting AI, Videos, and AI/ML Techniques](https://www.youtube.com/playlist?list=PLHgX2IExbFotoMt32SrT3Xynt5BXTGnEP)
-7. [2023 Best Minds in AGI AI Gamification and Large Language Models](https://www.youtube.com/playlist?list=PLHgX2IExbFotmFeBTpyje1uI22n0GAkXT)
-8. [2023 State of the Art for Vision Image Classification, Text Classification and Regression, Extractive Question Answering and Tabular Classification](https://www.youtube.com/playlist?list=PLHgX2IExbFotPcPu6pauNHOoZTTbnAQ2F)
-9. [2023 AutoML DataRobot and AI Platforms for Building Models, Features, Test, and Transparency](https://www.youtube.com/playlist?list=PLHgX2IExbFovsY2oGbDwdEhPrakkC8i3g)
-
-
-
-
-## Language Models 🗣️
-🏆 Bloom sets a new record for the most performant and efficient AI model in science! 🌸
-
-### Comparison of Large Language Models
-| Model Name        | Model Size (in Parameters) |
-| ----------------- | -------------------------- |
-| BigScience-tr11-176B | 176 billion |
-| GPT-3             | 175 billion               |
-| OpenAI's DALL-E 2.0 | 500 million               |
-| NVIDIA's Megatron | 8.3 billion               |
-| Transformer-XL    | 250 million               |
-| XLNet             | 210 million               |
-
-## ChatGPT Datasets 📚
-- WebText
-- Common Crawl
-- BooksCorpus
-- English Wikipedia
-- Toronto Books Corpus
-- OpenWebText
-## ChatGPT Datasets - Details 📚
-- **WebText:** A dataset of web pages crawled from domains on the Alexa top 5,000 list. This dataset was used to pretrain GPT-2.
-  - [WebText: A Large-Scale Unsupervised Text Corpus by Radford et al.](https://paperswithcode.com/dataset/webtext)
-- **Common Crawl:** A dataset of web pages from a variety of domains, which is updated regularly. This dataset was used to pretrain GPT-3.
-  - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/common-crawl) by Brown et al.
-- **BooksCorpus:** A dataset of over 11,000 books from a variety of genres.
-  - [Scalable Methods for 8 Billion Token Language Modeling](https://paperswithcode.com/dataset/bookcorpus) by Zhu et al.
-- **English Wikipedia:** A dump of the English-language Wikipedia as of 2018, with articles from 2001-2017.
-  - [Improving Language Understanding by Generative Pre-Training](https://huggingface.co/spaces/awacke1/WikipediaUltimateAISearch?logs=build) Space for Wikipedia Search
-- **Toronto Books Corpus:** A dataset of over 7,000 books from a variety of genres, collected by the University of Toronto.
-  - [Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond](https://paperswithcode.com/dataset/bookcorpus) by Schwenk and Douze.
-- **OpenWebText:** A dataset of web pages that were filtered to remove content that was likely to be low-quality or spammy. This dataset was used to pretrain GPT-3.
-  - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/openwebtext) by Brown et al.
-    
-## Big Science Model 🚀
-- 📜 Papers:
-  1. BLOOM: A 176B-Parameter Open-Access Multilingual Language Model [Paper](https://arxiv.org/abs/2211.05100)
-  2. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism [Paper](https://arxiv.org/abs/1909.08053)
-  3. 8-bit Optimizers via Block-wise Quantization [Paper](https://arxiv.org/abs/2110.02861)
-  4. Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation [Paper](https://arxiv.org/abs/2108.12409)
-  5. [Other papers related to Big Science](https://huggingface.co/models?other=doi:10.57967/hf/0003)
-  6. [217 other models optimized for use with Bloom](https://huggingface.co/models?other=bloom)
- 
-- 📚 Datasets:
-  
-1. **Universal Dependencies:** A collection of annotated corpora for natural language processing in a range of languages, with a focus on dependency parsing.
-   - [Universal Dependencies official website.](https://universaldependencies.org/)
-2. **WMT 2014:** The fourth edition of the Workshop on Statistical Machine Translation, featuring shared tasks on translating between English and various other languages.
-   - [WMT14 website.](http://www.statmt.org/wmt14/)
-3. **The Pile:** An English language corpus of diverse text, sourced from various places on the internet.
-   - [The Pile official website.](https://pile.eleuther.ai/)
-4. **HumanEval:** A dataset of English sentences, annotated with human judgments on a range of linguistic qualities.
-   - [HumanEval: An Evaluation Benchmark for Language Understanding](https://github.com/google-research-datasets/humaneval) by Gabriel Ilharco, Daniel Loureiro, Pedro Rodriguez, and Afonso Mendes.
-5. **FLORES-101:** A dataset of parallel sentences in 101 languages, designed for multilingual machine translation.
-   - [FLORES-101: A Massively Multilingual Parallel Corpus for Language Understanding](https://flores101.opennmt.net/) by Aman Madaan, Shruti Rijhwani, Raghav Gupta, and Mitesh M. Khapra.
-6. **CrowS-Pairs:** A dataset of sentence pairs, designed for evaluating the plausibility of generated text.
-   - [CrowS-Pairs: A Challenge Dataset for Plausible Plausibility Judgments](https://github.com/stanford-cogsci/crows-pairs) by Andrea Madotto, Zhaojiang Lin, Chien-Sheng Wu, Pascale Fung, and Caiming Xiong.
-7. **WikiLingua:** A dataset of parallel sentences in 75 languages, sourced from Wikipedia.
-   - [WikiLingua: A New Benchmark Dataset for Cross-Lingual Wikification](https://arxiv.org/abs/2105.08031) by Jiarui Yao, Yanqiao Zhu, Ruihan Bao, Guosheng Lin, Lidong Bing, and Bei Shi.
-8. **MTEB:** A dataset of English sentences, annotated with their entailment relationships with respect to other sentences.
-   - [Multi-Task Evaluation Benchmark for Natural Language Inference](https://github.com/google-research-datasets/mteb) by Michał Lukasik, Marcin Junczys-Dowmunt, and Houda Bouamor.
-9. **xP3:** A dataset of English sentences, annotated with their paraphrase relationships with respect to other sentences.
-   - [xP3: A Large-Scale Evaluation Benchmark for Paraphrase Identification in Context](https://github.com/nyu-dl/xp3) by Aniket Didolkar, James Mayfield, Markus Saers, and Jason Baldridge.
-10. **DiaBLa:** A dataset of English dialogue, annotated with dialogue acts.
-    - [A Large-Scale Corpus for Conversation Disentanglement](https://github.com/HLTCHKUST/DiaBLA) by Samuel Broscheit, António Branco, and André F. T. Martins.
-    
-- 📚 Dataset Papers with Code
-  1. [Universal Dependencies](https://paperswithcode.com/dataset/universal-dependencies)
-  2. [WMT 2014](https://paperswithcode.com/dataset/wmt-2014)
-  3. [The Pile](https://paperswithcode.com/dataset/the-pile)
-  4. [HumanEval](https://paperswithcode.com/dataset/humaneval)
-  5. [FLORES-101](https://paperswithcode.com/dataset/flores-101)
-  6. [CrowS-Pairs](https://paperswithcode.com/dataset/crows-pairs)
-  7. [WikiLingua](https://paperswithcode.com/dataset/wikilingua)
-  8. [MTEB](https://paperswithcode.com/dataset/mteb)
-  9. [xP3](https://paperswithcode.com/dataset/xp3)
-  10. [DiaBLa](https://paperswithcode.com/dataset/diabla)
-      
-# Deep RL ML Strategy 🧠
-The AI strategies are:
-- Language Model Preparation: Supervised Fine-Tuning on Human-Augmented Data 🤖
-- Reward Model Training: Prompts Dataset with Multi-Model Generated Data to Rank 🎁
-- Fine-Tuning with Reinforcement Reward and Distance Distribution Regret Score 🎯
-- Proximal Policy Optimization Fine-Tuning 🤝
-- Variations - Preference Model Pretraining 🤔
-- Use Ranking Datasets - Sentiment Thumbs Up/Down, Distribution 📊
-- Online Version - Getting Feedback 💬
-- OpenAI - InstructGPT - Humans Generate LM Training Text 🔍
-- DeepMind - Advantage Actor-Critic: Sparrow, GopherCite 🦜
-- Reward Model from Human Preference Feedback 🏆
-
-  
-For more information on specific techniques and implementations, check out the following resources:
-- OpenAI's paper on [GPT-3](https://arxiv.org/abs/2005.14165), which details their approach to large-scale language model pretraining and few-shot prompting
-- Haarnoja et al.'s paper on [Soft Actor-Critic](https://arxiv.org/abs/1801.01290), which describes an off-policy maximum-entropy actor-critic algorithm
-- OpenAI's paper on [Reward Learning](https://arxiv.org/abs/1810.06580), which explains an approach to training reward models from human feedback
-- OpenAI's blog post on [GPT-3's fine-tuning process](https://openai.com/blog/fine-tuning-gpt-3/)
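-
-To make the reward-modeling step concrete, here is a minimal sketch of the pairwise ranking loss typically used: given a prompt with a human-preferred ("chosen") completion and a dispreferred ("rejected") one, the reward model is trained so the chosen completion scores higher. This is an illustrative Bradley-Terry-style sketch, not code from any of the papers above; `reward_model` and the token-id tensors are hypothetical placeholders.
-
-```python
-import torch.nn.functional as F
-
-def pairwise_reward_loss(reward_model, chosen_ids, rejected_ids):
-    """Bradley-Terry ranking loss: -log sigmoid(r_chosen - r_rejected)."""
-    # reward_model is a hypothetical callable mapping token ids to one
-    # scalar reward per sequence, shape (batch,).
-    r_chosen = reward_model(chosen_ids)
-    r_rejected = reward_model(rejected_ids)
-    # Maximize the log-likelihood that the chosen completion wins.
-    return -F.logsigmoid(r_chosen - r_rejected).mean()
-```
-
-The PPO fine-tuning step then optimizes the policy against this learned reward, with a KL penalty toward the supervised model to keep generations on-distribution.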
diff --git a/spaces/Datasculptor/MusicGen/app.py b/spaces/Datasculptor/MusicGen/app.py
deleted file mode 100644
index 0f92495d323f1c70a9c8dde3b7680e3f9491ab83..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/MusicGen/app.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
-# also released under the MIT license.
-
-import argparse
-from concurrent.futures import ProcessPoolExecutor
-import os
-from pathlib import Path
-import subprocess as sp
-from tempfile import NamedTemporaryFile
-import time
-import typing as tp
-import warnings
-
-import torch
-import gradio as gr
-
-from audiocraft.data.audio_utils import convert_audio
-from audiocraft.data.audio import audio_write
-from audiocraft.models import MusicGen
-
-
-MODEL = None  # Last used model
-IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
-MAX_BATCH_SIZE = 6
-BATCHED_DURATION = 15
-INTERRUPTING = False
-# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
-_old_call = sp.call
-
-
-def _call_nostderr(*args, **kwargs):
-    # Avoid ffmpeg spamming the logs.
-    kwargs['stderr'] = sp.DEVNULL
-    kwargs['stdout'] = sp.DEVNULL
-    return _old_call(*args, **kwargs)
-
-
-sp.call = _call_nostderr
-# Preallocating the pool of processes.
-pool = ProcessPoolExecutor(3)
-pool.__enter__()
-
-
-def interrupt():
-    global INTERRUPTING
-    INTERRUPTING = True
-
-
-class FileCleaner:
-    def __init__(self, file_lifetime: float = 3600):
-        self.file_lifetime = file_lifetime
-        self.files = []
-
-    def add(self, path: tp.Union[str, Path]):
-        self._cleanup()
-        self.files.append((time.time(), Path(path)))
-
-    def _cleanup(self):
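-        # Entries were appended in chronological order, so we can stop scanning
-        # at the first file that is still within its lifetime.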
-        now = time.time()
-        for time_added, path in list(self.files):
-            if now - time_added > self.file_lifetime:
-                if path.exists():
-                    path.unlink()
-                self.files.pop(0)
-            else:
-                break
-
-
-file_cleaner = FileCleaner()
-
-
-def make_waveform(*args, **kwargs):
-    # Further remove some warnings.
-    be = time.time()
-    with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
-        out = gr.make_waveform(*args, **kwargs)
-        print("Make a video took", time.time() - be)
-        return out
-
-
-def load_model(version='melody'):
-    global MODEL
-    print("Loading model", version)
-    if MODEL is None or MODEL.name != version:
-        MODEL = MusicGen.get_pretrained(version)
-
-
-def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
-    MODEL.set_generation_params(duration=duration, **gen_kwargs)
-    print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
-    be = time.time()
-    processed_melodies = []
-    target_sr = 32000
-    target_ac = 1
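-    # MusicGen conditions on mono audio at 32 kHz, so each reference melody is
-    # trimmed to the requested duration and downmixed/resampled below.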
-    for melody in melodies:
-        if melody is None:
-            processed_melodies.append(None)
-        else:
-            sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
-            if melody.dim() == 1:
-                melody = melody[None]
-            melody = melody[..., :int(sr * duration)]
-            melody = convert_audio(melody, sr, target_sr, target_ac)
-            processed_melodies.append(melody)
-
-    if any(m is not None for m in processed_melodies):
-        outputs = MODEL.generate_with_chroma(
-            descriptions=texts,
-            melody_wavs=processed_melodies,
-            melody_sample_rate=target_sr,
-            progress=progress,
-        )
-    else:
-        outputs = MODEL.generate(texts, progress=progress)
-
-    outputs = outputs.detach().cpu().float()
-    out_files = []
-    for output in outputs:
-        with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
-            audio_write(
-                file.name, output, MODEL.sample_rate, strategy="loudness",
-                loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
-            out_files.append(pool.submit(make_waveform, file.name))
-            file_cleaner.add(file.name)
-    res = [out_file.result() for out_file in out_files]
-    for file in res:
-        file_cleaner.add(file)
-    print("batch finished", len(texts), time.time() - be)
-    print("Tempfiles currently stored: ", len(file_cleaner.files))
-    return res
-
-
-def predict_batched(texts, melodies):
-    max_text_length = 512
-    texts = [text[:max_text_length] for text in texts]
-    load_model('melody')
-    res = _do_predictions(texts, melodies, BATCHED_DURATION)
-    return [res]
-
-
-def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
-    global INTERRUPTING
-    INTERRUPTING = False
-    if temperature < 0:
-        raise gr.Error("Temperature must be >= 0.")
-    if topk < 0:
-        raise gr.Error("Topk must be non-negative.")
-    if topp < 0:
-        raise gr.Error("Topp must be non-negative.")
-
-    topk = int(topk)
-    load_model(model)
-
-    def _progress(generated, to_generate):
-        progress((generated, to_generate))
-        if INTERRUPTING:
-            raise gr.Error("Interrupted.")
-    MODEL.set_custom_progress_callback(_progress)
-
-    outs = _do_predictions(
-        [text], [melody], duration, progress=True,
-        top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
-    return outs[0]
-
-
-def toggle_audio_src(choice):
-    if choice == "mic":
-        return gr.update(source="microphone", value=None, label="Microphone")
-    else:
-        return gr.update(source="upload", value=None, label="File")
-
-
-def ui_full(launch_kwargs):
-    with gr.Blocks() as interface:
-        gr.Markdown(
-            """
-            # MusicGen
-            This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
-            a simple and controllable model for music generation
-            presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
-            """
-        )
-        with gr.Row():
-            with gr.Column():
-                with gr.Row():
-                    text = gr.Text(label="Input Text", interactive=True)
-                    with gr.Column():
-                        radio = gr.Radio(["file", "mic"], value="file",
-                                         label="Condition on a melody (optional) File or Mic")
-                        melody = gr.Audio(source="upload", type="numpy", label="File",
-                                          interactive=True, elem_id="melody-input")
-                with gr.Row():
-                    submit = gr.Button("Submit")
-                    # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
-                    _ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
-                with gr.Row():
-                    model = gr.Radio(["melody", "medium", "small", "large"],
-                                     label="Model", value="melody", interactive=True)
-                with gr.Row():
-                    duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
-                with gr.Row():
-                    topk = gr.Number(label="Top-k", value=250, interactive=True)
-                    topp = gr.Number(label="Top-p", value=0, interactive=True)
-                    temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
-                    cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
-            with gr.Column():
-                output = gr.Video(label="Generated Music")
-        submit.click(predict_full,
-                     inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef],
-                     outputs=[output])
-        radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
-        gr.Examples(
-            fn=predict_full,
-            examples=[
-                [
-                    "An 80s driving pop song with heavy drums and synth pads in the background",
-                    "./assets/bach.mp3",
-                    "melody"
-                ],
-                [
-                    "A cheerful country song with acoustic guitars",
-                    "./assets/bolero_ravel.mp3",
-                    "melody"
-                ],
-                [
-                    "90s rock song with electric guitar and heavy drums",
-                    None,
-                    "medium"
-                ],
-                [
-                    "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
-                    "./assets/bach.mp3",
-                    "melody"
-                ],
-                [
-                    "lofi slow bpm electro chill with organic samples",
-                    None,
-                    "medium",
-                ],
-            ],
-            inputs=[text, melody, model],
-            outputs=[output]
-        )
-        gr.Markdown(
-            """
-            ### More details
-
-            The model will generate a short music extract based on the description you provided.
-            The model can generate up to 30 seconds of audio in one pass. It is now possible
-            to extend the generation by feeding back the end of the previous chunk of audio.
-            This can take a long time, and the model might lose consistency. The model might also
-            decide at arbitrary positions that the song ends.
-
-            **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
-            An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
-            are generated each time.
-
-            We present 4 model variations:
-            1. Melody -- a music generation model capable of generating music conditioned
-                on text and melody inputs. **Note**, you can also use text only.
-            2. Small -- a 300M transformer decoder conditioned on text only.
-            3. Medium -- a 1.5B transformer decoder conditioned on text only.
-            4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences).
-
-            When using `melody`, you can optionally provide a reference audio from
-            which a broad melody will be extracted. The model will then try to follow both
-            the description and melody provided.
-
-            You can also use your own GPU or a Google Colab by following the instructions on our repo.
-            See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
-            for more details.
-            """
-        )
-
-        interface.queue().launch(**launch_kwargs)
-
-
-def ui_batched(launch_kwargs):
-    with gr.Blocks() as demo:
-        gr.Markdown(
-            """
-            # MusicGen
-
-            This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
-            a simple and controllable model for music generation
-            presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
-            <br/>
-            <a href="https://huggingface.co/spaces/facebook/MusicGen?duplicate=true"
-                style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
-            <img style="margin-bottom: 0em;display: inline;margin-top: -.25em;"
-                src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-            for longer sequences, more control and no queue.
-            """
-        )
-        with gr.Row():
-            with gr.Column():
-                with gr.Row():
-                    text = gr.Text(label="Describe your music", lines=2, interactive=True)
-                    with gr.Column():
-                        radio = gr.Radio(["file", "mic"], value="file",
-                                         label="Condition on a melody (optional) File or Mic")
-                        melody = gr.Audio(source="upload", type="numpy", label="File",
-                                          interactive=True, elem_id="melody-input")
-                with gr.Row():
-                    submit = gr.Button("Generate")
-            with gr.Column():
-                output = gr.Video(label="Generated Music")
-        submit.click(predict_batched, inputs=[text, melody],
-                     outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE)
-        radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
-        gr.Examples(
-            fn=predict_batched,
-            examples=[
-                [
-                    "An 80s driving pop song with heavy drums and synth pads in the background",
-                    "./assets/bach.mp3",
-                ],
-                [
-                    "A cheerful country song with acoustic guitars",
-                    "./assets/bolero_ravel.mp3",
-                ],
-                [
-                    "90s rock song with electric guitar and heavy drums",
-                    None,
-                ],
-                [
-                    "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
-                    "./assets/bach.mp3",
-                ],
-                [
-                    "lofi slow bpm electro chill with organic samples",
-                    None,
-                ],
-            ],
-            inputs=[text, melody],
-            outputs=[output]
-        )
-        gr.Markdown("""
-        ### More details
-
-        The model will generate 12 seconds of audio based on the description you provided.
-        You can optionally provide a reference audio from which a broad melody will be extracted.
-        The model will then try to follow both the description and melody provided.
-        All samples are generated with the `melody` model.
-
-        You can also use your own GPU or a Google Colab by following the instructions on our repo.
-
-        See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
-        for more details.
-        """)
-
-        demo.queue(max_size=8 * 4).launch(**launch_kwargs)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--listen',
-        type=str,
-        default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
-        help='IP to listen on for connections to Gradio',
-    )
-    parser.add_argument(
-        '--username', type=str, default='', help='Username for authentication'
-    )
-    parser.add_argument(
-        '--password', type=str, default='', help='Password for authentication'
-    )
-    parser.add_argument(
-        '--server_port',
-        type=int,
-        default=0,
-        help='Port to run the server listener on',
-    )
-    parser.add_argument(
-        '--inbrowser', action='store_true', help='Open in browser'
-    )
-    parser.add_argument(
-        '--share', action='store_true', help='Share the gradio UI'
-    )
-
-    args = parser.parse_args()
-
-    launch_kwargs = {}
-    launch_kwargs['server_name'] = args.listen
-
-    if args.username and args.password:
-        launch_kwargs['auth'] = (args.username, args.password)
-    if args.server_port:
-        launch_kwargs['server_port'] = args.server_port
-    if args.inbrowser:
-        launch_kwargs['inbrowser'] = args.inbrowser
-    if args.share:
-        launch_kwargs['share'] = args.share
-
-    # Show the interface
-    if IS_BATCHED:
-        ui_batched(launch_kwargs)
-    else:
-        ui_full(launch_kwargs)
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_bias_act.cpp b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_bias_act.cpp
deleted file mode 100644
index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/fused_bias_act.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <torch/extension.h>
-
-
-torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-    int act, int grad, float alpha, float scale);
-
-#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) do { CHECK_CUDA(x); CHECK_CONTIGUOUS(x); } while (0)
-
-torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-    int act, int grad, float alpha, float scale) {
-    CHECK_CUDA(input);
-    CHECK_CUDA(bias);
-
-    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
-}
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/__init__.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Duskfallcrew/lambdalabs-sd-pokemon-diffusers/app.py b/spaces/Duskfallcrew/lambdalabs-sd-pokemon-diffusers/app.py
deleted file mode 100644
index ba69d664f64f09c76d2a761d5c5de53c7da3a8ff..0000000000000000000000000000000000000000
--- a/spaces/Duskfallcrew/lambdalabs-sd-pokemon-diffusers/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/lambdalabs/sd-pokemon-diffusers").launch()
\ No newline at end of file
diff --git a/spaces/ECCV2022/PSG/utils.py b/spaces/ECCV2022/PSG/utils.py
deleted file mode 100644
index b75619cc9f519e9e5f88e1eba00e6f3e82d56ebd..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/PSG/utils.py
+++ /dev/null
@@ -1,300 +0,0 @@
-from typing import Tuple
-import PIL
-import PIL.ImageEnhance
-import mmcv
-import numpy as np
-from detectron2.utils.colormap import colormap
-from detectron2.utils.visualizer import VisImage, Visualizer
-from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
-from PIL import Image
-
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
-    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
-    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
-    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
-    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
-    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
-    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
-    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
-    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
-    'food-other-merged', 'building-other-merged', 'rock-merged',
-    'wall-other-merged', 'rug-merged', 'background'
-]
-
-PREDICATES = [
-    'over',
-    'in front of',
-    'beside',
-    'on',
-    'in',
-    'attached to',
-    'hanging from',
-    'on back of',
-    'falling off',
-    'going down',
-    'painted on',
-    'walking on',
-    'running on',
-    'crossing',
-    'standing on',
-    'lying on',
-    'sitting on',
-    'flying over',
-    'jumping over',
-    'jumping from',
-    'wearing',
-    'holding',
-    'carrying',
-    'looking at',
-    'guiding',
-    'kissing',
-    'eating',
-    'drinking',
-    'feeding',
-    'biting',
-    'catching',
-    'picking',
-    'playing with',
-    'chasing',
-    'climbing',
-    'cleaning',
-    'playing',
-    'touching',
-    'pushing',
-    'pulling',
-    'opening',
-    'cooking',
-    'talking to',
-    'throwing',
-    'slicing',
-    'driving',
-    'riding',
-    'parked on',
-    'driving on',
-    'about to hit',
-    'kicking',
-    'swinging',
-    'entering',
-    'exiting',
-    'enclosing',
-    'leaning on',
-]
-
-
-def get_colormap(num_colors: int):
-    return (np.resize(colormap(), (num_colors, 3))).tolist()
-
-
-def draw_text(
-    viz_img: VisImage = None,
-    text: str = None,
-    x: float = None,
-    y: float = None,
-    color: Tuple[float, float, float] = [0, 0, 0],
-    size: float = 10,
-    padding: float = 5,
-    box_color: str = 'black',
-    font: str = None,
-) -> float:
-    text_obj = viz_img.ax.text(
-        x,
-        y,
-        text,
-        size=size,
-        # family="sans-serif",
-        bbox={
-            'facecolor': box_color,
-            'alpha': 0.8,
-            'pad': padding,
-            'edgecolor': 'none',
-        },
-        verticalalignment='top',
-        horizontalalignment='left',
-        color=color,
-        zorder=10,
-        rotation=0,
-    )
-    viz_img.get_image()
-    text_dims = text_obj.get_bbox_patch().get_extents()
-
-    return text_dims.width
-
-
-def show_result(img,
-                result,
-                is_one_stage,
-                num_rel=20,
-                show=False,
-                out_dir=None,
-                out_file=None):
-    # Load image
-    img = mmcv.imread(img)
-    img = img.copy()  # (H, W, 3)
-    img_h, img_w = img.shape[:-1]
-    
-    # Decrease contrast
-    img = PIL.Image.fromarray(img)
-    converter = PIL.ImageEnhance.Color(img)
-    img = converter.enhance(0.01)
-    if out_file is not None:
-        mmcv.imwrite(np.asarray(img), 'bw'+out_file)
-
-    # Draw masks
-    pan_results = result.pan_results
-
-    ids = np.unique(pan_results)[::-1]
-    num_classes = 133
-    legal_indices = (ids != num_classes)  # for VOID label
-    ids = ids[legal_indices]
-
-    # Get predicted labels
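-    # Panoptic ids pack class and instance together: id % INSTANCE_OFFSET
-    # recovers the class index, id // INSTANCE_OFFSET the instance.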
-    labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
-    labels = [CLASSES[l] for l in labels]
-
-    #For psgtr
-    rel_obj_labels = result.labels
-    rel_obj_labels = [CLASSES[l - 1] for l in rel_obj_labels]
-
-    # (N_m, H, W)
-    segms = pan_results[None] == ids[:, None, None]
-    # Resize predicted masks
-    segms = [
-        mmcv.image.imresize(m.astype(float), (img_w, img_h)) for m in segms
-    ]
-    # One stage segmentation
-    masks = result.masks
-
-    # Choose colors for each instance in coco
-    colormap_coco = get_colormap(len(masks)) if is_one_stage else get_colormap(len(segms))
-    colormap_coco = (np.array(colormap_coco) / 255).tolist()
-
-    # Visualize masks
-    viz = Visualizer(img)
-    viz.overlay_instances(
-        labels=rel_obj_labels if is_one_stage else labels,
-        masks=masks if is_one_stage else segms,
-        assigned_colors=colormap_coco,
-    )
-    viz_img = viz.get_output().get_image()
-    if out_file is not None:
-        mmcv.imwrite(viz_img, out_file)
-
-    # Draw relations
-
-    # Filter out relations
-    ### Debug: output all relations if not enough
-    n_rel_topk = min(num_rel, len(result.labels)//2)
-    # Exclude background class
-    rel_dists = result.rel_dists[:, 1:]
-    # rel_dists = result.rel_dists
-    rel_scores = rel_dists.max(1)
-    # rel_scores = result.triplet_scores
-    # Extract relations with top scores
-    rel_topk_idx = np.argpartition(rel_scores, -n_rel_topk)[-n_rel_topk:]
-    rel_labels_topk = rel_dists[rel_topk_idx].argmax(1)
-    rel_pair_idxes_topk = result.rel_pair_idxes[rel_topk_idx]
-    relations = np.concatenate(
-        [rel_pair_idxes_topk, rel_labels_topk[..., None]], axis=1)
-    n_rels = len(relations)
-    
-    top_padding = 20
-    bottom_padding = 20
-    left_padding = 20
-    text_size = 10
-    text_padding = 5
-    text_height = text_size + 2 * text_padding
-    row_padding = 10
-    height = (top_padding + bottom_padding + n_rels *
-              (text_height + row_padding) - row_padding)
-    width = img_w
-    curr_x = left_padding
-    curr_y = top_padding
-    
-    # # Adjust colormaps
-    # colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]
-    viz_graph = VisImage(np.full((height, width, 3), 255))
-    
-    all_rel_vis = []
-    
-    for i, r in enumerate(relations):
-        s_idx, o_idx, rel_id = r
-        s_label = rel_obj_labels[s_idx]
-        o_label = rel_obj_labels[o_idx]
-        rel_label = PREDICATES[rel_id]
-        viz = Visualizer(img)
-        viz.overlay_instances(
-            labels=[s_label, o_label],
-            masks=[masks[s_idx], masks[o_idx]],
-            assigned_colors=[colormap_coco[s_idx], colormap_coco[o_idx]],
-        )
-        viz_masked_img = viz.get_output().get_image()
-
-        viz_graph = VisImage(np.full((40, width, 3), 255))
-        curr_x = 2
-        curr_y = 2
-        text_size = 25
-        text_padding = 20
-        font = 36
-        text_width = draw_text(
-            viz_img=viz_graph,
-            text=s_label,
-            x=curr_x,
-            y=curr_y,
-            color=colormap_coco[s_idx],
-            size=text_size,
-            padding=text_padding,
-            font=font,
-        )
-        curr_x += text_width
-        # Draw relation text
-        text_width = draw_text(
-            viz_img=viz_graph,
-            text=rel_label,
-            x=curr_x,
-            y=curr_y,
-            size=text_size,
-            padding=text_padding,
-            box_color='gainsboro',
-            font=font,
-        )
-        curr_x += text_width
-
-        # Draw object text
-        text_width = draw_text(
-            viz_img=viz_graph,
-            text=o_label,
-            x=curr_x,
-            y=curr_y,
-            color=colormap_coco[o_idx],
-            size=text_size,
-            padding=text_padding,
-            font=font,
-        )
-        output_viz_graph = np.vstack([viz_masked_img, viz_graph.get_image()])
-        if show:
-           all_rel_vis.append(output_viz_graph)
-
-    return all_rel_vis
-
-
-def make_gif(np_images):
-    frames = [Image.fromarray(numpy_image.astype('uint8'), 'RGB') for numpy_image in np_images]
-    # frames = [Image.open(image) for image in images]
-    frame_one = frames[0]
-    file_name = "top_rel.gif"
-    frame_one.save(file_name, format="GIF", append_images=frames,
-               save_all=True, duration=1000, loop=0)
-    return file_name
\ No newline at end of file
diff --git a/spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/kalman_filter.py b/spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/kalman_filter.py
deleted file mode 100644
index b4c4e9854d8abd2fea75ad6b1fe8cd6846c43680..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/kalman_filter.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# vim: expandtab:ts=4:sw=4
-import numpy as np
-import scipy.linalg
-
-"""
-Table for the 0.95 quantile of the chi-square distribution with N degrees of
-freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
-function and used as Mahalanobis gating threshold.
-"""
-chi2inv95 = {
-    1: 3.8415,
-    2: 5.9915,
-    3: 7.8147,
-    4: 9.4877,
-    5: 11.070,
-    6: 12.592,
-    7: 14.067,
-    8: 15.507,
-    9: 16.919}
-
-
-class KalmanFilter(object):
-    """
-    A simple Kalman filter for tracking bounding boxes in image space.
-
-    The 8-dimensional state space
-
-        x, y, a, h, vx, vy, va, vh
-
-    contains the bounding box center position (x, y), aspect ratio a, height h,
-    and their respective velocities.
-
-    Object motion follows a constant velocity model. The bounding box location
-    (x, y, a, h) is taken as direct observation of the state space (linear
-    observation model).
-
-    """
-
-    def __init__(self):
-        ndim, dt = 4, 1.
-
-        # Create Kalman filter model matrices.
-        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
-        for i in range(ndim):
-            self._motion_mat[i, ndim + i] = dt
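-        # _motion_mat is the constant-velocity transition matrix
-        # (position += velocity * dt); _update_mat projects the 8-D state
-        # onto the 4-D measurement (x, y, a, h).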
-        self._update_mat = np.eye(ndim, 2 * ndim)
-
-        # Motion and observation uncertainty are chosen relative to the current
-        # state estimate. These weights control the amount of uncertainty in
-        # the model. This is a bit hacky.
-        self._std_weight_position = 1. / 20
-        self._std_weight_velocity = 1. / 160
-
-    def initiate(self, measurement):
-        """Create track from unassociated measurement.
-
-        Parameters
-        ----------
-        measurement : ndarray
-            Bounding box coordinates (x, y, a, h) with center position (x, y),
-            aspect ratio a, and height h.
-
-        Returns
-        -------
-        (ndarray, ndarray)
-            Returns the mean vector (8 dimensional) and covariance matrix (8x8
-            dimensional) of the new track. Unobserved velocities are initialized
-            to 0 mean.
-
-        """
-        mean_pos = measurement
-        mean_vel = np.zeros_like(mean_pos)
-        mean = np.r_[mean_pos, mean_vel]
-
-        std = [
-            2 * self._std_weight_position * measurement[3],
-            2 * self._std_weight_position * measurement[3],
-            1e-2,
-            2 * self._std_weight_position * measurement[3],
-            10 * self._std_weight_velocity * measurement[3],
-            10 * self._std_weight_velocity * measurement[3],
-            1e-5,
-            10 * self._std_weight_velocity * measurement[3]]
-        covariance = np.diag(np.square(std))
-        return mean, covariance
-
-    def predict(self, mean, covariance):
-        """Run Kalman filter prediction step.
-
-        Parameters
-        ----------
-        mean : ndarray
-            The 8 dimensional mean vector of the object state at the previous
-            time step.
-        covariance : ndarray
-            The 8x8 dimensional covariance matrix of the object state at the
-            previous time step.
-
-        Returns
-        -------
-        (ndarray, ndarray)
-            Returns the mean vector and covariance matrix of the predicted
-            state. Unobserved velocities are initialized to 0 mean.
-
-        """
-        std_pos = [
-            self._std_weight_position * mean[3],
-            self._std_weight_position * mean[3],
-            1e-2,
-            self._std_weight_position * mean[3]]
-        std_vel = [
-            self._std_weight_velocity * mean[3],
-            self._std_weight_velocity * mean[3],
-            1e-5,
-            self._std_weight_velocity * mean[3]]
-        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
-
-        mean = np.dot(mean, self._motion_mat.T)
-        covariance = np.linalg.multi_dot((
-            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
-
-        return mean, covariance
-
-    def project(self, mean, covariance):
-        """Project state distribution to measurement space.
-
-        Parameters
-        ----------
-        mean : ndarray
-            The state's mean vector (8 dimensional array).
-        covariance : ndarray
-            The state's covariance matrix (8x8 dimensional).
-
-        Returns
-        -------
-        (ndarray, ndarray)
-            Returns the projected mean and covariance matrix of the given state
-            estimate.
-
-        """
-        std = [
-            self._std_weight_position * mean[3],
-            self._std_weight_position * mean[3],
-            1e-1,
-            self._std_weight_position * mean[3]]
-        innovation_cov = np.diag(np.square(std))
-
-        mean = np.dot(self._update_mat, mean)
-        covariance = np.linalg.multi_dot((
-            self._update_mat, covariance, self._update_mat.T))
-        return mean, covariance + innovation_cov
-
-    def multi_predict(self, mean, covariance):
-        """Run Kalman filter prediction step (Vectorized version).
-        Parameters
-        ----------
-        mean : ndarray
-            The Nx8 dimensional mean matrix of the object states at the previous
-            time step.
-        covariance : ndarray
-            The Nx8x8 dimensional covariance matrices of the object states at the
-            previous time step.
-        Returns
-        -------
-        (ndarray, ndarray)
-            Returns the mean vector and covariance matrix of the predicted
-            state. Unobserved velocities are initialized to 0 mean.
-        """
-        std_pos = [
-            self._std_weight_position * mean[:, 3],
-            self._std_weight_position * mean[:, 3],
-            1e-2 * np.ones_like(mean[:, 3]),
-            self._std_weight_position * mean[:, 3]]
-        std_vel = [
-            self._std_weight_velocity * mean[:, 3],
-            self._std_weight_velocity * mean[:, 3],
-            1e-5 * np.ones_like(mean[:, 3]),
-            self._std_weight_velocity * mean[:, 3]]
-        sqr = np.square(np.r_[std_pos, std_vel]).T
-
-        motion_cov = []
-        for i in range(len(mean)):
-            motion_cov.append(np.diag(sqr[i]))
-        motion_cov = np.asarray(motion_cov)
-
-        mean = np.dot(mean, self._motion_mat.T)
-        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
-        covariance = np.dot(left, self._motion_mat.T) + motion_cov
-
-        return mean, covariance
-
-    def update(self, mean, covariance, measurement):
-        """Run Kalman filter correction step.
-
-        Parameters
-        ----------
-        mean : ndarray
-            The predicted state's mean vector (8 dimensional).
-        covariance : ndarray
-            The state's covariance matrix (8x8 dimensional).
-        measurement : ndarray
-            The 4 dimensional measurement vector (x, y, a, h), where (x, y)
-            is the center position, a the aspect ratio, and h the height of the
-            bounding box.
-
-        Returns
-        -------
-        (ndarray, ndarray)
-            Returns the measurement-corrected state distribution.
-
-        """
-        projected_mean, projected_cov = self.project(mean, covariance)
-
-        chol_factor, lower = scipy.linalg.cho_factor(
-            projected_cov, lower=True, check_finite=False)
-        kalman_gain = scipy.linalg.cho_solve(
-            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
-            check_finite=False).T
-        innovation = measurement - projected_mean
-
-        new_mean = mean + np.dot(innovation, kalman_gain.T)
-        new_covariance = covariance - np.linalg.multi_dot((
-            kalman_gain, projected_cov, kalman_gain.T))
-        return new_mean, new_covariance
-
-    def gating_distance(self, mean, covariance, measurements,
-                        only_position=False, metric='maha'):
-        """Compute gating distance between state distribution and measurements.
-        A suitable distance threshold can be obtained from `chi2inv95`. If
-        `only_position` is False, the chi-square distribution has 4 degrees of
-        freedom, otherwise 2.
-        Parameters
-        ----------
-        mean : ndarray
-            Mean vector over the state distribution (8 dimensional).
-        covariance : ndarray
-            Covariance of the state distribution (8x8 dimensional).
-        measurements : ndarray
-            An Nx4 dimensional matrix of N measurements, each in
-            format (x, y, a, h) where (x, y) is the bounding box center
-            position, a the aspect ratio, and h the height.
-        only_position : Optional[bool]
-            If True, distance computation is done with respect to the bounding
-            box center position only.
-        Returns
-        -------
-        ndarray
-            Returns an array of length N, where the i-th element contains the
-            squared Mahalanobis distance between (mean, covariance) and
-            `measurements[i]`.
-        """
-        mean, covariance = self.project(mean, covariance)
-        if only_position:
-            mean, covariance = mean[:2], covariance[:2, :2]
-            measurements = measurements[:, :2]
-
-        d = measurements - mean
-        if metric == 'gaussian':
-            return np.sum(d * d, axis=1)
-        elif metric == 'maha':
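-            # Solve L z = d^T with the Cholesky factor L of S, so that
-            # sum(z**2) equals the squared Mahalanobis distance d^T S^-1 d.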
-            cholesky_factor = np.linalg.cholesky(covariance)
-            z = scipy.linalg.solve_triangular(
-                cholesky_factor, d.T, lower=True, check_finite=False,
-                overwrite_b=True)
-            squared_maha = np.sum(z * z, axis=0)
-            return squared_maha
-        else:
-            raise ValueError('invalid distance metric')
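-
-
-if __name__ == "__main__":
-    # Minimal usage sketch (illustrative only, not part of the original module):
-    # start a track from one detection in (center x, center y, aspect, height)
-    # format, project it one frame ahead, then correct it with a new measurement.
-    kf = KalmanFilter()
-    detection = np.array([320., 240., 0.5, 80.])
-    mean, covariance = kf.initiate(detection)
-    mean, covariance = kf.predict(mean, covariance)
-    mean, covariance = kf.update(mean, covariance, detection)
-    print("gating distance:", kf.gating_distance(mean, covariance, detection[None]))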
diff --git a/spaces/ECCV2022/bytetrack/yolox/deepsort_tracker/iou_matching.py b/spaces/ECCV2022/bytetrack/yolox/deepsort_tracker/iou_matching.py
deleted file mode 100644
index 7a27a4dbff2360a09943442140d52743cd66d8c4..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/deepsort_tracker/iou_matching.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# vim: expandtab:ts=4:sw=4
-from __future__ import absolute_import
-import numpy as np
-from yolox.deepsort_tracker import linear_assignment
-
-
-def iou(bbox, candidates):
-    """Computer intersection over union.
-    Parameters
-    ----------
-    bbox : ndarray
-        A bounding box in format `(top left x, top left y, width, height)`.
-    candidates : ndarray
-        A matrix of candidate bounding boxes (one per row) in the same format
-        as `bbox`.
-    Returns
-    -------
-    ndarray
-        The intersection over union in [0, 1] between the `bbox` and each
-        candidate. A higher score means a larger fraction of the `bbox` is
-        occluded by the candidate.
-    """
-    bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
-    candidates_tl = candidates[:, :2]
-    candidates_br = candidates[:, :2] + candidates[:, 2:]
-
-    tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
-               np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
-    br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
-               np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
-    wh = np.maximum(0., br - tl)
-
-    area_intersection = wh.prod(axis=1)
-    area_bbox = bbox[2:].prod()
-    area_candidates = candidates[:, 2:].prod(axis=1)
-    return area_intersection / (area_bbox + area_candidates - area_intersection)
-
-
-def iou_cost(tracks, detections, track_indices=None,
-             detection_indices=None):
-    """An intersection over union distance metric.
-    Parameters
-    ----------
-    tracks : List[deep_sort.track.Track]
-        A list of tracks.
-    detections : List[deep_sort.detection.Detection]
-        A list of detections.
-    track_indices : Optional[List[int]]
-        A list of indices to tracks that should be matched. Defaults to
-        all `tracks`.
-    detection_indices : Optional[List[int]]
-        A list of indices to detections that should be matched. Defaults
-        to all `detections`.
-    Returns
-    -------
-    ndarray
-        Returns a cost matrix of shape
-        len(track_indices), len(detection_indices) where entry (i, j) is
-        `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
-    """
-    if track_indices is None:
-        track_indices = np.arange(len(tracks))
-    if detection_indices is None:
-        detection_indices = np.arange(len(detections))
-
-    cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
-    for row, track_idx in enumerate(track_indices):
-        if tracks[track_idx].time_since_update > 1:
-            cost_matrix[row, :] = linear_assignment.INFTY_COST
-            continue
-
-        bbox = tracks[track_idx].to_tlwh()
-        candidates = np.asarray(
-            [detections[i].tlwh for i in detection_indices])
-        cost_matrix[row, :] = 1. - iou(bbox, candidates)
-    return cost_matrix
\ No newline at end of file
diff --git a/spaces/ECCV2022/bytetrack/yolox/models/__init__.py b/spaces/ECCV2022/bytetrack/yolox/models/__init__.py
deleted file mode 100644
index c4641a61bf466259c88e0a0b92e4ff55b2abcd61..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/models/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
-
-from .darknet import CSPDarknet, Darknet
-from .losses import IOUloss
-from .yolo_fpn import YOLOFPN
-from .yolo_head import YOLOXHead
-from .yolo_pafpn import YOLOPAFPN
-from .yolox import YOLOX
diff --git a/spaces/ECCV2022/bytetrack/yolox/models/losses.py b/spaces/ECCV2022/bytetrack/yolox/models/losses.py
deleted file mode 100644
index a789ebab8ba28a3927d467947c7d918fe4f2478b..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/models/losses.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class IOUloss(nn.Module):
-    def __init__(self, reduction="none", loss_type="iou"):
-        super(IOUloss, self).__init__()
-        self.reduction = reduction
-        self.loss_type = loss_type
-
-    def forward(self, pred, target):
-        assert pred.shape[0] == target.shape[0]
-
-        pred = pred.view(-1, 4)
-        target = target.view(-1, 4)
-        tl = torch.max(
-            (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-        )
-        br = torch.min(
-            (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-        )
-
-        area_p = torch.prod(pred[:, 2:], 1)
-        area_g = torch.prod(target[:, 2:], 1)
-
-        en = (tl < br).type(tl.type()).prod(dim=1)
-        area_i = torch.prod(br - tl, 1) * en
-        iou = (area_i) / (area_p + area_g - area_i + 1e-16)
-
-        if self.loss_type == "iou":
-            loss = 1 - iou ** 2
-        elif self.loss_type == "giou":
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-            )
-            area_c = torch.prod(c_br - c_tl, 1)
-            giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
-            loss = 1 - giou.clamp(min=-1.0, max=1.0)
-        else:
-            raise ValueError(f"Unsupported loss_type: {self.loss_type}")
-
-        if self.reduction == "mean":
-            loss = loss.mean()
-        elif self.reduction == "sum":
-            loss = loss.sum()
-
-        return loss
-
-
-def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
-    """
-    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
-    Args:
-        inputs: A float tensor of arbitrary shape.
-                The predictions for each example.
-        targets: A float tensor with the same shape as inputs. Stores the binary
-                 classification label for each element in inputs
-                (0 for the negative class and 1 for the positive class).
-        alpha: (optional) Weighting factor in range (0,1) to balance
-                positive vs negative examples. Default = 0.25.
-        gamma: Exponent of the modulating factor (1 - p_t) to
-               balance easy vs hard examples.
-    Returns:
-        Loss tensor
-    """
-    prob = inputs.sigmoid()
-    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
-    p_t = prob * targets + (1 - prob) * (1 - targets)
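-    # p_t is the probability assigned to the true class; the (1 - p_t)**gamma
-    # factor down-weights easy, well-classified examples.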
-    loss = ce_loss * ((1 - p_t) ** gamma)
-
-    if alpha >= 0:
-        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
-        loss = alpha_t * loss
-    #return loss.mean(0).sum() / num_boxes
-    return loss.sum() / num_boxes
\ No newline at end of file
diff --git a/spaces/Egrt/LicenseGAN/plate.py b/spaces/Egrt/LicenseGAN/plate.py
deleted file mode 100644
index 7f3e20c943b21ac9d33df93c75db32621196af55..0000000000000000000000000000000000000000
--- a/spaces/Egrt/LicenseGAN/plate.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-import torch.nn as nn
-from torchvision import transforms
-
-from esrgan import ESRGAN
-
-esrgan = ESRGAN()
-
-def viz(module, input):
-    x = input[0][0]
-    # Display at most 4 feature maps
-    min_num = np.minimum(4, x.size()[0])
-    for i in range(min_num):
-        plt.subplot(1, 4, i+1)
-        plt.imshow(x[i].cpu())
-        plt.xticks([])  # hide x-axis tick labels
-        plt.yticks([])  # hide y-axis tick labels
-    plt.show()
-
-def main():
-    t = transforms.Compose([transforms.ToPILImage(),
-                            transforms.Resize((224, 224)),
-                            transforms.ToTensor(),
-                            transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                                 std=[0.229, 0.224, 0.225])
-                            ])
-
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-    model = esrgan.net
-    for name, m in model.named_modules():
-        # Only register the hook on convolutional layers, so only their
-        # feature maps are displayed.
-        if isinstance(m, torch.nn.Conv2d):
-            m.register_forward_pre_hook(viz)
-    img = cv2.imread('image.png')
-    img = t(img).unsqueeze(0).to(device)
-    with torch.no_grad():
-        model(img)
-
-if __name__ == '__main__':
-    main()
diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
deleted file mode 100644
index 87dab3352d92c3105684908f50b9b8f6bcc71a16..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/recog_datasets/ST_SA_MJ_real_train.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Text Recognition Training set, including:
-# Synthetic Datasets: SynthText, SynthAdd, Syn90k
-# Real Datasets: IC11, IC13, IC15, COCO-Text, IIIT5K
-
-train_prefix = 'data/mixture'
-
-train_img_prefix1 = f'{train_prefix}/icdar_2011'
-train_img_prefix2 = f'{train_prefix}/icdar_2013'
-train_img_prefix3 = f'{train_prefix}/icdar_2015'
-train_img_prefix4 = f'{train_prefix}/coco_text'
-train_img_prefix5 = f'{train_prefix}/IIIT5K'
-train_img_prefix6 = f'{train_prefix}/SynthText_Add'
-train_img_prefix7 = f'{train_prefix}/SynthText'
-train_img_prefix8 = f'{train_prefix}/Syn90k'
-
-train_ann_file1 = f'{train_prefix}/icdar_2011/train_label.txt'
-train_ann_file2 = f'{train_prefix}/icdar_2013/train_label.txt'
-train_ann_file3 = f'{train_prefix}/icdar_2015/train_label.txt'
-train_ann_file4 = f'{train_prefix}/coco_text/train_label.txt'
-train_ann_file5 = f'{train_prefix}/IIIT5K/train_label.txt'
-train_ann_file6 = f'{train_prefix}/SynthText_Add/label.txt'
-train_ann_file7 = f'{train_prefix}/SynthText/shuffle_labels.txt'
-train_ann_file8 = f'{train_prefix}/Syn90k/shuffle_labels.txt'
-
-train1 = dict(
-    type='OCRDataset',
-    img_prefix=train_img_prefix1,
-    ann_file=train_ann_file1,
-    loader=dict(
-        type='AnnFileLoader',
-        repeat=20,
-        file_format='txt',
-        parser=dict(
-            type='LineStrParser',
-            keys=['filename', 'text'],
-            keys_idx=[0, 1],
-            separator=' ')),
-    pipeline=None,
-    test_mode=False)
-
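-# train2-train5 reuse train1's loader settings via a shallow dict copy,
-# overriding only the image prefix and annotation file (train7/train8 do the
-# same relative to train6).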
-train2 = {key: value for key, value in train1.items()}
-train2['img_prefix'] = train_img_prefix2
-train2['ann_file'] = train_ann_file2
-
-train3 = {key: value for key, value in train1.items()}
-train3['img_prefix'] = train_img_prefix3
-train3['ann_file'] = train_ann_file3
-
-train4 = {key: value for key, value in train1.items()}
-train4['img_prefix'] = train_img_prefix4
-train4['ann_file'] = train_ann_file4
-
-train5 = {key: value for key, value in train1.items()}
-train5['img_prefix'] = train_img_prefix5
-train5['ann_file'] = train_ann_file5
-
-train6 = dict(
-    type='OCRDataset',
-    img_prefix=train_img_prefix6,
-    ann_file=train_ann_file6,
-    loader=dict(
-        type='AnnFileLoader',
-        repeat=1,
-        file_format='txt',
-        parser=dict(
-            type='LineStrParser',
-            keys=['filename', 'text'],
-            keys_idx=[0, 1],
-            separator=' ')),
-    pipeline=None,
-    test_mode=False)
-
-train7 = {key: value for key, value in train6.items()}
-train7['img_prefix'] = train_img_prefix7
-train7['ann_file'] = train_ann_file7
-
-train8 = {key: value for key, value in train6.items()}
-train8['img_prefix'] = train_img_prefix8
-train8['ann_file'] = train_ann_file8
-
-train_list = [train1, train2, train3, train4, train5, train6, train7, train8]
diff --git a/spaces/EveryPizza/stabilityai-stable-diffusion-2/README.md b/spaces/EveryPizza/stabilityai-stable-diffusion-2/README.md
deleted file mode 100644
index 1539dd3a9464f868e1588376380e49af9e5d9a4e..0000000000000000000000000000000000000000
--- a/spaces/EveryPizza/stabilityai-stable-diffusion-2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stabilityai Stable Diffusion 2
-emoji: 🐢
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/FYP-23-S1-21/Refineverse_Plugin/static/TextGeneration.css b/spaces/FYP-23-S1-21/Refineverse_Plugin/static/TextGeneration.css
deleted file mode 100644
index e26eb03e4f2df870d6054f1dc2d493b37c8e85a3..0000000000000000000000000000000000000000
--- a/spaces/FYP-23-S1-21/Refineverse_Plugin/static/TextGeneration.css
+++ /dev/null
@@ -1,317 +0,0 @@
-body {
-  background-image: url("../static/Images/Background.jpg");
-  background-repeat: no-repeat;
-  background-size: cover;
-}
-:root {
-    --dl-color-gray-500: #595959;
-    --dl-color-gray-700: #999999;
-    --dl-color-gray-900: #d9d9d9;
-    --dl-size-size-large: 144px;
-    --dl-size-size-small: 48px;
-    --dl-color-danger-300: #a22020;
-    --dl-color-danger-500: #bf2626;
-    --dl-color-danger-700: #e14747;
-    --dl-color-gray-black: #000000;
-    --dl-color-gray-white: #ffffff;
-    --dl-size-size-medium: 96px;
-    --dl-size-size-xlarge: 192px;
-    --dl-size-size-xsmall: 16px;
-    --dl-space-space-unit: 16px;
-    --dl-color-primary-100: #003eb3;
-    --dl-color-primary-300: #0074f0;
-    --dl-color-primary-500: #14a9ff;
-    --dl-color-primary-700: #85dcff;
-    --dl-color-success-300: #199033;
-    --dl-color-success-500: #32a94c;
-    --dl-color-success-700: #4cc366;
-    --dl-size-size-xxlarge: 288px; 
-    --dl-size-size-maxwidth: 1400px;
-    --dl-radius-radius-round: 50%;
-    --dl-space-space-halfunit: 8px;
-    --dl-space-space-sixunits: 96px;
-    --dl-space-space-twounits: 32px;
-    --dl-radius-radius-radius2: 2px;
-    --dl-radius-radius-radius4: 4px;
-    --dl-radius-radius-radius8: 8px;
-    --dl-space-space-fiveunits: 80px;
-    --dl-space-space-fourunits: 64px;
-    --dl-space-space-threeunits: 48px;
-    --dl-space-space-oneandhalfunits: 24px;
-  }
-  .button {
-    color: var(--dl-color-gray-black);
-    display: inline-block;
-    padding: 0.5rem 1rem;
-    border-color: var(--dl-color-gray-black);
-    border-width: 1px;
-    border-radius: 4px;
-    background-color: var(--dl-color-gray-white);
-  }
-  .input {
-    color: var(--dl-color-gray-black);
-    cursor: auto;
-    padding: 0.5rem 1rem;
-    border-color: var(--dl-color-gray-black);
-    border-width: 1px;
-    border-radius: 4px;
-    background-color: var(--dl-color-gray-white);
-  }
-  .textarea {
-    color: var(--dl-color-gray-black);
-    cursor: auto;
-    padding: 0.5rem;
-    border-color: var(--dl-color-gray-black);
-    border-width: 1px;
-    border-radius: 4px;
-    background-color: var(--dl-color-gray-white);
-  }
-  .list {
-    width: 100%;
-    margin: 1em 0px 1em 0px;
-    display: block;
-    padding: 0px 0px 0px 1.5rem;
-    list-style-type: none;
-    list-style-position: outside;
-  }
-  .list-item {
-    display: list-item;
-  }
-  .teleport-show {
-    display: flex !important;
-    transform: none !important;
-  }
-  .Heading {
-    font-size: 32px;
-    font-family: Inter;
-    font-weight: 700;
-    line-height: 1.15;
-    text-transform: none;
-    text-decoration: none;
-  }
-  .Content {
-    font-size: 16px;
-    font-family: Inter;
-    font-weight: 400;
-    line-height: 1.15;
-    text-transform: none;
-    text-decoration: none;
-  }
-  
-  body {
-    margin: 0;
-    padding: 0;
-    font-family: Arial, sans-serif;
-  }
-  
-  .container {
-    white-space: nowrap;
-    display: flex;
-    align-items: center;
-    justify-content: center;
-    max-width: 1000px;
-    margin: auto;
-    margin-top: 50px;
-    overflow: hidden;
-  }
-  
-  .box,
-  .box1 {
-    height: 500px;
-    width: 570px;
-    background-color: #f2f2f2;
-    overflow-y: scroll;
-    border: 1px solid gray;
-    display: inline-block;
-    border-radius: 6px;
-  }
-  .box::-webkit-scrollbar,
-  .box1::-webkit-scrollbar {
-    width: 10px;
-    height: 10px;
-  }
-  
-  .box::-webkit-scrollbar-track,
-  .box1::-webkit-scrollbar-track {
-    background-color: #f2f2f2;
-  }
-  
-  .box::-webkit-scrollbar-thumb,
-  .box1::-webkit-scrollbar-thumb {
-    background-color: #888;
-    border-radius: 5px;
-  }
-  
-  .box::-webkit-scrollbar-thumb:hover,
-  .box1::-webkit-scrollbar-thumb:hover {
-    background-color: #555;
-  }
-  .textarea,
-  .textarea1 {
-    height: 100%;
-    width: 100%;
-    padding: 10px;
-    box-sizing: border-box;
-    border: none;
-    font-size: 16px;
-    resize: none;
-  }
-  .title {
-    text-align: center;
-    padding: 20px;
-    font-size: 62px;
-    font-weight: bold;
-    color: rgb(10, 10, 10);
-  }
-  
-  .message {
-    text-align: center;
-    padding: 0px;
-    font-size: 24px;
-    color: rgb(92, 88, 88);
-    margin-top: -30px;
-  }
-  
-  .logo {
-    height: 250px;
-    width: 250px;
-    margin-right: 150px;
-  }
-  .home-image {
-    max-width: 100%;
-    max-height: 100%;
-  }
-  
-  
-  .ClearBtn {
-    display: flex;
-    justify-content: center;
-    align-items: center;
-    text-align: center;
-    background-color: whitesmoke;
-    color: black;
-    padding: 20px 30px;
-    border: 1px solid black;
-    border-radius: 5px;
-    font-size: 20px;
-    cursor: pointer;
-    width: 110px;
-    height: 40px;
-    margin-left: 0px;
-    margin-top: 15px;
-  }
-  .ViewBtn {
-    display: flex;
-    justify-content: center;
-    align-items: center;
-    text-align: center;
-    background-color: #555;
-    color: #fff;
-    padding: 20px 30px;
-    border: none;
-    border-radius: 5px;
-    font-size: 20px;
-    cursor: pointer;
-    width: 150px;
-    height: 40px;
-    margin-left: 800px;
-    margin-top: -40px;
-    margin-right: -20px;
-  }
-  .GenerateBtn {
-    display: flex;
-    justify-content: center;
-    align-items: center;
-    text-align: center;
-    background-color: #555;
-    color: #fff;
-    padding: 20px 30px;
-    border: none;
-    border-radius: 5px;
-    font-size: 20px;
-    cursor: pointer;
-    width: 150px;
-    height: 40px;
-    margin-left: 1000px;
-    margin-top: -40px;
-    margin-right: -20px;
-  }
-  
-  .BackBtnContainer {
-    display: flex;
-    justify-content: end;
-    align-items: end;
-    padding: 0 20px;
-    margin-top: 20px;
-  }
-  
-  .BackBtn {
-    background-color: #555;
-    color: #fff;
-    padding: 10px 20px;
-    border: none;
-    border-radius: 5px;
-    font-size: 16px;
-    cursor: pointer;
-    margin-right: 150px;
-    width: 110px;
-    height: 40px;
-  }
-  
-  @media screen and (max-width: 600px) {
-    .box {
-      height: 400px;
-      width: 90%;
-      margin: 10px auto;
-    }
-  }
-  
\ No newline at end of file
diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py
deleted file mode 100644
index 46605175d1ac94fcde252b53ddb81ba99f15706e..0000000000000000000000000000000000000000
--- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatgptAi.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-import requests, re
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://chatgpt.ai/gpt-4/'
-model = ['gpt-4']
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    chat = ''
-    for message in messages:
-        chat += '%s: %s\n' % (message['role'], message['content'])
-    chat += 'assistant: '
-
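-    # Scrape the WordPress AJAX parameters (nonce, post id, bot id) from the landing page markup;
-    # this regex is brittle and will break whenever the site changes its HTML.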
-    response = requests.get('https://chatgpt.ai/')
-    nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n     data-post-id="(.*)"\n     data-url="(.*)"\n     data-bot-id="(.*)"\n     data-width', response.text)[0]
-
-    headers = {
-        'authority': 'chatgpt.ai',
-        'accept': '*/*',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-        'cache-control': 'no-cache',
-        'origin': 'https://chatgpt.ai',
-        'pragma': 'no-cache',
-        'referer': 'https://chatgpt.ai/gpt-4/',
-        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"Windows"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'same-origin',
-        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-    }
-    data = {
-        '_wpnonce': nonce,
-        'post_id': post_id,
-        'url': 'https://chatgpt.ai/gpt-4',
-        'action': 'wpaicg_chat_shortcode_message',
-        'message': chat,
-        'bot_id': bot_id
-    }
-
-    response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php', 
-                            headers=headers, data=data)
-
-    yield (response.json()['data'])
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/Flux9665/PoeticTTS/app.py b/spaces/Flux9665/PoeticTTS/app.py
deleted file mode 100644
index 1f5d8ce1331be755d2222711d8b15cb961c99da2..0000000000000000000000000000000000000000
--- a/spaces/Flux9665/PoeticTTS/app.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import os
-
-os.system("git clone --branch v2.5 https://github.com/DigitalPhonetics/IMS-Toucan.git toucan_codebase")
-os.system("mv toucan_codebase/* .")
-
-from run_model_downloader import download_models
-
-download_models()
-
-import gradio as gr
-import numpy as np
-import torch
-
-from InferenceInterfaces.UtteranceCloner import UtteranceCloner
-from TrainingInterfaces.Text_to_Spectrogram.AutoAligner.Aligner import Aligner
-from TrainingInterfaces.Text_to_Spectrogram.FastSpeech2.DurationCalculator import DurationCalculator
-
-
-def float2pcm(sig, dtype='int16'):
-    """
-    https://gist.github.com/HudsonHuang/fbdf8e9af7993fe2a91620d3fb86a182
-    """
-    sig = np.asarray(sig)
-    if sig.dtype.kind != 'f':
-        raise TypeError("'sig' must be a float array")
-    dtype = np.dtype(dtype)
-    if dtype.kind not in 'iu':
-        raise TypeError("'dtype' must be an integer type")
-    i = np.iinfo(dtype)
-    abs_max = 2 ** (i.bits - 1)
-    offset = i.min + abs_max
-    return (sig * abs_max + offset).clip(i.min, i.max).astype(dtype)
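-
-# Minimal usage sketch (hypothetical input; float32 in [-1, 1] maps onto the int16 range):
-#     pcm = float2pcm(np.array([0.0, 0.5, -1.0], dtype=np.float32))
-#     # -> array([     0,  16384, -32768], dtype=int16)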
-
-
-class TTS_Interface:
-
-    def __init__(self):
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.utterance_cloner = UtteranceCloner(model_id="Meta", device=self.device)
-        self.utterance_cloner.tts.set_language("de")
-        self.acoustic_model = Aligner()
-        self.acoustic_model.load_state_dict(torch.load("Models/Aligner/aligner.pt", map_location='cpu')["asr_model"])
-        self.acoustic_model = self.acoustic_model.to(self.device)
-        self.dc = DurationCalculator(reduction_factor=1)
-        self.text = "Quellen hattest du ihm, hattest dem Flüchtigen, kühle Schatten geschenkt, und die Gestade sahen, all ihm nach, und es bebte, aus den Wellen ihr lieblich Bild."
-        reference_audio = "reference_audios/2.wav"
-        self.duration, self.pitch, self.energy, _, _ = self.utterance_cloner.extract_prosody(self.text, reference_audio, lang="de", on_line_fine_tune=True)
-        self.phones = self.utterance_cloner.tts.text2phone.get_phone_string(self.text)
-        print(self.phones)
-        for index, phone in enumerate(self.phones):
-            print(index, phone)
-
-        #######
-        self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_male.wav")
-        self.current_voice = "male"
-        self.cloned_speech_male = self.utterance_cloner.tts(self.phones,
-                                                            view=False,
-                                                            durations=self.duration,
-                                                            pitch=self.pitch,
-                                                            energy=self.energy,
-                                                            input_is_phones=True).cpu().numpy()
-        self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_female.wav")
-        self.current_voice = "female"
-        self.cloned_speech_female = self.utterance_cloner.tts(self.phones,
-                                                              view=False,
-                                                              durations=self.duration,
-                                                              pitch=self.pitch,
-                                                              energy=self.energy,
-                                                              input_is_phones=True).cpu().numpy()
-
-        #######
-        self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_male.wav")
-        self.current_voice = "male"
-        self.reg_speech_male = self.utterance_cloner.tts(
-            "Quellen hattest du ihm, hattest dem Flüchtigen kühle Schatten geschenkt, und die Gestade sahen all ihm nach, und es bebte aus den Wellen ihr lieblich Bild.",
-            view=False).cpu().numpy()
-        self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_female.wav")
-        self.current_voice = "female"
-        self.reg_speech_female = self.utterance_cloner.tts(
-            "Quellen hattest du ihm, hattest dem Flüchtigen kühle Schatten geschenkt, und die Gestade sahen all ihm nach, und es bebte aus den Wellen ihr lieblich Bild.",
-            view=False).cpu().numpy()
-
-    def read(self, _, speaker, lengthening, pause_dur, pitch_up):
-
-        if speaker == "Female Voice" and self.current_voice != "female":
-            self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_female.wav")
-            self.current_voice = "female"
-        elif speaker == "Male Voice" and self.current_voice != "male":
-            self.utterance_cloner.tts.set_utterance_embedding("reference_audios/german_male.wav")
-            self.current_voice = "male"
-
-        duration = self.duration.clone()
-        # lengthening
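-        # each candidate pairs a phone symbol with its index in self.phones;
-        # only the index is used, the symbol is kept for readability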
-        lengthening_candidates = [
-            # ('f', 33),
-            # ('l', 34),
-            ('ʏ', 35),
-            ('ç', 36),
-            # ('t', 37),
-            ('ɪ', 38),
-            # ('ɡ', 39),
-            ('ə', 40),
-            ('n', 41),
-
-            # ('z', 79),
-            ('ɑ', 80),
-            # ('ə', 81),
-            ('n', 82),
-
-            # ('b', 103),
-            ('e', 104),
-            # ('p', 105),
-            # ('t', 106),
-            ('ə', 107)
-            ]
-
-        for lengthening_candidate in lengthening_candidates:
-            duration[lengthening_candidate[1]] = duration[lengthening_candidate[1]] + lengthening
-
-        # pauses
-        pause_candidates = [('~', 42),
-                            ('~', 83),
-                            ('~', 108)]
-
-        for pause_candidate in pause_candidates:
-            duration[pause_candidate[1]] = duration[pause_candidate[1]] + pause_dur
-
-        pitch = self.pitch.clone()
-        # pitch raise
-
-        pitch_candidates = [
-            # ('k', 44),
-            ('y', 45),
-            ('l', 46),
-            ('ə', 47),
-            ('ʃ', 49),
-            ('a', 50),
-            ('t', 51),
-            # ('ə', 52),
-            # ('n', 53),
-
-            ('a', 85),
-            ('l', 86),
-
-            ('v', 118),
-            ('ɛ', 119),
-            ('l', 120),
-            # ('ə', 121),
-            # ('n', 122)
-            ]
-
-        for pitch_candidate in pitch_candidates:
-            pitch[pitch_candidate[1]] = pitch[pitch_candidate[1]] + pitch_up
-
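-        # lower the pitch on the phones just before the raised region by the same
-        # amount, so that the raise on the new verse stands out clearly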
-        fixme = [('f', 33),
-                 ('l', 34),
-                 ('ʏ', 35),
-                 ('ç', 36),
-                 ('t', 37),
-                 ('ɪ', 38),
-                 ('ɡ', 39),
-                 ('ə', 40),
-                 ('n', 41)
-                 ]
-        for pitch_candidate in fixme:
-            pitch[pitch_candidate[1]] = pitch[pitch_candidate[1]] - abs(pitch_up)
-
-        manipulated_speech = self.utterance_cloner.tts(self.phones,
-                                                       view=False,
-                                                       durations=duration,
-                                                       pitch=pitch,
-                                                       energy=self.energy,
-                                                       input_is_phones=True).cpu()
-
-        if self.current_voice == "female":
-            cloned_speech = self.cloned_speech_female
-            reg_speech = self.reg_speech_female
-        else:
-            cloned_speech = self.cloned_speech_male
-            reg_speech = self.reg_speech_male
-
-        return (24000, float2pcm(reg_speech)), (24000, float2pcm(cloned_speech)), (24000, float2pcm(manipulated_speech.numpy()))
-
-
-poem_model = TTS_Interface()
-article = "<p style='text-align: left'>This is still a work in progress, models will be exchanged for better ones as soon as they are done. More diverse training data can help with more exact cloning and more controllability. For example we are still trying to incorporate more singing data. </p><p style='text-align: center'><a href='https://github.com/DigitalPhonetics/IMS-Toucan' target='_blank'>Click here to learn more about the IMS Toucan Speech Synthesis Toolkit</a></p>"
-
-iface = gr.Interface(fn=poem_model.read,
-                     inputs=[gr.inputs.Dropdown([
-                         "Quellen hattest du ihm, hattest dem Flüchtigen // kühle Schatten geschenkt, und die Gestade sahn // all ihm nach, und es bebte // aus den Wellen ihr lieblich Bild."],
-                         type="value",
-                         default="Quellen hattest du ihm, hattest dem Flüchtigen // kühle Schatten geschenkt, und die Gestade sahn // all ihm nach, und es bebte // aus den Wellen ihr lieblich Bild.",
-                         label="Poem Transcript"),
-                         gr.inputs.Dropdown(["Female Voice", "Male Voice"],
-                                            type="value",
-                                            default="Female Voice",
-                                            label="Select a Speaker"),
-                         gr.inputs.Slider(minimum=0, maximum=4, step=1, default=2, label="Lengthening on verse end"),
-                         gr.inputs.Slider(minimum=0, maximum=20, step=1, default=8, label="Length of Pause after verse end"),
-                         gr.inputs.Slider(minimum=-0.4, maximum=0.4, step=0.01, default=0.2, label="Raise Pitch on new verse")
-                         ],
-                     outputs=[gr.outputs.Audio(type="numpy", label="Poem read with prose reading"),
-                              gr.outputs.Audio(type="numpy", label="Poem cloned from a reference"),
-                              gr.outputs.Audio(type="numpy", label="Poem after human-in-the-loop adjustments")],
-                     layout="vertical",
-                     title="PoeticTTS - Customizing Poetry",
-                     thumbnail="Utility/toucan.png",
-                     theme="default",
-                     allow_flagging="never",
-                     allow_screenshot=False,
-                     description="Customize how a poem is read by a text-to-speech system with intuitive high-level controls. You can control phrasing markers to go from prose style syntactic phrasing to verse aware poetry style phrasing with the sliders below.",
-                     article=article)
-iface.launch(enable_queue=True)
diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/audio.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/audio.py
deleted file mode 100644
index 3bdb70ba9357e95ff05853dcc06437c3401ef3be..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/audio.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from functools import lru_cache
-from typing import Union
-
-import ffmpeg
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from .utils import exact_div
-
-from librosa.filters import mel as librosa_mel_fn
-
-# hard-coded audio hyperparameters
-SAMPLE_RATE = 16000
-N_FFT = 400
-N_MELS = 80
-HOP_LENGTH = 160
-CHUNK_LENGTH = 30
-N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000: number of samples in a chunk
-N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000: number of frames in a mel spectrogram input
-
-
-def load_audio(file: str, sr: int = SAMPLE_RATE):
-    """
-    Open an audio file and read it as a mono waveform, resampling as necessary.
-
-    Parameters
-    ----------
-    file: str
-        The audio file to open
-
-    sr: int
-        The sample rate to resample the audio if necessary
-
-    Returns
-    -------
-    A NumPy array containing the audio waveform, in float32 dtype.
-    """
-    try:
-        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
-        # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
-        out, _ = (
-            ffmpeg.input(file, threads=0)
-            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
-            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
-        )
-    except ffmpeg.Error as e:
-        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
-
-    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
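-
-# Minimal usage sketch (hypothetical file; requires the ffmpeg CLI on PATH):
-#     wav = load_audio("speech.wav")  # float32 NumPy array in [-1.0, 1.0] at 16 kHz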
-
-
-def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
-    """
-    Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
-    """
-    if torch.is_tensor(array):
-        if array.shape[axis] > length:
-            array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
-
-        if array.shape[axis] < length:
-            pad_widths = [(0, 0)] * array.ndim
-            pad_widths[axis] = (0, length - array.shape[axis])
-            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
-    else:
-        if array.shape[axis] > length:
-            array = array.take(indices=range(length), axis=axis)
-
-        if array.shape[axis] < length:
-            pad_widths = [(0, 0)] * array.ndim
-            pad_widths[axis] = (0, length - array.shape[axis])
-            array = np.pad(array, pad_widths)
-
-    return array
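-
-# Minimal usage sketch (hypothetical tensor; pads a 10-second clip out to 30 seconds):
-#     padded = pad_or_trim(torch.zeros(10 * SAMPLE_RATE))  # -> shape (N_SAMPLES,)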
-
-
-@lru_cache(maxsize=None)
-def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
-    """
-    load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
-    Allows decoupling librosa dependency; saved using:
-
-        np.savez_compressed(
-            "mel_filters.npz",
-            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
-        )
-    """
-    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
-    return torch.from_numpy(librosa_mel_fn(sr=SAMPLE_RATE, n_fft=N_FFT, n_mels=n_mels)).to(device)
-
-
-def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
-    """
-    Compute the log-Mel spectrogram of the given audio.
-
-    Parameters
-    ----------
-    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
-        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
-
-    n_mels: int
-        The number of Mel-frequency filters; only 80 is supported
-
-    Returns
-    -------
-    torch.Tensor, shape = (80, n_frames)
-        A Tensor that contains the Mel spectrogram
-    """
-    if not torch.is_tensor(audio):
-        if isinstance(audio, str):
-            audio = load_audio(audio)
-        audio = torch.from_numpy(audio)
-
-    window = torch.hann_window(N_FFT).to(audio.device)
-    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
-    magnitudes = stft[..., :-1].abs() ** 2
-
-    filters = mel_filters(audio.device, n_mels)
-    mel_spec = filters @ magnitudes
-
-    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
-    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
-    log_spec = (log_spec + 4.0) / 4.0
-    return log_spec
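-
-# Minimal usage sketch (hypothetical file; chains the helpers above):
-#     audio = pad_or_trim(load_audio("speech.wav"))
-#     mel = log_mel_spectrogram(audio)  # -> torch.Tensor of shape (80, 3000)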
diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_indistribution.sh b/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_indistribution.sh
deleted file mode 100644
index 2aa5c62485f29a6516f7c5a3451cb7d80c11159d..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_indistribution.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-DATA_DIR=$1
-TRAINTASK=${2-'[rainbow-stack,bowl-ball-placement]'}
-TASKNAME=${3-'mix-two'}
-STEPS=${4-'20000'}
-
-DISP=False
-
-echo "Training multi-task dataset... Folder: $DATA_DIR Task $TASK"
-trap "kill 0" SIGINT
-# You can parallelize these depending on how much resources you have
-
-#############################
-## Language-Conditioned Tasks
-# [align-rope,assembling-kits-seq-seen-colors,assembling-kits-seq-unseen-colors,packing-shapes]
-
-
-# TRAIN
-python cliport/train.py train.task=$TRAINTASK \
-                train.agent=cliport \
-                train.model_task=$TASKNAME \
-                train.attn_stream_fusion_type=add \
-                train.trans_stream_fusion_type=conv \
-                train.lang_fusion_type=mult \
-                train.n_demos=200 \
-                train.n_steps=${STEPS} \
-                dataset.cache=True \
-                train.exp_folder=exps/exp-$TASKNAME \
-                dataset.type=multi  \
-                train.load_from_last_ckpt=False
-
-# Convert Python list to Bash array
-bash_array=$(python3 -c "import sys; print(' '.join((sys.argv[1])[1:-1].split(',')))" "$TRAINTASK")
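-# e.g. TRAINTASK='[rainbow-stack,bowl-ball-placement]' -> bash_array='rainbow-stack bowl-ball-placement'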
-
-echo "Testing multi-task dataset... Folder: $DATA_DIR Tasks: $bash_array"
-
-
-for task in $bash_array
-    do
-        echo "Testing $task"
-        # TEST
-        # bash scripts/generate_gpt_datasets.sh data $task
-        
-        python cliport/eval.py model_task=$TASKNAME \
-                       eval_task=$task \
-                       agent=cliport \
-                       mode=test \
-                       n_demos=100 \
-                       train_demos=200 \
-                       checkpoint_type=test_best \
-                       type=single \
-                       exp_folder=exps/exp-$TASKNAME \
-                       update_results=True  &
-    done
-wait
-
-python notebooks/print_results.py -r=exps/exp-$TASKNAME
-echo "Finished Training."
\ No newline at end of file
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
deleted file mode 100644
index 815f2857f99791232664ecc9e82ea860fdcaa268..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
-# learning policy
-lr_config = dict(step=[24, 27])
-runner = dict(type='EpochBasedRunner', max_epochs=28)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
deleted file mode 100644
index ca62682a3b2d328cc9a8fd08887bcc1bac53104d..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py'
-# learning policy
-lr_config = dict(step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_r50_fpn_1x_coco.py
deleted file mode 100644
index 929cf464f6091f8380fd1057b282f29f4f7a8b5f..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_r50_fpn_1x_coco.py
+++ /dev/null
@@ -1,56 +0,0 @@
-_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
-model = dict(
-    roi_head=dict(
-        semantic_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[8]),
-        semantic_head=dict(
-            type='FusedSemanticHead',
-            num_ins=5,
-            fusion_level=1,
-            num_convs=4,
-            in_channels=256,
-            conv_out_channels=256,
-            num_classes=183,
-            ignore_label=255,
-            loss_weight=0.2)))
-data_root = 'data/coco/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
-    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=32),
-    dict(type='SegRescale', scale_factor=1 / 8),
-    dict(type='DefaultFormatBundle'),
-    dict(
-        type='Collect',
-        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(1333, 800),
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip', flip_ratio=0.5),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    train=dict(
-        seg_prefix=data_root + 'stuffthingmaps/train2017/',
-        pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py
deleted file mode 100644
index 8a6968ea583758191fa8e94497c7186e653c7afb..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './encnet_r50-d8_512x512_40k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/solvers/audiogen.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/solvers/audiogen.py
deleted file mode 100644
index 1568f97fe7b84b90c7ef760ef5606fe0a475545a..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/solvers/audiogen.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from . import builders, musicgen
-
-
-class AudioGenSolver(musicgen.MusicGenSolver):
-    """Solver for AudioGen re-implementation training task.
-
-    Note that this implementation does not strictly follow
-    the method proposed in https://arxiv.org/abs/2209.15352
-    but is derived from MusicGen's training pipeline.
-
-    More information can be found in the AudioGen model card.
-    """
-    DATASET_TYPE: builders.DatasetType = builders.DatasetType.SOUND
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/quantization/base.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/quantization/base.py
deleted file mode 100644
index 1b16c130d266fbd021d3fc29bb9f98c33dd3c588..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/quantization/base.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Base class for all quantizers.
-"""
-
-from dataclasses import dataclass, field
-import typing as tp
-
-import torch
-from torch import nn
-
-
-@dataclass
-class QuantizedResult:
-    x: torch.Tensor
-    codes: torch.Tensor
-    bandwidth: torch.Tensor  # bandwidth in kb/s used, per batch item.
-    penalty: tp.Optional[torch.Tensor] = None
-    metrics: dict = field(default_factory=dict)
-
-
-class BaseQuantizer(nn.Module):
-    """Base class for quantizers.
-    """
-
-    def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
-        """
-        Given input tensor x, returns first the quantized (or approximately quantized)
-        representation along with quantized codes, bandwidth, and any penalty term for the loss.
-        Finally, this returns a dict of metrics to update logging etc.
-        Frame rate must be passed so that the bandwidth is properly computed.
-        """
-        raise NotImplementedError()
-
-    def encode(self, x: torch.Tensor) -> torch.Tensor:
-        """Encode a given input tensor with the specified sample rate at the given bandwidth.
-        """
-        raise NotImplementedError()
-
-    def decode(self, codes: torch.Tensor) -> torch.Tensor:
-        """Decode the given codes to the quantized representation.
-        """
-        raise NotImplementedError()
-
-    @property
-    def total_codebooks(self):
-        """Total number of codebooks.
-        """
-        raise NotImplementedError()
-
-    @property
-    def num_codebooks(self):
-        """Number of active codebooks.
-        """
-        raise NotImplementedError()
-
-    def set_num_codebooks(self, n: int):
-        """Set the number of active codebooks.
-        """
-        raise NotImplementedError()
-
-
-class DummyQuantizer(BaseQuantizer):
-    """Fake quantizer that actually does not perform any quantization.
-    """
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x: torch.Tensor, frame_rate: int):
-        q = x.unsqueeze(1)
-        return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x))
-
-    def encode(self, x: torch.Tensor) -> torch.Tensor:
-        """Encode a given input tensor with the specified sample rate at the given bandwidth.
-        In the case of the DummyQuantizer, the codes are actually identical
-        to the input and resulting quantized representation as no quantization is done.
-        """
-        return x.unsqueeze(1)
-
-    def decode(self, codes: torch.Tensor) -> torch.Tensor:
-        """Decode the given codes to the quantized representation.
-        In the case of the DummyQuantizer, the codes are actually identical
-        to the input and resulting quantized representation as no quantization is done.
-        """
-        return codes.squeeze(1)
-
-    @property
-    def total_codebooks(self):
-        """Total number of codebooks.
-        """
-        return 1
-
-    @property
-    def num_codebooks(self):
-        """Total number of codebooks.
-        """
-        return self.total_codebooks
-
-    def set_num_codebooks(self, n: int):
-        """Set the number of active codebooks.
-        """
-        raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")
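-
-
-# Minimal usage sketch (hypothetical shapes; DummyQuantizer is an identity pass-through):
-#     q = DummyQuantizer()
-#     res = q(torch.randn(2, 8, 100), frame_rate=50)
-#     assert torch.equal(res.codes.squeeze(1), res.x)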
diff --git a/spaces/Grezz/generate_human_motion/pyrender/docs/make.bat b/spaces/Grezz/generate_human_motion/pyrender/docs/make.bat
deleted file mode 100644
index 4d9eb83d9f9309029f4b14ff09024658bb0f5563..0000000000000000000000000000000000000000
--- a/spaces/Grezz/generate_human_motion/pyrender/docs/make.bat
+++ /dev/null
@@ -1,35 +0,0 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=source
-set BUILDDIR=build
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.http://sphinx-doc.org/
-	exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-
-:end
-popd
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py
deleted file mode 100644
index 9ca1f5c72098debfb0ffa1ba1b81eb92eb64d428..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import os
-import platform
-
-import torch
-from torch.nn import functional as F
-from torch.autograd import Function
-from torch.utils.cpp_extension import load
-
-use_fallback = False
-
-# Try loading precompiled, otherwise use native fallback
-try:
-    import upfirdn2d_op
-except ModuleNotFoundError as e:
-    print('StyleGAN2: Optimized CUDA op UpFirDn2d not available, using native PyTorch fallback.')
-    use_fallback = True
-
-class UpFirDn2dBackward(Function):
-    @staticmethod
-    def forward(
-        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
-    ):
-
-        up_x, up_y = up
-        down_x, down_y = down
-        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
-        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
-        grad_input = upfirdn2d_op.upfirdn2d(
-            grad_output,
-            grad_kernel,
-            down_x,
-            down_y,
-            up_x,
-            up_y,
-            g_pad_x0,
-            g_pad_x1,
-            g_pad_y0,
-            g_pad_y1,
-        )
-        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
-
-        ctx.save_for_backward(kernel)
-
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        ctx.up_x = up_x
-        ctx.up_y = up_y
-        ctx.down_x = down_x
-        ctx.down_y = down_y
-        ctx.pad_x0 = pad_x0
-        ctx.pad_x1 = pad_x1
-        ctx.pad_y0 = pad_y0
-        ctx.pad_y1 = pad_y1
-        ctx.in_size = in_size
-        ctx.out_size = out_size
-
-        return grad_input
-
-    @staticmethod
-    def backward(ctx, gradgrad_input):
-        kernel, = ctx.saved_tensors
-
-        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
-
-        gradgrad_out = upfirdn2d_op.upfirdn2d(
-            gradgrad_input,
-            kernel,
-            ctx.up_x,
-            ctx.up_y,
-            ctx.down_x,
-            ctx.down_y,
-            ctx.pad_x0,
-            ctx.pad_x1,
-            ctx.pad_y0,
-            ctx.pad_y1,
-        )
-        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
-        gradgrad_out = gradgrad_out.view(
-            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
-        )
-
-        return gradgrad_out, None, None, None, None, None, None, None, None
-
-
-class UpFirDn2d(Function):
-    @staticmethod
-    def forward(ctx, input, kernel, up, down, pad):
-        up_x, up_y = up
-        down_x, down_y = down
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        kernel_h, kernel_w = kernel.shape
-        batch, channel, in_h, in_w = input.shape
-        ctx.in_size = input.shape
-
-        input = input.reshape(-1, in_h, in_w, 1)
-
-        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
-        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-        ctx.out_size = (out_h, out_w)
-
-        ctx.up = (up_x, up_y)
-        ctx.down = (down_x, down_y)
-        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
-        g_pad_x0 = kernel_w - pad_x0 - 1
-        g_pad_y0 = kernel_h - pad_y0 - 1
-        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
-        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
-        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
-        out = upfirdn2d_op.upfirdn2d(
-            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-        )
-        # out = out.view(major, out_h, out_w, minor)
-        out = out.view(-1, channel, out_h, out_w)
-
-        return out
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        kernel, grad_kernel = ctx.saved_tensors
-
-        grad_input = UpFirDn2dBackward.apply(
-            grad_output,
-            kernel,
-            grad_kernel,
-            ctx.up,
-            ctx.down,
-            ctx.pad,
-            ctx.g_pad,
-            ctx.in_size,
-            ctx.out_size,
-        )
-
-        return grad_input, None, None, None, None
-
-
-def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
-    if use_fallback or input.device.type == "cpu":
-        out = upfirdn2d_native(
-            input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
-        )
-    else:
-        out = UpFirDn2d.apply(
-            input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
-        )
-
-    return out
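-
-# Minimal usage sketch (hypothetical tensors; 2x upsampling with a box kernel, runs on the
-# native fallback path on CPU):
-#     kernel = torch.ones(4, 4) / 16
-#     y = upfirdn2d(torch.randn(1, 3, 64, 64), kernel, up=2, down=1, pad=(1, 1))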
-
-
-def upfirdn2d_native(
-    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-):
-    _, channel, in_h, in_w = input.shape
-    input = input.reshape(-1, in_h, in_w, 1)
-
-    _, in_h, in_w, minor = input.shape
-    kernel_h, kernel_w = kernel.shape
-
-    out = input.view(-1, in_h, 1, in_w, 1, minor)
-    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
-    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
-    out = F.pad(
-        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
-    )
-    out = out[
-        :,
-        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
-        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
-        :,
-    ]
-
-    out = out.permute(0, 3, 1, 2)
-    out = out.reshape(
-        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
-    )
-    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
-    out = F.conv2d(out, w)
-    out = out.reshape(
-        -1,
-        minor,
-        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
-        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
-    )
-    out = out.permute(0, 2, 3, 1)
-    out = out[:, ::down_y, ::down_x, :]
-
-    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
-    return out.view(-1, channel, out_h, out_w)
diff --git a/spaces/Haleyok/stablelm-tuned-alpha-chat/README.md b/spaces/Haleyok/stablelm-tuned-alpha-chat/README.md
deleted file mode 100644
index 650958ad876049536679267341d0b3ac009b555d..0000000000000000000000000000000000000000
--- a/spaces/Haleyok/stablelm-tuned-alpha-chat/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Stablelm Tuned Alpha Chat
-emoji: 👀
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-duplicated_from: stabilityai/stablelm-tuned-alpha-chat
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_bert-3.9B_afqmc.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_bert-3.9B_afqmc.sh
deleted file mode 100644
index 9d36b627d6cc1b0a8de575138eec6a7529b31137..0000000000000000000000000000000000000000
--- a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_bert-3.9B_afqmc.sh
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=afqmc # create a short name for your job
-#SBATCH --nodes=1 # node count
-#SBATCH --ntasks=4 # total number of tasks across all nodes
-#SBATCH --cpus-per-task=20 # cpu-cores per task (>1 if multi-threaded tasks)
-#SBATCH --gres=gpu:4 # number of gpus per node
-#SBATCH --mail-type=ALL # send email when the job begins, ends, or fails
-#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id)
-
-set -x -e
-echo "START TIME: $(date)"
-
-export TORCH_EXTENSIONS_DIR=/cognitive_comp/gaoxinyu/cache/torch_extendsions
-
-BERT_NAME=bert-3.9B
-
-TASK=afqmc
-TEXTA_NAME=sentence1
-TEXTB_NAME=sentence2
-LABEL_NAME=label
-ID_NAME=id
-
-
-BATCH_SIZE=8
-VAL_BATCH_SIZE=32
-ZERO_STAGE=2
-STRATEGY=deepspeed_stage_${ZERO_STAGE}
-
-DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/
-PRETRAINED_MODEL_PATH=/cognitive_comp/gaoxinyu/pretrained_model/$BERT_NAME/
-
-
-CHECKPOINT_PATH=/cognitive_comp/gaoxinyu/ln_model/fintune/ckpt/fengshen-finetune/$TASK/
-DEFAULT_ROOT_DIR=/cognitive_comp/gaoxinyu/ln_model/finetune/${BERT_NAME}-${TASK}
-OUTPUT_PATH=/cognitive_comp/gaoxinyu/ln_model/finetune/${BERT_NAME}-${TASK}/predict.json
-
-
-config_json="./ds_config.json"
-# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
-# reduce_bucket_size: hidden_size*hidden_size
-# stage3_prefetch_bucket_size: 0.9 * hidden_size * hidden_size
-# stage3_param_persistence_threshold: 10 * hidden_size
-
-cat <<EOT > $config_json
-{
-  "train_micro_batch_size_per_gpu": $BATCH_SIZE,
-  "steps_per_print": 1000,
-  "gradient_clipping": 0.1,
-  "zero_optimization": {
-        "stage": 2
-    },
-  "optimizer": {
-    "type": "Adam",
-    "params": {
-      "lr": 1e-7,
-      "eps": 1e-12,
-      "weight_decay": 1e-1
-    }
-  },
-  "scheduler": {
-    "type": "WarmupLR",
-    "params":{
-      "warmup_min_lr": 1e-8,
-      "warmup_max_lr": 1e-6,
-      "warmup_num_steps": 400,
-      "warmup_type": "linear"
-    }
-  },
-  "zero_allow_untested_optimizer": false,
-  "fp16": {
-    "enabled": true,
-    "loss_scale": 0,
-    "loss_scale_window": 1000,
-    "hysteresis": 2,
-    "min_loss_scale": 1
-  },
-  "activation_checkpointing": {
-    "partition_activations": false,
-    "contiguous_memory_optimization": false
-  },
-  "wall_clock_breakdown": false
-}
-EOT
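-
-# Optional sanity check (a sketch): confirm the generated DeepSpeed config is valid JSON
-# python3 -c "import json; json.load(open('$config_json'))"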
-
-export PL_DEEPSPEED_CONFIG_PATH=$config_json
-
-
-DATA_ARGS="\
-        --data_dir $DATA_DIR \
-        --train_data train.json \
-        --valid_data dev.json \
-        --test_data test.json \
-        --train_batchsize $BATCH_SIZE \
-        --valid_batchsize $VAL_BATCH_SIZE \
-        --max_length 128 \
-        --texta_name $TEXTA_NAME \
-        --textb_name $TEXTB_NAME \
-        --label_name $LABEL_NAME \
-        --id_name $ID_NAME \
-        "
-
-MODEL_ARGS="\
-        --learning_rate 1e-5 \
-        --weight_decay 1e-2 \
-        --warmup 0.01 \
-        --num_labels 2 \
-        "
-
-MODEL_CHECKPOINT_ARGS="\
-        --monitor val_acc \
-        --save_top_k 3 \
-        --mode max \
-        --every_n_train_steps 0 \
-        --save_weights_only True \
-        --dirpath $CHECKPOINT_PATH \
-        --filename model-{epoch:02d}-{val_acc:.4f} \
-        "
-
-
-TRAINER_ARGS="\
-        --max_epochs 67 \
-        --gpus 4 \
-        --num_nodes 1 \
-        --strategy $STRATEGY \
-        --gradient_clip_val 1.0 \
-        --check_val_every_n_epoch 1 \
-        --val_check_interval 100 \
-        --precision 16 \
-        --default_root_dir $DEFAULT_ROOT_DIR \
-        "
-
-options=" \
-        --pretrained_model_path $PRETRAINED_MODEL_PATH \
-        --output_save_path $OUTPUT_PATH \
-        $DATA_ARGS \
-        $MODEL_ARGS \
-        $MODEL_CHECKPOINT_ARGS \
-        $TRAINER_ARGS \
-        "
-
-DOCKER_PATH=/cognitive_comp/gaoxinyu/docker/pytorch21_06_py3_docker_image_v2.sif
-SCRIPT_PATH=/cognitive_comp/gaoxinyu/github/Fengshenbang-LM/fengshen/examples/classification/finetune_classification.py
-
-# python3 $SCRIPT_PATH $options
-srun -N 1 --job-name=afqmc --jobid=151522 --ntasks=4 --cpus-per-task=15 --gres=gpu:4 -o %x-%j.log singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $DOCKER_PATH python3 $SCRIPT_PATH $options
-
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/byte_level_bpe/get_bitext.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/byte_level_bpe/get_bitext.py
deleted file mode 100644
index 6ac1eeec1e6167ec6bafd76b37173ee6987cae7e..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/byte_level_bpe/get_bitext.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import argparse
-import os
-import os.path as op
-from collections import namedtuple
-from multiprocessing import cpu_count
-from typing import List, Optional
-
-import sentencepiece as sp
-from fairseq.data.encoders.byte_bpe import ByteBPE
-from fairseq.data.encoders.byte_utils import byte_encode
-from fairseq.data.encoders.bytes import Bytes
-from fairseq.data.encoders.characters import Characters
-from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
-from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
-
-
-SPLITS = ["train", "valid", "test"]
-
-
-def _convert_xml(in_path: str, out_path: str):
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            ss = s.strip()
-            if not ss.startswith("<seg"):
-                continue
-            ss = ss.replace("</seg>", "").split('">')
-            assert len(ss) == 2
-            f_o.write(ss[1].strip() + "\n")
-
-
-def _convert_train(in_path: str, out_path: str):
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            ss = s.strip()
-            if ss.startswith("<"):
-                continue
-            f_o.write(ss.strip() + "\n")
-
-
-def _get_bytes(in_path: str, out_path: str):
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            f_o.write(Bytes.encode(s.strip()) + "\n")
-
-
-def _get_chars(in_path: str, out_path: str):
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            f_o.write(Characters.encode(s.strip()) + "\n")
-
-
-def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
-    Args = namedtuple(
-        "Args",
-        [
-            "moses_source_lang",
-            "moses_target_lang",
-            "moses_no_dash_splits",
-            "moses_no_escape",
-        ],
-    )
-    args = Args(
-        moses_source_lang=src,
-        moses_target_lang=tgt,
-        moses_no_dash_splits=False,
-        moses_no_escape=False,
-    )
-    pretokenizer = MosesTokenizer(args)
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            f_o.write(pretokenizer.encode(s.strip()) + "\n")
-
-
-def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
-    with open(out_path, "w") as f_o:
-        for lang in [src, tgt]:
-            with open(f"{in_path_prefix}.{lang}") as f:
-                for s in f:
-                    f_o.write(byte_encode(s.strip()) + "\n")
-
-
-def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
-    arguments = [
-        f"--input={in_path}",
-        f"--model_prefix={model_prefix}",
-        f"--model_type=bpe",
-        f"--vocab_size={vocab_size}",
-        "--character_coverage=1.0",
-        "--normalization_rule_name=identity",
-        f"--num_threads={cpu_count()}",
-    ]
-    sp.SentencePieceTrainer.Train(" ".join(arguments))
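-
-# Minimal usage sketch (hypothetical paths; trains a 4k-merge BPE model over a raw text file):
-#     _get_bpe("data/train.all", "data/spm_bpe4000", 4000)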
-
-
-def _apply_bbpe(model_path: str, in_path: str, out_path: str):
-    Args = namedtuple("Args", ["sentencepiece_model_path"])
-    args = Args(sentencepiece_model_path=model_path)
-    tokenizer = ByteBPE(args)
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            f_o.write(tokenizer.encode(s.strip()) + "\n")
-
-
-def _apply_bpe(model_path: str, in_path: str, out_path: str):
-    Args = namedtuple("Args", ["sentencepiece_model"])
-    args = Args(sentencepiece_model=model_path)
-    tokenizer = SentencepieceBPE(args)
-    with open(in_path) as f, open(out_path, "w") as f_o:
-        for s in f:
-            f_o.write(tokenizer.encode(s.strip()) + "\n")
-
-
-def _concat_files(in_paths: List[str], out_path: str):
-    with open(out_path, "w") as f_o:
-        for p in in_paths:
-            with open(p) as f:
-                for r in f:
-                    f_o.write(r)
-
-
-def preprocess_iwslt17(
-    root: str,
-    src: str,
-    tgt: str,
-    bpe_size: Optional[int],
-    need_chars: bool,
-    bbpe_size: Optional[int],
-    need_bytes: bool,
-):
-    # extract bitext
-    in_root = op.join(root, f"{src}-{tgt}")
-    for lang in [src, tgt]:
-        _convert_train(
-            op.join(in_root, f"train.tags.{src}-{tgt}.{lang}"),
-            op.join(root, f"train.{lang}"),
-        )
-        _convert_xml(
-            op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"),
-            op.join(root, f"valid.{lang}"),
-        )
-        _convert_xml(
-            op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"),
-            op.join(root, f"test.{lang}"),
-        )
-    # pre-tokenize
-    for lang in [src, tgt]:
-        for split in SPLITS:
-            pretokenize(
-                op.join(root, f"{split}.{lang}"),
-                op.join(root, f"{split}.moses.{lang}"),
-                src,
-                tgt,
-            )
-    # tokenize with BPE vocabulary
-    if bpe_size is not None:
-        # learn vocabulary
-        concated_train_path = op.join(root, "train.all")
-        _concat_files(
-            [op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
-            concated_train_path,
-        )
-        bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}")
-        _get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
-        os.remove(concated_train_path)
-        # apply
-        for lang in [src, tgt]:
-            for split in SPLITS:
-                _apply_bpe(
-                    bpe_model_prefix + ".model",
-                    op.join(root, f"{split}.moses.{lang}"),
-                    op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"),
-                )
-    # tokenize with bytes vocabulary
-    if need_bytes:
-        for lang in [src, tgt]:
-            for split in SPLITS:
-                _get_bytes(
-                    op.join(root, f"{split}.moses.{lang}"),
-                    op.join(root, f"{split}.moses.bytes.{lang}"),
-                )
-    # tokenize with characters vocabulary
-    if need_chars:
-        for lang in [src, tgt]:
-            for split in SPLITS:
-                _get_chars(
-                    op.join(root, f"{split}.moses.{lang}"),
-                    op.join(root, f"{split}.moses.chars.{lang}"),
-                )
-    # tokenize with byte-level BPE vocabulary
-    if bbpe_size is not None:
-        # learn vocabulary
-        bchar_path = op.join(root, "train.bchar")
-        _convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path)
-        bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}")
-        _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
-        os.remove(bchar_path)
-        # apply
-        for lang in [src, tgt]:
-            for split in SPLITS:
-                _apply_bbpe(
-                    bbpe_model_prefix + ".model",
-                    op.join(root, f"{split}.moses.{lang}"),
-                    op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"),
-                )
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--root", type=str, default="data")
-    parser.add_argument(
-        "--bpe-vocab",
-        default=None,
-        type=int,
-        help="Generate tokenized bitext with BPE of size K."
-        "Default to None (disabled).",
-    )
-    parser.add_argument(
-        "--bbpe-vocab",
-        default=None,
-        type=int,
-        help="Generate tokenized bitext with BBPE of size K."
-        "Default to None (disabled).",
-    )
-    parser.add_argument(
-        "--byte-vocab",
-        action="store_true",
-        help="Generate tokenized bitext with bytes vocabulary",
-    )
-    parser.add_argument(
-        "--char-vocab",
-        action="store_true",
-        help="Generate tokenized bitext with chars vocabulary",
-    )
-    args = parser.parse_args()
-
-    preprocess_iwslt17(
-        args.root,
-        "fr",
-        "en",
-        args.bpe_vocab,
-        args.char_vocab,
-        args.bbpe_vocab,
-        args.byte_vocab,
-    )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/README.md b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/README.md
deleted file mode 100644
index 66acada04f58fa235cd312753f144f6f1e5f4a33..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# LASER: Language-Agnostic SEntence Representations
-
-LASER is a library to calculate and use multilingual sentence embeddings.
-
-You can find more information about LASER and how to use it on the official [LASER repository](https://github.com/facebookresearch/LASER).
-
-This folder contains source code for training LASER embeddings.
-
-
-## Prepare data and configuration file
-
-Binarize your data with fairseq, as described [here](https://fairseq.readthedocs.io/en/latest/getting_started.html#data-pre-processing).
-
-Create a json config file with this format:
-```
-{
-  "src_vocab": "/path/to/spm.src.cvocab",
-  "tgt_vocab": "/path/to/spm.tgt.cvocab",
-  "train": [
-    {
-      "type": "translation",
-      "id": 0,
-      "src": "/path/to/srclang1-tgtlang0/train.srclang1",
-      "tgt": "/path/to/srclang1-tgtlang0/train.tgtlang0"
-    },
-    {
-      "type": "translation",
-      "id": 1,
-      "src": "/path/to/srclang1-tgtlang1/train.srclang1",
-      "tgt": "/path/to/srclang1-tgtlang1/train.tgtlang1"
-    },
-    {
-      "type": "translation",
-      "id": 0,
-      "src": "/path/to/srclang2-tgtlang0/train.srclang2",
-      "tgt": "/path/to/srclang2-tgtlang0/train.tgtlang0"
-    },
-    {
-      "type": "translation",
-      "id": 1,
-      "src": "/path/to/srclang2-tgtlang1/train.srclang2",
-      "tgt": "/path/to/srclang2-tgtlang1/train.tgtlang1"
-    },
-    ...
-  ],
-  "valid": [
-    {
-      "type": "translation",
-      "id": 0,
-      "src": "/unused",
-      "tgt": "/unused"
-    }
-  ]
-}
-```
-where each path points to a binarized, indexed fairseq dataset file and
-`id` represents the target language id.
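-
-For example, with the config above, every bitext whose target side is `tgtlang0` shares target
-language id `0`, and every bitext targeting `tgtlang1` shares id `1`. This id is presumably what
-the decoder's language embedding (`--decoder-lang-embed-dim` in the training command below) is
-indexed by.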
-
-
-## Training Command Line Example
-
-```
-fairseq-train \
-  /path/to/configfile_described_above.json \
-  --user-dir examples/laser/laser_src \
-  --log-interval 100 --log-format simple \
-  --task laser --arch laser_lstm \
-  --save-dir . \
-  --optimizer adam \
-  --lr 0.001 \
-  --lr-scheduler inverse_sqrt \
-  --clip-norm 5 \
-  --warmup-updates 90000 \
-  --update-freq 2 \
-  --dropout 0.0 \
-  --encoder-dropout-out 0.1 \
-  --max-tokens 2000 \
-  --max-epoch 50 \
-  --encoder-bidirectional \
-  --encoder-layers 5 \
-  --encoder-hidden-size 512 \
-  --decoder-layers 1 \
-  --decoder-hidden-size 2048 \
-  --encoder-embed-dim 320 \
-  --decoder-embed-dim 320 \
-  --decoder-lang-embed-dim 32 \
-  --warmup-init-lr 0.001 \
-  --disable-validation
-```
-
-
-## Applications
-
-We showcase several applications of multilingual sentence embeddings
-with code to reproduce our results (in the directory "tasks").
-
-* [**Cross-lingual document classification**](https://github.com/facebookresearch/LASER/tree/master/tasks/mldoc) using the
-  [*MLDoc*](https://github.com/facebookresearch/MLDoc) corpus [2,6]
-* [**WikiMatrix**](https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
-   Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia [7]
-* [**Bitext mining**](https://github.com/facebookresearch/LASER/tree/master/tasks/bucc) using the
-  [*BUCC*](https://comparable.limsi.fr/bucc2018/bucc2018-task.html) corpus [3,5]
-* [**Cross-lingual NLI**](https://github.com/facebookresearch/LASER/tree/master/tasks/xnli)
-  using the [*XNLI*](https://www.nyu.edu/projects/bowman/xnli/) corpus [4,5,6]
-* [**Multilingual similarity search**](https://github.com/facebookresearch/LASER/tree/master/tasks/similarity) [1,6]
-* [**Sentence embedding of text files**](https://github.com/facebookresearch/LASER/tree/master/tasks/embed)
-  an example of how to calculate sentence embeddings for arbitrary text files in any of the supported languages.
-
-**For all tasks, we use exactly the same multilingual encoder, without any task-specific optimization or fine-tuning.**
-
-
-
-## References
-
-[1] Holger Schwenk and Matthijs Douze,
-    [*Learning Joint Multilingual Sentence Representations with Neural Machine Translation*](https://aclanthology.info/papers/W17-2619/w17-2619),
-    ACL workshop on Representation Learning for NLP, 2017
-
-[2] Holger Schwenk and Xian Li,
-    [*A Corpus for Multilingual Document Classification in Eight Languages*](http://www.lrec-conf.org/proceedings/lrec2018/pdf/658.pdf),
-    LREC, pages 3548-3551, 2018.
-
-[3] Holger Schwenk,
-    [*Filtering and Mining Parallel Data in a Joint Multilingual Space*](http://aclweb.org/anthology/P18-2037)
-    ACL, July 2018
-
-[4] Alexis Conneau, Guillaume Lample, Ruty Rinott, Adina Williams, Samuel R. Bowman, Holger Schwenk and Veselin Stoyanov,
-    [*XNLI: Cross-lingual Sentence Understanding through Inference*](https://aclweb.org/anthology/D18-1269),
-    EMNLP, 2018.
-
-[5] Mikel Artetxe and Holger Schwenk,
-    [*Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings*](https://arxiv.org/abs/1811.01136)
-    arXiv, Nov 3 2018.
-
-[6] Mikel Artetxe and Holger Schwenk,
-    [*Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond*](https://arxiv.org/abs/1812.10464)
-    arXiv, Dec 26 2018.
-
-[7] Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong and Paco Guzman,
-    [*WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia*](https://arxiv.org/abs/1907.05791)
-    arXiv, July 11  2019.
-
-[8] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin
-    [*CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB*](https://arxiv.org/abs/1911.04944)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/raw_audio_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/raw_audio_dataset.py
deleted file mode 100644
index f4e965493cdf94a1f92fa7dab45cc68973c8cdb5..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/raw_audio_dataset.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import logging
-import os
-import sys
-import io
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from .. import FairseqDataset
-from ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes
-from fairseq.data.audio.audio_utils import (
-    parse_path,
-    read_from_stored_zip,
-    is_sf_audio_data,
-)
-from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
-
-
-logger = logging.getLogger(__name__)
-
-
-class RawAudioDataset(FairseqDataset):
-    def __init__(
-        self,
-        sample_rate,
-        max_sample_size=None,
-        min_sample_size=0,
-        shuffle=True,
-        pad=False,
-        normalize=False,
-        compute_mask_indices=False,
-        **mask_compute_kwargs,
-    ):
-        super().__init__()
-
-        self.sample_rate = sample_rate
-        self.sizes = []
-        self.max_sample_size = (
-            max_sample_size if max_sample_size is not None else sys.maxsize
-        )
-        self.min_sample_size = min_sample_size
-        self.pad = pad
-        self.shuffle = shuffle
-        self.normalize = normalize
-        self.compute_mask_indices = compute_mask_indices
-        if self.compute_mask_indices:
-            self.mask_compute_kwargs = mask_compute_kwargs
-            self._features_size_map = {}
-            self._C = mask_compute_kwargs["encoder_embed_dim"]
-            self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"])
-
-    def __getitem__(self, index):
-        raise NotImplementedError()
-
-    def __len__(self):
-        return len(self.sizes)
-
-    def postprocess(self, feats, curr_sample_rate):
-        if feats.dim() == 2:
-            feats = feats.mean(-1)
-
-        if curr_sample_rate != self.sample_rate:
-            raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
-
-        assert feats.dim() == 1, feats.dim()
-
-        if self.normalize:
-            with torch.no_grad():
-                feats = F.layer_norm(feats, feats.shape)
-        return feats
-
-    def crop_to_max_size(self, wav, target_size):
-        size = len(wav)
-        diff = size - target_size
-        if diff <= 0:
-            return wav
-
-        start = np.random.randint(0, diff + 1)
-        end = size - diff + start
-        return wav[start:end]
-
-    def _compute_mask_indices(self, dims, padding_mask):
-        B, T, C = dims
-        mask_indices, mask_channel_indices = None, None
-        if self.mask_compute_kwargs["mask_prob"] > 0:
-            mask_indices = compute_mask_indices(
-                (B, T),
-                padding_mask,
-                self.mask_compute_kwargs["mask_prob"],
-                self.mask_compute_kwargs["mask_length"],
-                self.mask_compute_kwargs["mask_selection"],
-                self.mask_compute_kwargs["mask_other"],
-                min_masks=2,
-                no_overlap=self.mask_compute_kwargs["no_mask_overlap"],
-                min_space=self.mask_compute_kwargs["mask_min_space"],
-            )
-            mask_indices = torch.from_numpy(mask_indices)
-        if self.mask_compute_kwargs["mask_channel_prob"] > 0:
-            mask_channel_indices = compute_mask_indices(
-                (B, C),
-                None,
-                self.mask_compute_kwargs["mask_channel_prob"],
-                self.mask_compute_kwargs["mask_channel_length"],
-                self.mask_compute_kwargs["mask_channel_selection"],
-                self.mask_compute_kwargs["mask_channel_other"],
-                no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"],
-                min_space=self.mask_compute_kwargs["mask_channel_min_space"],
-            )
-            mask_channel_indices = (
-                torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1)
-            )
-
-        return mask_indices, mask_channel_indices
-
-    @staticmethod
-    def _bucket_tensor(tensor, num_pad, value):
-        return F.pad(tensor, (0, num_pad), value=value)
-
-    def collater(self, samples):
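-        # Collate raw waveforms into a batch: with padding enabled, every
-        # source is zero-padded up to the longest sample (capped at
-        # max_sample_size); without padding, every source is randomly cropped
-        # to the shortest.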
-        samples = [s for s in samples if s["source"] is not None]
-        if len(samples) == 0:
-            return {}
-
-        sources = [s["source"] for s in samples]
-        sizes = [len(s) for s in sources]
-
-        if self.pad:
-            target_size = min(max(sizes), self.max_sample_size)
-        else:
-            target_size = min(min(sizes), self.max_sample_size)
-
-        collated_sources = sources[0].new_zeros(len(sources), target_size)
-        padding_mask = (
-            torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
-        )
-        for i, (source, size) in enumerate(zip(sources, sizes)):
-            diff = size - target_size
-            if diff == 0:
-                collated_sources[i] = source
-            elif diff < 0:
-                assert self.pad
-                collated_sources[i] = torch.cat(
-                    [source, source.new_full((-diff,), 0.0)]
-                )
-                padding_mask[i, diff:] = True
-            else:
-                collated_sources[i] = self.crop_to_max_size(source, target_size)
-
-        input = {"source": collated_sources}
-        out = {"id": torch.LongTensor([s["id"] for s in samples])}
-        if self.pad:
-            input["padding_mask"] = padding_mask
-
-        if hasattr(self, "num_buckets") and self.num_buckets > 0:
-            assert self.pad, "Cannot bucket without padding first."
-            bucket = max(self._bucketed_sizes[s["id"]] for s in samples)
-            num_pad = bucket - collated_sources.size(-1)
-            if num_pad:
-                input["source"] = self._bucket_tensor(collated_sources, num_pad, 0)
-                input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True)
-
-        if self.compute_mask_indices:
-            B = input["source"].size(0)
-            T = self._get_mask_indices_dims(input["source"].size(-1))
-            padding_mask_reshaped = input["padding_mask"].clone()
-            extra = padding_mask_reshaped.size(1) % T
-            if extra > 0:
-                padding_mask_reshaped = padding_mask_reshaped[:, :-extra]
-            padding_mask_reshaped = padding_mask_reshaped.view(
-                padding_mask_reshaped.size(0), T, -1
-            )
-            padding_mask_reshaped = padding_mask_reshaped.all(-1)
-            input["padding_count"] = padding_mask_reshaped.sum(-1).max().item()
-            mask_indices, mask_channel_indices = self._compute_mask_indices(
-                (B, T, self._C),
-                padding_mask_reshaped,
-            )
-            input["mask_indices"] = mask_indices
-            input["mask_channel_indices"] = mask_channel_indices
-            out["sample_size"] = mask_indices.sum().item()
-
-        out["net_input"] = input
-        return out
-
-    def _get_mask_indices_dims(self, size, padding=0, dilation=1):
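-        # Number of frames the conv feature extractor produces for a raw
-        # waveform of length `size`, applying the standard conv output-length
-        # formula per layer and caching the result. Worked example (assuming
-        # the wav2vec 2.0 default extractor with overall stride 320): a
-        # 16000-sample input (1 s at 16 kHz) maps to 49 frames.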
-        if size not in self._features_size_map:
-            L_in = size
-            for (_, kernel_size, stride) in self._conv_feature_layers:
-                L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
-                L_out = 1 + L_out // stride
-                L_in = L_out
-            self._features_size_map[size] = L_out
-        return self._features_size_map[size]
-
-    def num_tokens(self, index):
-        return self.size(index)
-
-    def size(self, index):
-        """Return an example's size as a float or tuple. This value is used when
-        filtering a dataset with ``--max-positions``."""
-        if self.pad:
-            return self.sizes[index]
-        return min(self.sizes[index], self.max_sample_size)
-
-    def ordered_indices(self):
-        """Return an ordered list of indices. Batches will be constructed based
-        on this order."""
-
-        if self.shuffle:
-            order = [np.random.permutation(len(self))]
-            order.append(
-                np.minimum(
-                    np.array(self.sizes),
-                    self.max_sample_size,
-                )
-            )
-            return np.lexsort(order)[::-1]
-        else:
-            return np.arange(len(self))
-
-    def set_bucket_info(self, num_buckets):
-        self.num_buckets = num_buckets
-        if self.num_buckets > 0:
-            self._collated_sizes = np.minimum(
-                np.array(self.sizes),
-                self.max_sample_size,
-            )
-            self.buckets = get_buckets(
-                self._collated_sizes,
-                self.num_buckets,
-            )
-            self._bucketed_sizes = get_bucketed_sizes(
-                self._collated_sizes, self.buckets
-            )
-            logger.info(
-                f"{len(self.buckets)} bucket(s) for the audio dataset: "
-                f"{self.buckets}"
-            )
-
-
-class FileAudioDataset(RawAudioDataset):
-    def __init__(
-        self,
-        manifest_path,
-        sample_rate,
-        max_sample_size=None,
-        min_sample_size=0,
-        shuffle=True,
-        pad=False,
-        normalize=False,
-        num_buckets=0,
-        compute_mask_indices=False,
-        text_compression_level=TextCompressionLevel.none,
-        **mask_compute_kwargs,
-    ):
-        super().__init__(
-            sample_rate=sample_rate,
-            max_sample_size=max_sample_size,
-            min_sample_size=min_sample_size,
-            shuffle=shuffle,
-            pad=pad,
-            normalize=normalize,
-            compute_mask_indices=compute_mask_indices,
-            **mask_compute_kwargs,
-        )
-
-        self.text_compressor = TextCompressor(level=text_compression_level)
-
-        skipped = 0
-        self.fnames = []
-        sizes = []
-        self.skipped_indices = set()
-
-        with open(manifest_path, "r") as f:
-            self.root_dir = f.readline().strip()
-            for i, line in enumerate(f):
-                items = line.strip().split("\t")
-                assert len(items) == 2, line
-                sz = int(items[1])
-                if min_sample_size is not None and sz < min_sample_size:
-                    skipped += 1
-                    self.skipped_indices.add(i)
-                    continue
-                self.fnames.append(self.text_compressor.compress(items[0]))
-                sizes.append(sz)
-        logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
-
-        self.sizes = np.array(sizes, dtype=np.int64)
-
-        try:
-            import pyarrow
-
-            self.fnames = pyarrow.array(self.fnames)
-        except Exception:
-            logger.debug(
-                "Could not create a pyarrow array. Please install pyarrow for better performance"
-            )
-
-        self.set_bucket_info(num_buckets)
-
-    def __getitem__(self, index):
-        import soundfile as sf
-        fn = self.fnames[index]
-        fn = fn if isinstance(self.fnames, list) else fn.as_py()
-        fn = self.text_compressor.decompress(fn)
-        path_or_fp = os.path.join(self.root_dir, fn)
-        _path, slice_ptr = parse_path(path_or_fp)
-        if len(slice_ptr) == 2:
-            byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
-            assert is_sf_audio_data(byte_data)
-            path_or_fp = io.BytesIO(byte_data)
-
-        wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")
-
-        feats = torch.from_numpy(wav).float()
-        feats = self.postprocess(feats, curr_sample_rate)
-        return {"id": index, "source": feats}
-
-
-class BinarizedAudioDataset(RawAudioDataset):
-    def __init__(
-        self,
-        data_dir,
-        split,
-        sample_rate,
-        max_sample_size=None,
-        min_sample_size=0,
-        shuffle=True,
-        pad=False,
-        normalize=False,
-        num_buckets=0,
-        compute_mask_indices=False,
-        **mask_compute_kwargs,
-    ):
-        super().__init__(
-            sample_rate=sample_rate,
-            max_sample_size=max_sample_size,
-            min_sample_size=min_sample_size,
-            shuffle=shuffle,
-            pad=pad,
-            normalize=normalize,
-            compute_mask_indices=compute_mask_indices,
-            **mask_compute_kwargs,
-        )
-
-        from fairseq.data import data_utils, Dictionary
-
-        self.fnames_dict = Dictionary.load(os.path.join(data_dir, "dict.txt"))
-
-        root_path = os.path.join(data_dir, f"{split}.root")
-        if os.path.exists(root_path):
-            with open(root_path, "r") as f:
-                self.root_dir = next(f).strip()
-        else:
-            self.root_dir = None
-
-        fnames_path = os.path.join(data_dir, split)
-        self.fnames = data_utils.load_indexed_dataset(fnames_path, self.fnames_dict)
-        lengths_path = os.path.join(data_dir, f"{split}.lengths")
-
-        with open(lengths_path, "r") as f:
-            for line in f:
-                sz = int(line.rstrip())
-                assert (
-                    sz >= min_sample_size
-                ), f"Min sample size is not supported for binarized dataset, but found a sample with size {sz}"
-                self.sizes.append(sz)
-
-        self.sizes = np.array(self.sizes, dtype=np.int64)
-
-        self.set_bucket_info(num_buckets)
-        logger.info(f"loaded {len(self.fnames)} samples")
-
-    def __getitem__(self, index):
-        import soundfile as sf
-
-        fname = self.fnames_dict.string(self.fnames[index], separator="")
-        if self.root_dir:
-            fname = os.path.join(self.root_dir, fname)
-
-        wav, curr_sample_rate = sf.read(fname)
-        feats = torch.from_numpy(wav).float()
-        feats = self.postprocess(feats, curr_sample_rate)
-        return {"id": index, "source": feats}
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/subword_nmt_bpe.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/subword_nmt_bpe.py
deleted file mode 100644
index 5d724d2730a5895ca55af2998c2ced471625b516..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/subword_nmt_bpe.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-
-from fairseq import file_utils
-from fairseq.data.encoders import register_bpe
-from fairseq.dataclass import FairseqDataclass
-
-
-@dataclass
-class SubwordNMTBPEConfig(FairseqDataclass):
-    bpe_codes: str = field(default="???", metadata={"help": "path to subword NMT BPE"})
-    bpe_separator: str = field(default="@@", metadata={"help": "BPE separator"})
-
-
-@register_bpe("subword_nmt", dataclass=SubwordNMTBPEConfig)
-class SubwordNMTBPE(object):
-    def __init__(self, cfg):
-        if cfg.bpe_codes is None:
-            raise ValueError("--bpe-codes is required for --bpe=subword_nmt")
-        codes = file_utils.cached_path(cfg.bpe_codes)
-        try:
-            from subword_nmt import apply_bpe
-
-            bpe_parser = apply_bpe.create_parser()
-            bpe_args = bpe_parser.parse_args(
-                [
-                    "--codes",
-                    codes,
-                    "--separator",
-                    cfg.bpe_separator,
-                ]
-            )
-            self.bpe = apply_bpe.BPE(
-                bpe_args.codes,
-                bpe_args.merges,
-                bpe_args.separator,
-                None,
-                bpe_args.glossaries,
-            )
-            self.bpe_symbol = bpe_args.separator + " "
-        except ImportError:
-            raise ImportError(
-                "Please install subword_nmt with: pip install subword-nmt"
-            )
-
-    def encode(self, x: str) -> str:
-        return self.bpe.process_line(x)
-
-    def decode(self, x: str) -> str:
-        return (x + " ").replace(self.bpe_symbol, "").rstrip()
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py
deleted file mode 100644
index bc39509980a80eb8c21e0bfdb304649ad3acc4d0..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.nat import NATransformerModel
-
-
-def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
-    # s: input batch
-    # V: vocabulary size
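-    # Corrupt each position with total probability beta, split equally among
-    # three operations: replace (substitute a random vocabulary word), repeat
-    # (copy the current word over the next one) and swap (exchange the word
-    # with its right neighbor); pad/bos/eos positions are never corrupted.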
-    rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
-    choices = torch.rand(size=s.size(), device=s.device)
-    choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
-
-    replace = choices < beta / 3
-    repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
-    swap = (choices >= beta * 2 / 3) & (choices < beta)
-    safe = choices >= beta
-
-    for i in range(s.size(1) - 1):
-        rand_word = rand_words[:, i]
-        next_word = s[:, i + 1]
-        self_word = s[:, i]
-
-        replace_i = replace[:, i]
-        swap_i = swap[:, i] & (next_word != 3)
-        repeat_i = repeat[:, i] & (next_word != 3)
-        safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
-
-        s[:, i] = (
-            self_word * (safe_i | repeat_i).long()
-            + next_word * swap_i.long()
-            + rand_word * replace_i.long()
-        )
-        s[:, i + 1] = (
-            next_word * (safe_i | replace_i).long()
-            + self_word * (swap_i | repeat_i).long()
-        )
-    return s
-
-
-def gumbel_noise(input, TINY=1e-8):
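-    # Standard Gumbel noise, -log(-log(U)) for U ~ Uniform(0, 1), with TINY
-    # guarding against log(0). Adding this noise to logits and taking the
-    # argmax samples from softmax(logits) (the Gumbel-max trick), which is how
-    # stochastic_approx draws predictions between refinement iterations below.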
-    return (
-        input.new_zeros(*input.size())
-        .uniform_()
-        .add_(TINY)
-        .log_()
-        .neg_()
-        .add_(TINY)
-        .log_()
-        .neg_()
-    )
-
-
-@register_model("iterative_nonautoregressive_transformer")
-class IterNATransformerModel(NATransformerModel):
-    @staticmethod
-    def add_args(parser):
-        NATransformerModel.add_args(parser)
-        parser.add_argument(
-            "--train-step",
-            type=int,
-            help="number of refinement iterations during training",
-        )
-        parser.add_argument(
-            "--dae-ratio",
-            type=float,
-            help="the probability of switching to the denoising auto-encoder loss",
-        )
-        parser.add_argument(
-            "--stochastic-approx",
-            action="store_true",
-            help="sampling from the decoder as the inputs for next iteration",
-        )
-
-    @classmethod
-    def build_model(cls, args, task):
-        model = super().build_model(args, task)
-        model.train_step = getattr(args, "train_step", 4)
-        model.dae_ratio = getattr(args, "dae_ratio", 0.5)
-        model.stochastic_approx = getattr(args, "stochastic_approx", False)
-        return model
-
-    def forward(
-        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
-    ):
-
-        B, T = prev_output_tokens.size()
-
-        # encoding
-        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
-
-        # length prediction
-        length_out = self.decoder.forward_length(
-            normalize=False, encoder_out=encoder_out
-        )
-        length_tgt = self.decoder.forward_length_prediction(
-            length_out, encoder_out, tgt_tokens
-        )
-
-        # decoding
-        word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
-        for t in range(self.train_step):
-            word_ins_out = self.decoder(
-                normalize=False,
-                prev_output_tokens=prev_output_tokens,
-                encoder_out=encoder_out,
-                step=t,
-            )
-            word_ins_tgt = tgt_tokens
-            word_ins_mask = word_ins_tgt.ne(self.pad)
-
-            word_ins_outs.append(word_ins_out)
-            word_ins_tgts.append(word_ins_tgt)
-            word_ins_masks.append(word_ins_mask)
-
-            if t < (self.train_step - 1):
-                # prediction for next iteration
-                if self.stochastic_approx:
-                    word_ins_prediction = (
-                        word_ins_out + gumbel_noise(word_ins_out)
-                    ).max(-1)[1]
-                else:
-                    word_ins_prediction = word_ins_out.max(-1)[1]
-
-                prev_output_tokens = prev_output_tokens.masked_scatter(
-                    word_ins_mask, word_ins_prediction[word_ins_mask]
-                )
-
-                if self.dae_ratio > 0:
-                    # with probability dae_ratio, replace the next iteration's
-                    # input with a corrupted version of the target (denoising
-                    # auto-encoder); the first iteration's input is never
-                    # corrupted
-                    corrupted = (
-                        torch.rand(size=(B,), device=prev_output_tokens.device)
-                        < self.dae_ratio
-                    )
-                    corrupted_tokens = _sequential_poisoning(
-                        tgt_tokens[corrupted],
-                        len(self.tgt_dict),
-                        0.33,
-                        self.bos,
-                        self.eos,
-                        self.pad,
-                    )
-                    prev_output_tokens[corrupted] = corrupted_tokens
-
-        # concat everything
-        word_ins_out = torch.cat(word_ins_outs, 0)
-        word_ins_tgt = torch.cat(word_ins_tgts, 0)
-        word_ins_mask = torch.cat(word_ins_masks, 0)
-
-        return {
-            "word_ins": {
-                "out": word_ins_out,
-                "tgt": word_ins_tgt,
-                "mask": word_ins_mask,
-                "ls": self.args.label_smoothing,
-                "nll_loss": True,
-            },
-            "length": {
-                "out": length_out,
-                "tgt": length_tgt,
-                "factor": self.decoder.length_loss_factor,
-            },
-        }
-
-
-@register_model_architecture(
-    "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
-)
-def inat_base_architecture(args):
-    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
-    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
-    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
-    args.encoder_layers = getattr(args, "encoder_layers", 6)
-    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
-    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
-    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
-    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
-    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
-    args.decoder_ffn_embed_dim = getattr(
-        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
-    )
-    args.decoder_layers = getattr(args, "decoder_layers", 6)
-    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
-    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
-    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
-    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
-    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
-    args.activation_fn = getattr(args, "activation_fn", "relu")
-    args.dropout = getattr(args, "dropout", 0.1)
-    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
-    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
-    args.share_decoder_input_output_embed = getattr(
-        args, "share_decoder_input_output_embed", False
-    )
-    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
-    args.no_token_positional_embeddings = getattr(
-        args, "no_token_positional_embeddings", False
-    )
-    args.adaptive_input = getattr(args, "adaptive_input", False)
-    args.apply_bert_init = getattr(args, "apply_bert_init", False)
-
-    args.decoder_output_dim = getattr(
-        args, "decoder_output_dim", args.decoder_embed_dim
-    )
-    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
-    # --- special arguments ---
-    args.sg_length_pred = getattr(args, "sg_length_pred", False)
-    args.pred_length_offset = getattr(args, "pred_length_offset", False)
-    args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
-    args.ngram_predictor = getattr(args, "ngram_predictor", 1)
-    args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
-
-    args.train_step = getattr(args, "train_step", 4)
-    args.dae_ratio = getattr(args, "dae_ratio", 0.5)
-    args.stochastic_approx = getattr(args, "stochastic_approx", False)
-
-
-@register_model_architecture(
-    "iterative_nonautoregressive_transformer",
-    "iterative_nonautoregressive_transformer_wmt_en_de",
-)
-def iter_nat_wmt_en_de(args):
-    inat_base_architecture(args)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
deleted file mode 100644
index 0f87bb5d7ed5c7eb8011d4c651f2ecbf0ae700ac..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections.abc import Collection
-from dataclasses import dataclass, field
-from typing import List
-
-from omegaconf import II
-
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
-
-
-@dataclass
-class InverseSquareRootLRScheduleConfig(FairseqDataclass):
-    warmup_updates: int = field(
-        default=4000,
-        metadata={"help": "warmup the learning rate linearly for the first N updates"},
-    )
-    warmup_init_lr: float = field(
-        default=-1,
-        metadata={
-            "help": "initial learning rate during warmup phase; default is cfg.lr"
-        },
-    )
-    lr: List[float] = II("optimization.lr")
-
-
-@register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig)
-class InverseSquareRootSchedule(FairseqLRScheduler):
-    """Decay the LR based on the inverse square root of the update number.
-
-    We also support a warmup phase where we linearly increase the learning rate
-    from some initial learning rate (``--warmup-init-lr``) until the configured
-    learning rate (``--lr``). Thereafter we decay proportional to the number of
-    updates, with a decay factor set to align with the configured learning rate.
-
-    During warmup::
-
-      lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
-      lr = lrs[update_num]
-
-    After warmup::
-
-      decay_factor = cfg.lr * sqrt(cfg.warmup_updates)
-      lr = decay_factor / sqrt(update_num)
-    """
-
-    def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer):
-        super().__init__(cfg, optimizer)
-        if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
-            raise ValueError(
-                "Cannot use a fixed learning rate schedule with inverse_sqrt."
-                " Consider --lr-scheduler=fixed instead."
-            )
-        warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
-        if cfg.warmup_init_lr < 0:
-            cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
-
-        # linearly warmup for the first cfg.warmup_updates
-        self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
-
-        # then, decay prop. to the inverse square root of the update number
-        self.decay_factor = warmup_end_lr * cfg.warmup_updates ** 0.5
-
-        # initial learning rate
-        self.lr = cfg.warmup_init_lr
-        self.optimizer.set_lr(self.lr)
-
-    def step(self, epoch, val_loss=None):
-        """Update the learning rate at the end of the given epoch."""
-        super().step(epoch, val_loss)
-        # we don't change the learning rate at epoch boundaries
-        return self.optimizer.get_lr()
-
-    def step_update(self, num_updates):
-        """Update the learning rate after each update."""
-        if num_updates < self.cfg.warmup_updates:
-            self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
-        else:
-            self.lr = self.decay_factor * num_updates ** -0.5
-        self.optimizer.set_lr(self.lr)
-        return self.lr
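-
-
-# Worked example (a sketch, not part of fairseq): with warmup_updates=4000,
-# warmup_init_lr=0 and lr=5e-4, the LR rises linearly to 5e-4 at update 4000,
-# then decays as decay_factor / sqrt(num_updates):
-#   decay_factor = 5e-4 * sqrt(4000) ≈ 3.16e-2
-#   lr(16000)    = 3.16e-2 / sqrt(16000) ≈ 2.5e-4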
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/commons.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/commons.py
deleted file mode 100644
index 8da7b35049d768a29de6f66cbe8795a825967818..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/commons.py
+++ /dev/null
@@ -1,273 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from librosa.filters import mel as librosa_mel_fn
-from audio_processing import dynamic_range_compression
-from audio_processing import dynamic_range_decompression
-from stft import STFT
-
-
-def intersperse(lst, item):
-    result = [item] * (len(lst) * 2 + 1)
-    result[1::2] = lst
-    return result
-
-
-def mle_loss(z, m, logs, logdet, mask):
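-    # Negative log-likelihood of z under N(m, exp(logs)^2), corrected by the
-    # log-determinant of the flow Jacobian and averaged over the valid
-    # (batch, channel, time) positions selected by mask.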
-    l = torch.sum(logs) + 0.5 * torch.sum(
-        torch.exp(-2 * logs) * ((z - m) ** 2)
-    )  # neg normal likelihood w/o the constant term
-    l = l - torch.sum(logdet)  # log jacobian determinant
-    l = l / torch.sum(
-        torch.ones_like(z) * mask
-    )  # averaging across batch, channel and time axes
-    l = l + 0.5 * math.log(2 * math.pi)  # add the remaining constant term
-    return l
-
-
-def duration_loss(logw, logw_, lengths):
-    l = torch.sum((logw - logw_) ** 2) / torch.sum(lengths)
-    return l
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
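-    # WaveNet-style gated activation: sum the two inputs, split the channels
-    # into a tanh half and a sigmoid gate half, and multiply them elementwise.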
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def maximum_path(value, mask, max_neg_val=-np.inf):
-    """Numpy-friendly version. It's about 4 times faster than torch version.
-    value: [b, t_x, t_y]
-    mask: [b, t_x, t_y]
-    """
-    value = value * mask
-
-    device = value.device
-    dtype = value.dtype
-    value = value.cpu().detach().numpy()
-    mask = mask.cpu().detach().numpy().astype(bool)
-
-    b, t_x, t_y = value.shape
-    direction = np.zeros(value.shape, dtype=np.int64)
-    v = np.zeros((b, t_x), dtype=np.float32)
-    x_range = np.arange(t_x, dtype=np.float32).reshape(1, -1)
-    for j in range(t_y):
-        v0 = np.pad(v, [[0, 0], [1, 0]], mode="constant", constant_values=max_neg_val)[
-            :, :-1
-        ]
-        v1 = v
-        max_mask = v1 >= v0
-        v_max = np.where(max_mask, v1, v0)
-        direction[:, :, j] = max_mask
-
-        index_mask = x_range <= j
-        v = np.where(index_mask, v_max + value[:, :, j], max_neg_val)
-    direction = np.where(mask, direction, 1)
-
-    path = np.zeros(value.shape, dtype=np.float32)
-    index = mask[:, :, 0].sum(1).astype(np.int64) - 1
-    index_range = np.arange(b)
-    for j in reversed(range(t_y)):
-        path[index_range, index, j] = 1
-        index = index + direction[index_range, index, j] - 1
-    path = path * mask.astype(np.float32)
-    path = torch.from_numpy(path).to(device=device, dtype=dtype)
-    return path
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, t_x]
-    mask: [b, t_x, t_y]
-    """
-    device = duration.device
-
-    b, t_x, t_y = mask.shape
-    cum_duration = torch.cumsum(duration, 1)
-    path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path * mask
-    return path
-
-
-class Adam:
-    def __init__(
-        self,
-        params,
-        scheduler,
-        dim_model,
-        warmup_steps=4000,
-        lr=1e0,
-        betas=(0.9, 0.98),
-        eps=1e-9,
-    ):
-        self.params = params
-        self.scheduler = scheduler
-        self.dim_model = dim_model
-        self.warmup_steps = warmup_steps
-        self.lr = lr
-        self.betas = betas
-        self.eps = eps
-
-        self.step_num = 1
-        self.cur_lr = lr * self._get_lr_scale()
-
-        self._optim = torch.optim.Adam(params, lr=self.cur_lr, betas=betas, eps=eps)
-
-    def _get_lr_scale(self):
-        if self.scheduler == "noam":
-            return np.power(self.dim_model, -0.5) * np.min(
-                [
-                    np.power(self.step_num, -0.5),
-                    self.step_num * np.power(self.warmup_steps, -1.5),
-                ]
-            )
-        else:
-            return 1
-
-    def _update_learning_rate(self):
-        self.step_num += 1
-        if self.scheduler == "noam":
-            self.cur_lr = self.lr * self._get_lr_scale()
-            for param_group in self._optim.param_groups:
-                param_group["lr"] = self.cur_lr
-
-    def get_lr(self):
-        return self.cur_lr
-
-    def step(self):
-        self._optim.step()
-        self._update_learning_rate()
-
-    def zero_grad(self):
-        self._optim.zero_grad()
-
-    def load_state_dict(self, d):
-        self._optim.load_state_dict(d)
-
-    def state_dict(self):
-        return self._optim.state_dict()
-
-
-class TacotronSTFT(nn.Module):
-    def __init__(
-        self,
-        filter_length=1024,
-        hop_length=256,
-        win_length=1024,
-        n_mel_channels=80,
-        sampling_rate=22050,
-        mel_fmin=0.0,
-        mel_fmax=8000.0,
-    ):
-        super(TacotronSTFT, self).__init__()
-        self.n_mel_channels = n_mel_channels
-        self.sampling_rate = sampling_rate
-        self.stft_fn = STFT(filter_length, hop_length, win_length)
-        mel_basis = librosa_mel_fn(
-            sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax
-        )
-        mel_basis = torch.from_numpy(mel_basis).float()
-        self.register_buffer("mel_basis", mel_basis)
-
-    def spectral_normalize(self, magnitudes):
-        output = dynamic_range_compression(magnitudes)
-        return output
-
-    def spectral_de_normalize(self, magnitudes):
-        output = dynamic_range_decompression(magnitudes)
-        return output
-
-    def mel_spectrogram(self, y):
-        """Computes mel-spectrograms from a batch of waves
-        PARAMS
-        ------
-        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
-
-        RETURNS
-        -------
-        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
-        """
-        assert torch.min(y.data) >= -1
-        assert torch.max(y.data) <= 1
-
-        magnitudes, phases = self.stft_fn.transform(y)
-        magnitudes = magnitudes.data
-        mel_output = torch.matmul(self.mel_basis, magnitudes)
-        mel_output = self.spectral_normalize(mel_output)
-        return mel_output
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-
-        p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1.0 / norm_type)
-    return total_norm
-
-
-def squeeze(x, x_mask=None, n_sqz=2):
-    b, c, t = x.size()
-
-    t = (t // n_sqz) * n_sqz
-    x = x[:, :, :t]
-    x_sqz = x.view(b, c, t // n_sqz, n_sqz)
-    x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
-
-    if x_mask is not None:
-        x_mask = x_mask[:, :, n_sqz - 1 :: n_sqz]
-    else:
-        x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
-    return x_sqz * x_mask, x_mask
-
-
-def unsqueeze(x, x_mask=None, n_sqz=2):
-    b, c, t = x.size()
-
-    x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
-    x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
-
-    if x_mask is not None:
-        x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
-    else:
-        x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
-    return x_unsqz * x_mask, x_mask
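-
-
-# Round-trip sanity check (a sketch, not part of the original file): for an
-# input whose time dimension is divisible by n_sqz, unsqueeze inverts squeeze:
-#   x = torch.randn(2, 4, 10)
-#   y, y_mask = squeeze(x)        # y: (2, 8, 5)
-#   x2, _ = unsqueeze(y, y_mask)  # x2: (2, 4, 10)
-#   assert torch.allclose(x, x2)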
diff --git a/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/models/cond_transformer.py b/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/models/cond_transformer.py
deleted file mode 100644
index 03adb5ab52b497b97d4c50cbb3c3bf3ca0753d41..0000000000000000000000000000000000000000
--- a/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/models/cond_transformer.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import os, math
-import torch
-import torch.nn.functional as F
-import pytorch_lightning as pl
-
-from main import instantiate_from_config
-from taming.modules.util import SOSProvider
-
-
-def disabled_train(self, mode=True):
-    """Overwrite model.train with this function to make sure train/eval mode
-    does not change anymore."""
-    return self
-
-
-class Net2NetTransformer(pl.LightningModule):
-    def __init__(self,
-                 transformer_config,
-                 first_stage_config,
-                 cond_stage_config,
-                 permuter_config=None,
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 first_stage_key="image",
-                 cond_stage_key="depth",
-                 downsample_cond_size=-1,
-                 pkeep=1.0,
-                 sos_token=0,
-                 unconditional=False,
-                 ):
-        super().__init__()
-        self.be_unconditional = unconditional
-        self.sos_token = sos_token
-        self.first_stage_key = first_stage_key
-        self.cond_stage_key = cond_stage_key
-        self.init_first_stage_from_ckpt(first_stage_config)
-        self.init_cond_stage_from_ckpt(cond_stage_config)
-        if permuter_config is None:
-            permuter_config = {"target": "taming.modules.transformer.permuter.Identity"}
-        self.permuter = instantiate_from_config(config=permuter_config)
-        self.transformer = instantiate_from_config(config=transformer_config)
-
-        if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-        self.downsample_cond_size = downsample_cond_size
-        self.pkeep = pkeep
-
-    def init_from_ckpt(self, path, ignore_keys=list()):
-        sd = torch.load(path, map_location="cpu")["state_dict"]
-        for k in list(sd.keys()):
-            for ik in ignore_keys:
-                if k.startswith(ik):
-                    self.print("Deleting key {} from state_dict.".format(k))
-                    del sd[k]
-        self.load_state_dict(sd, strict=False)
-        print(f"Restored from {path}")
-
-    def init_first_stage_from_ckpt(self, config):
-        model = instantiate_from_config(config)
-        model = model.eval()
-        model.train = disabled_train
-        self.first_stage_model = model
-
-    def init_cond_stage_from_ckpt(self, config):
-        if config == "__is_first_stage__":
-            print("Using first stage also as cond stage.")
-            self.cond_stage_model = self.first_stage_model
-        elif config == "__is_unconditional__" or self.be_unconditional:
-            print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
-                  f"Prepending {self.sos_token} as a sos token.")
-            self.be_unconditional = True
-            self.cond_stage_key = self.first_stage_key
-            self.cond_stage_model = SOSProvider(self.sos_token)
-        else:
-            model = instantiate_from_config(config)
-            model = model.eval()
-            model.train = disabled_train
-            self.cond_stage_model = model
-
-    def forward(self, x, c):
-        # one step to produce the logits
-        # x = target
-        # c = nucleus
-        _, z_indices = self.encode_to_z(x)
-        _, c_indices = self.encode_to_c(c)
-
-        if self.training and self.pkeep < 1.0:
-            mask = torch.bernoulli(
-                self.pkeep * torch.ones(z_indices.shape, device=z_indices.device)
-            )
-            mask = mask.round().to(dtype=torch.int64)
-            r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
-            a_indices = mask * z_indices + (1 - mask) * r_indices
-        else:
-            a_indices = z_indices
-
-        cz_indices = torch.cat((c_indices, a_indices), dim=1)
-
-        # target includes all sequence elements (no need to handle first one
-        # differently because we are conditioning)
-        target = z_indices
-        # make the prediction
-        logits, _ = self.transformer(cz_indices[:, :-1])
-        # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
-        logits = logits[:, c_indices.shape[1]-1:]
-
-        return logits, target
-
-    def top_k_logits(self, logits, k):
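-        # Keep only the k largest logits per position and set the rest to
-        # -inf, so that softmax assigns them zero probability.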
-        v, ix = torch.topk(logits, k)
-        out = logits.clone()
-        out[out < v[..., [-1]]] = -float('Inf')
-        return out
-
-    @torch.no_grad()
-    def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
-               callback=lambda k: None):
-        x = torch.cat((c,x),dim=1)
-        block_size = self.transformer.get_block_size()
-        assert not self.transformer.training
-        if self.pkeep <= 0.0:
-            # one pass suffices since input is pure noise anyway
-            assert len(x.shape)==2
-            noise_shape = (x.shape[0], steps-1)
-            #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
-            noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
-            x = torch.cat((x,noise),dim=1)
-            logits, _ = self.transformer(x)
-            # take all logits for now and scale by temp
-            logits = logits / temperature
-            # optionally crop probabilities to only the top k options
-            if top_k is not None:
-                logits = self.top_k_logits(logits, top_k)
-            # apply softmax to convert to probabilities
-            probs = F.softmax(logits, dim=-1)
-            # sample from the distribution or take the most likely
-            if sample:
-                shape = probs.shape
-                probs = probs.reshape(shape[0]*shape[1],shape[2])
-                ix = torch.multinomial(probs, num_samples=1)
-                probs = probs.reshape(shape[0],shape[1],shape[2])
-                ix = ix.reshape(shape[0],shape[1])
-            else:
-                _, ix = torch.topk(probs, k=1, dim=-1)
-            # cut off conditioning
-            x = ix[:, c.shape[1]-1:]
-        else:
-            for k in range(steps):
-                callback(k)
-                assert x.size(1) <= block_size # make sure model can see conditioning
-                x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
-                logits, _ = self.transformer(x_cond)
-                # pluck the logits at the final step and scale by temperature
-                logits = logits[:, -1, :] / temperature
-                # optionally crop probabilities to only the top k options
-                if top_k is not None:
-                    logits = self.top_k_logits(logits, top_k)
-                # apply softmax to convert to probabilities
-                probs = F.softmax(logits, dim=-1)
-                # sample from the distribution or take the most likely
-                if sample:
-                    ix = torch.multinomial(probs, num_samples=1)
-                else:
-                    _, ix = torch.topk(probs, k=1, dim=-1)
-                # append to the sequence and continue
-                x = torch.cat((x, ix), dim=1)
-            # cut off conditioning
-            x = x[:, c.shape[1]:]
-        return x
-
-    @torch.no_grad()
-    def encode_to_z(self, x):
-        quant_z, _, info = self.first_stage_model.encode(x)
-        indices = info[2].view(quant_z.shape[0], -1)
-        indices = self.permuter(indices)
-        return quant_z, indices
-
-    @torch.no_grad()
-    def encode_to_c(self, c):
-        if self.downsample_cond_size > -1:
-            c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
-            
-        #quant_c, _, info = self.cond_stage_model.encode(x)
-        #indices = info[2].view(quant_c.shape[0], -1)
-        #indices = self.permuter(indices)    
-        quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
-        if len(indices.shape) != 2:
-            indices = indices.view(c.shape[0], -1)
-        return quant_c, indices
-    
-    @torch.no_grad()
-    def decode_to_img(self, index, zshape):
-        index = self.permuter(index, reverse=True)
-        bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
-        quant_z = self.first_stage_model.quantize.get_codebook_entry(
-            index.reshape(-1), shape=bhwc)
-        x = self.first_stage_model.decode(quant_z)
-        return x
-
-    @torch.no_grad()
-    def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
-        log = dict()
-
-        N = 4
-        if lr_interface:
-            x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
-        else:
-            x, c = self.get_xc(batch, N)
-        x = x.to(device=self.device)
-        c = c.to(device=self.device)
-
-        quant_z, z_indices = self.encode_to_z(x)
-        quant_c, c_indices = self.encode_to_c(c)
-
-        # create a "half"" sample
-        z_start_indices = z_indices[:,:z_indices.shape[1]//2]
-        index_sample = self.sample(z_start_indices, c_indices,
-                                   steps=z_indices.shape[1]-z_start_indices.shape[1],
-                                   temperature=temperature if temperature is not None else 1.0,
-                                   sample=True,
-                                   top_k=top_k if top_k is not None else 100,
-                                   callback=callback if callback is not None else lambda k: None)
-        x_sample = self.decode_to_img(index_sample, quant_z.shape)
-
-        # sample
-        z_start_indices = z_indices[:, :0]
-        index_sample = self.sample(z_start_indices, c_indices,
-                                   steps=z_indices.shape[1],
-                                   temperature=temperature if temperature is not None else 1.0,
-                                   sample=True,
-                                   top_k=top_k if top_k is not None else 100,
-                                   callback=callback if callback is not None else lambda k: None)
-        x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape)
-
-        # det sample
-        z_start_indices = z_indices[:, :0]
-        index_sample = self.sample(z_start_indices, c_indices,
-                                   steps=z_indices.shape[1],
-                                   sample=False,
-                                   callback=callback if callback is not None else lambda k: None)
-        x_sample_det = self.decode_to_img(index_sample, quant_z.shape)
-
-        # reconstruction
-        x_rec = self.decode_to_img(z_indices, quant_z.shape)
-
-        log["inputs"] = x
-        log["reconstructions"] = x_rec
-
-        if self.cond_stage_key not in ("image", "nucleus", "target"):
-            cond_rec = self.cond_stage_model.decode(quant_c)
-            if self.cond_stage_key == "segmentation":
-                # get image from segmentation mask
-                num_classes = cond_rec.shape[1]
-
-                c = torch.argmax(c, dim=1, keepdim=True)
-                c = F.one_hot(c, num_classes=num_classes)
-                c = c.squeeze(1).permute(0, 3, 1, 2).float()
-                c = self.cond_stage_model.to_rgb(c)
-
-                cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True)
-                cond_rec = F.one_hot(cond_rec, num_classes=num_classes)
-                cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float()
-                cond_rec = self.cond_stage_model.to_rgb(cond_rec)
-            log["conditioning_rec"] = cond_rec
-            log["conditioning"] = c
-
-        log["samples_half"] = x_sample
-        log["samples_nopix"] = x_sample_nopix
-        log["samples_det"] = x_sample_det
-        return log
-
-    def get_input(self, key, batch):
-        x = batch[key]
-        if len(x.shape) == 3:
-            x = x[..., None]
-        #if len(x.shape) == 4:
-        #    x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
-        if x.dtype == torch.double:
-            x = x.float()
-        return x
-
-    def get_xc(self, batch, N=None):
-        x = self.get_input(self.first_stage_key, batch)
-        c = self.get_input(self.cond_stage_key, batch)
-        if N is not None:
-            x = x[:N]
-            c = c[:N]
-        return x, c
-
-    def shared_step(self, batch):
-        x, c = self.get_xc(batch)
-        logits, target = self(x, c)
-        loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
-        return loss
-
-    def training_step(self, batch, batch_idx):
-        loss = self.shared_step(batch)
-        self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-        return loss
-
-    def validation_step(self, batch, batch_idx):
-        loss = self.shared_step(batch)
-        self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-        return loss
-
-    def configure_optimizers(self):
-        """
-        Following minGPT:
-        This long function is unfortunately doing something very simple and is being very defensive:
-        We are separating out all parameters of the model into two buckets: those that will experience
-        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
-        We are then returning the PyTorch optimizer object.
-        """
-        # separate out all parameters to those that will and won't experience regularizing weight decay
-        decay = set()
-        no_decay = set()
-        whitelist_weight_modules = (torch.nn.Linear, )
-        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
-        for mn, m in self.transformer.named_modules():
-            for pn, p in m.named_parameters():
-                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
-
-                if pn.endswith('bias'):
-                    # all biases will not be decayed
-                    no_decay.add(fpn)
-                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
-                    # weights of whitelist modules will be weight decayed
-                    decay.add(fpn)
-                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
-                    # weights of blacklist modules will NOT be weight decayed
-                    no_decay.add(fpn)
-
-        # special case the position embedding parameter in the root GPT module as not decayed
-        no_decay.add('pos_emb')
-
-        # validate that we considered every parameter
-        param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
-        inter_params = decay & no_decay
-        union_params = decay | no_decay
-        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
-        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
-                                                    % (str(param_dict.keys() - union_params), )
-
-        # create the pytorch optimizer object
-        optim_groups = [
-            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
-            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
-        ]
-        optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
-        return optimizer
diff --git a/spaces/HuggingFaceH4/open_llm_leaderboard/src/get_model_info/apply_metadata_to_df.py b/spaces/HuggingFaceH4/open_llm_leaderboard/src/get_model_info/apply_metadata_to_df.py
deleted file mode 100644
index 7a26d047a3d025b85230ec00f6e29176f578156a..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceH4/open_llm_leaderboard/src/get_model_info/apply_metadata_to_df.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import glob
-import json
-import os
-from typing import List
-
-from huggingface_hub import HfApi
-from tqdm import tqdm
-
-from src.get_model_info.hardocded_metadata.flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
-from src.get_model_info.hardocded_metadata.types import MODEL_TYPE_METADATA, ModelType, model_type_from_str
-from src.get_model_info.utils import AutoEvalColumn, model_hyperlink
-
-api = HfApi(token=os.environ.get("H4_TOKEN", None))
-
-
-def get_model_metadata(leaderboard_data: List[dict]):
-    for model_data in tqdm(leaderboard_data):
-        request_files = os.path.join(
-            "eval-queue",
-            model_data["model_name_for_query"] + "_eval_request_*" + ".json",
-        )
-        request_files = glob.glob(request_files)
-
-        # Select correct request file (precision)
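-        # e.g. (hypothetical): among several matching request files, keep the one whose
-        # JSON "precision" field matches the suffix of model_data["Precision"] and whose
-        # status is FINISHED or PENDING_NEW_EVAL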
-        request_file = ""
-        if len(request_files) == 1:
-            request_file = request_files[0]
-        elif len(request_files) > 1:
-            request_files = sorted(request_files, reverse=True)
-            for tmp_request_file in request_files:
-                with open(tmp_request_file, "r") as f:
-                    req_content = json.load(f)
-                    if (
-                        req_content["status"] in ["FINISHED", "PENDING_NEW_EVAL"]
-                        and req_content["precision"] == model_data["Precision"].split(".")[-1]
-                    ):
-                        request_file = tmp_request_file
-
-        try:
-            with open(request_file, "r") as f:
-                request = json.load(f)
-            model_type = model_type_from_str(request.get("model_type", ""))
-            model_data[AutoEvalColumn.model_type.name] = model_type.value.name
-            model_data[AutoEvalColumn.model_type_symbol.name] = model_type.value.symbol  # + ("🔺" if is_delta else "")
-            model_data[AutoEvalColumn.license.name] = request.get("license", "?")
-            model_data[AutoEvalColumn.likes.name] = request.get("likes", 0)
-            model_data[AutoEvalColumn.params.name] = request.get("params", 0)
-        except Exception:
-            print(f"Could not find request file for {model_data['model_name_for_query']}")
-
-            if model_data["model_name_for_query"] in MODEL_TYPE_METADATA:
-                model_data[AutoEvalColumn.model_type.name] = MODEL_TYPE_METADATA[
-                    model_data["model_name_for_query"]
-                ].value.name
-                model_data[AutoEvalColumn.model_type_symbol.name] = MODEL_TYPE_METADATA[
-                    model_data["model_name_for_query"]
-                ].value.symbol  # + ("🔺" if is_delta else "")
-            else:
-                model_data[AutoEvalColumn.model_type.name] = ModelType.Unknown.value.name
-                model_data[AutoEvalColumn.model_type_symbol.name] = ModelType.Unknown.value.symbol
-
-            # if we cannot find a request file, set license and likes to unknown
-            model_data[AutoEvalColumn.license.name] = "?"
-            model_data[AutoEvalColumn.likes.name] = 0
-            model_data[AutoEvalColumn.params.name] = 0
-
-
-def flag_models(leaderboard_data: List[dict]):
-    for model_data in leaderboard_data:
-        if model_data["model_name_for_query"] in FLAGGED_MODELS:
-            issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
-            issue_link = model_hyperlink(
-                FLAGGED_MODELS[model_data["model_name_for_query"]],
-                f"See discussion #{issue_num}",
-            )
-            model_data[
-                AutoEvalColumn.model.name
-            ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
-
-
-def remove_forbidden_models(leaderboard_data: List[dict]):
-    indices_to_remove = []
-    for ix, model in enumerate(leaderboard_data):
-        if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
-            indices_to_remove.append(ix)
-
-    for ix in reversed(indices_to_remove):
-        leaderboard_data.pop(ix)
-    return leaderboard_data
-
-
-def apply_metadata(leaderboard_data: List[dict]):
-    leaderboard_data = remove_forbidden_models(leaderboard_data)
-    get_model_metadata(leaderboard_data)
-    flag_models(leaderboard_data)
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightweight_convolution.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightweight_convolution.py
deleted file mode 100644
index ec11a9507951c9e8f3564753841dd9c74a4900e0..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightweight_convolution.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.incremental_decoding_utils import with_incremental_state
-from fairseq.modules.fairseq_dropout import FairseqDropout
-from fairseq.modules.unfold import unfold1d
-
-
-def LightweightConv(
-    input_size,
-    kernel_size=1,
-    padding_l=None,
-    num_heads=1,
-    weight_dropout=0.0,
-    weight_softmax=False,
-    bias=False,
-):
-    if torch.cuda.is_available():
-        try:
-            from fairseq.modules.lightconv_layer import LightconvLayer
-
-            return LightconvLayer(
-                input_size,
-                kernel_size=kernel_size,
-                padding_l=padding_l,
-                num_heads=num_heads,
-                weight_dropout=weight_dropout,
-                weight_softmax=weight_softmax,
-                bias=bias,
-            )
-        except ImportError as e:
-            print(e)
-    return LightweightConv1dTBC(
-        input_size,
-        kernel_size=kernel_size,
-        padding_l=padding_l,
-        num_heads=num_heads,
-        weight_dropout=weight_dropout,
-        weight_softmax=weight_softmax,
-        bias=bias,
-    )
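-
-# Usage sketch (hypothetical sizes): LightweightConv(512, kernel_size=3, padding_l=2,
-# num_heads=8) returns the fused CUDA layer when available, otherwise the
-# LightweightConv1dTBC fallback below, whose forward expects input of shape T x B x C.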
-
-
-class LightweightConv1d(nn.Module):
-    """Lightweight Convolution assuming the input is BxCxT
-    This is just an example that explains LightConv more clearly than the TBC version.
-    We don't use this module in the model.
-
-    Args:
-        input_size: # of channels of the input and output
-        kernel_size: size of the convolution kernel
-        padding: padding
-        num_heads: number of heads used. The weight is of shape
-            `(num_heads, 1, kernel_size)`
-        weight_softmax: normalize the weight with softmax before the convolution
-
-    Shape:
-        Input: BxCxT, i.e. (batch_size, input_size, timesteps)
-        Output: BxCxT, i.e. (batch_size, input_size, timesteps)
-
-    Attributes:
-        weight: the learnable weights of the module of shape
-            `(num_heads, 1, kernel_size)`
-        bias: the learnable bias of the module of shape `(input_size)`
-    """
-
-    def __init__(
-        self,
-        input_size,
-        kernel_size=1,
-        padding=0,
-        num_heads=1,
-        weight_softmax=False,
-        bias=False,
-        weight_dropout=0.0,
-    ):
-        super().__init__()
-        self.input_size = input_size
-        self.kernel_size = kernel_size
-        self.num_heads = num_heads
-        self.padding = padding
-        self.weight_softmax = weight_softmax
-        self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
-
-        if bias:
-            self.bias = nn.Parameter(torch.Tensor(input_size))
-        else:
-            self.bias = None
-        self.weight_dropout_module = FairseqDropout(
-            weight_dropout, module_name=self.__class__.__name__
-        )
-        self.reset_parameters()
-
-    def reset_parameters(self):
-        nn.init.xavier_uniform_(self.weight)
-        if self.bias is not None:
-            nn.init.constant_(self.bias, 0.0)
-
-    def forward(self, input):
-        """
-        input size: B x C x T
-        output size: B x C x T
-        """
-        B, C, T = input.size()
-        H = self.num_heads
-
-        weight = self.weight
-        if self.weight_softmax:
-            weight = F.softmax(weight, dim=-1)
-
-        weight = self.weight_dropout_module(weight)
-        # Merge every C/H entries into the batch dimension (C = self.input_size)
-        # B x C x T -> (B * C/H) x H x T
-        # One can also expand the weight to C x 1 x K by a factor of C/H
-        # and do not reshape the input instead, which is slow though
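-        # Concrete example (hypothetical sizes): B=2, C=8, H=4, T=10 gives
-        # input (2, 8, 10) -> view (4, 4, 10); with length-preserving padding the
-        # grouped conv keeps that shape and the final view restores (2, 8, 10).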
-        input = input.view(-1, H, T)
-        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
-        output = output.view(B, C, T)
-        if self.bias is not None:
-            output = output + self.bias.view(1, -1, 1)
-
-        return output
-
-
-@with_incremental_state
-class LightweightConv1dTBC(nn.Module):
-    """Lightweight Convolution assuming the input is TxBxC
-    Args:
-        input_size: # of channels of the input
-        kernel_size: size of the convolution kernel
-        padding_l: padding to the left when using "same" padding
-        num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
-        weight_dropout: dropout rate applied to the convolution weights (DropConnect)
-        weight_softmax: normalize the weight with softmax before the convolution
-        bias: use bias
-
-    Shape:
-        Input: TxBxC, i.e. (timesteps, batch_size, input_size)
-        Output: TxBxC, i.e. (timesteps, batch_size, input_size)
-
-    Attributes:
-        weight: the learnable weights of the module of shape
-            `(num_heads, 1, kernel_size)`
-        bias:   the learnable bias of the module of shape `(input_size)`
-    """
-
-    def __init__(
-        self,
-        input_size,
-        kernel_size=1,
-        padding_l=None,
-        num_heads=1,
-        weight_dropout=0.0,
-        weight_softmax=False,
-        bias=False,
-    ):
-        super().__init__()
-        self.input_size = input_size
-        self.kernel_size = kernel_size
-        self.padding_l = padding_l
-        self.num_heads = num_heads
-        self.weight_dropout_module = FairseqDropout(
-            weight_dropout, module_name=self.__class__.__name__
-        )
-        self.weight_softmax = weight_softmax
-
-        self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
-        if bias:
-            self.bias = nn.Parameter(torch.Tensor(input_size))
-        else:
-            self.bias = None
-
-        self.reset_parameters()
-        self.onnx_trace = False
-
-    def reset_parameters(self):
-        nn.init.xavier_uniform_(self.weight)
-        if self.bias is not None:
-            nn.init.constant_(self.bias, 0.0)
-
-    def forward(self, x, incremental_state=None, unfold=False):
-        """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
-        args:
-            x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
-            incremental_state: A dict to keep the state
-            unfold: unfold the input or not. If not, we use the matrix trick instead
-        """
-        unfold = unfold or (incremental_state is not None)
-
-        if unfold:
-            output = self._forward_unfolded(x, incremental_state)
-        else:
-            output = self._forward_expanded(x, incremental_state)
-
-        if self.bias is not None:
-            output = output + self.bias.view(1, 1, -1)
-        return output
-
-    def prepare_for_onnx_export_(self):
-        self.onnx_trace = True
-
-    def _forward_unfolded(self, x, incremental_state):
-        """The conventional implementation of convolutions.
-        Unfolding the input by having a window shifting to the right."""
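-        # Shape walk-through (hypothetical sizes): T=10, B=2, C=8, H=4, K=3 gives
-        # R = C // H = 2 and x_unfold of shape (T*B*H, R, K) = (80, 2, 3).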
-        T, B, C = x.size()
-        K, H = self.kernel_size, self.num_heads
-        R = C // H
-        assert R * H == C == self.input_size
-
-        weight = self.weight.view(H, K)
-        if incremental_state is not None:
-            input_buffer = self._get_input_buffer(incremental_state)
-            if input_buffer is None:
-                input_buffer = x.new()
-            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
-            if self.kernel_size > 1:
-                self._set_input_buffer(
-                    incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
-                )
-            x_unfold = x_unfold.view(T * B * H, R, -1)
-        else:
-            # unfold the input: T x B x C --> T' x B x C x K
-            x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
-            x_unfold = x_unfold.view(T * B * H, R, K)
-
-        if self.weight_softmax:
-            weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
-                weight
-            )
-
-        if incremental_state is not None:
-            weight = weight[:, -x_unfold.size(2) :]
-            K = weight.size(1)
-
-        weight = (
-            weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1)
-        )
-
-        weight = self.weight_dropout_module(weight)
-        output = torch.bmm(x_unfold, weight)  # T*B*H x R x 1
-        output = output.view(T, B, C)
-        return output
-
-    def _forward_expanded(self, x, incremental_state):
-        """Turn the convolution filters into band matrices and do matrix multiplication.
-        This is faster when the sequence is short, but less memory efficient.
-        This is not used in the decoder during inference.
-        """
-        T, B, C = x.size()
-        K, H = self.kernel_size, self.num_heads
-        R = C // H
-        assert R * H == C == self.input_size
-
-        weight = self.weight.view(H, K)
-        if self.weight_softmax:
-            weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(
-                weight
-            )
-        weight = weight.view(1, H, K).expand(T * B, H, K).contiguous()
-        weight = weight.view(T, B * H, K).transpose(0, 1)
-
-        x = x.view(T, B * H, R).transpose(0, 1)
-        P = self.padding_l
-        if K > T and P == K - 1:
-            weight = weight.narrow(2, K - T, T)
-            K, P = T, T - 1
-        # turn the convolution filters into band matrices
-        weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
-        weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(
-            weight
-        )
-        weight_expanded = weight_expanded.narrow(2, P, T)
-        weight_expanded = self.weight_dropout_module(weight_expanded)
-
-        output = torch.bmm(weight_expanded, x)
-        output = output.transpose(0, 1).contiguous().view(T, B, C)
-        return output
-
-    def reorder_incremental_state(self, incremental_state, new_order):
-        input_buffer = self._get_input_buffer(incremental_state)
-        if input_buffer is not None:
-            input_buffer = input_buffer.index_select(1, new_order)
-            self._set_input_buffer(incremental_state, input_buffer)
-
-    def _get_input_buffer(self, incremental_state):
-        return utils.get_incremental_state(self, incremental_state, "input_buffer")
-
-    def _set_input_buffer(self, incremental_state, new_buffer):
-        return utils.set_incremental_state(
-            self, incremental_state, "input_buffer", new_buffer
-        )
-
-    def extra_repr(self):
-        s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format(
-            self.input_size,
-            self.kernel_size,
-            self.padding_l,
-            self.num_heads,
-            self.weight_softmax,
-            self.bias is not None,
-        )
-        if self.weight_dropout_module.p > 0.0:
-            s += ", weight_dropout={}".format(self.weight_dropout_module.p)
-        return s
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_utils.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_utils.py
deleted file mode 100644
index 90ab674e38a40796dd1183ec0ef341159f8f62b4..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_utils.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import importlib
-import os
-from dataclasses import dataclass
-from typing import Any, Dict, Optional, Union
-
-import torch
-
-from ..utils import BaseOutput
-
-
-SCHEDULER_CONFIG_NAME = "scheduler_config.json"
-
-
-@dataclass
-class SchedulerOutput(BaseOutput):
-    """
-    Base class for the scheduler's step function output.
-
-    Args:
-        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
-            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
-            denoising loop.
-    """
-
-    prev_sample: torch.FloatTensor
-
-
-class SchedulerMixin:
-    """
-    Mixin containing common functions for the schedulers.
-
-    Class attributes:
-        - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that
-          `from_config` can be used from a class different than the one used to save the config (should be overridden
-          by parent class).
-    """
-
-    config_name = SCHEDULER_CONFIG_NAME
-    _compatibles = []
-    has_compatibles = True
-
-    @classmethod
-    def from_pretrained(
-        cls,
-        pretrained_model_name_or_path: Union[str, os.PathLike] = None,
-        subfolder: Optional[str] = None,
-        return_unused_kwargs=False,
-        **kwargs,
-    ):
-        r"""
-        Instantiate a Scheduler class from a pre-defined JSON configuration file inside a directory or Hub repo.
-
-        Parameters:
-            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
-                Can be either:
-
-                    - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an
-                      organization name, like `google/ddpm-celebahq-256`.
-                    - A path to a *directory* containing the scheduler configurations saved using
-                      [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`.
-            subfolder (`str`, *optional*):
-                In case the relevant files are located inside a subfolder of the model repo (either remote in
-                huggingface.co or downloaded locally), you can specify the folder name here.
-            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
-                Whether kwargs that are not consumed by the Python class should be returned or not.
-            cache_dir (`Union[str, os.PathLike]`, *optional*):
-                Path to a directory in which a downloaded pretrained model configuration should be cached if the
-                standard cache should not be used.
-            force_download (`bool`, *optional*, defaults to `False`):
-                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
-                cached versions if they exist.
-            resume_download (`bool`, *optional*, defaults to `False`):
-                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
-                file exists.
-            proxies (`Dict[str, str]`, *optional*):
-                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
-                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
-            output_loading_info(`bool`, *optional*, defaults to `False`):
-                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
-            local_files_only(`bool`, *optional*, defaults to `False`):
-                Whether or not to only look at local files (i.e., do not try to download the model).
-            use_auth_token (`str` or *bool*, *optional*):
-                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
-                when running `transformers-cli login` (stored in `~/.huggingface`).
-            revision (`str`, *optional*, defaults to `"main"`):
-                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
-                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
-                identifier allowed by git.
-
-        <Tip>
-
-         It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
-         models](https://huggingface.co/docs/hub/models-gated#gated-models).
-
-        </Tip>
-
-        <Tip>
-
-        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
-        use this method in a firewalled environment.
-
-        </Tip>
-
-        """
-        config, kwargs = cls.load_config(
-            pretrained_model_name_or_path=pretrained_model_name_or_path,
-            subfolder=subfolder,
-            return_unused_kwargs=True,
-            **kwargs,
-        )
-        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
-
-    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
-        """
-        Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the
-        [`~SchedulerMixin.from_pretrained`] class method.
-
-        Args:
-            save_directory (`str` or `os.PathLike`):
-                Directory where the configuration JSON file will be saved (will be created if it does not exist).
-        """
-        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
-
-    @property
-    def compatibles(self):
-        """
-        Returns all schedulers that are compatible with this scheduler
-
-        Returns:
-            `List[SchedulerMixin]`: List of compatible schedulers
-        """
-        return self._get_compatibles()
-
-    @classmethod
-    def _get_compatibles(cls):
-        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
-        diffusers_library = importlib.import_module(__name__.split(".")[0])
-        compatible_classes = [
-            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
-        ]
-        return compatible_classes
diff --git a/spaces/Jason1112/ML-GUI/gui.py b/spaces/Jason1112/ML-GUI/gui.py
deleted file mode 100644
index 824bcfd57d6cffde8bb00c2cd69a9bd4e3628fae..0000000000000000000000000000000000000000
--- a/spaces/Jason1112/ML-GUI/gui.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import tkinter as tk
-import pickle 
-import dill
-from nltk.tokenize import word_tokenize
-import re
-from nltk.corpus import stopwords
-from nltk.stem.snowball import SnowballStemmer
-import numpy as np
-import pandas as pd
-stop_words = set(stopwords.words('english'))
-stemmer = SnowballStemmer("english")
-
-#load model
-y_vectorizer = dill.load(open("y_vectorizer_file_180k_50.pkl", "rb"))
-vectorizer = dill.load(open("vectorizer_180k_50.pkl", "rb"))
-clf = pickle.load(open("fileSVM50.pkl", "rb"))
-# process the title and body of the question
-
-def chuan_hoa_title_va_text(title, body):
-    body=re.sub('<code>(.*?)</code>', '', body, flags=re.MULTILINE|re.DOTALL)
-    body = re.sub('<.*?>', ' ', str(body.encode('utf-8')))
-    title=str(title).encode('utf-8')
-    question=str(title)+" "+str(title)+" "+str(title)+" "+ body
-    question=re.sub(r'[^A-Za-z]+',' ',question)
-    words=word_tokenize(str(question.lower()))
-    question=' '.join(str(stemmer.stem(j)) for j in words if j not in stop_words and (len(j)!=1 or j=='c'))
-    return question
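-# Hypothetical example: title "C# build error", body "<p>My build fails</p>" normalizes to
-# roughly "c build error c build error c build error build fail" (title repeated three
-# times for extra weight, HTML stripped, stopwords dropped, remaining words stemmed)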
-
-# convert the question to TF-IDF for prediction
-question = chuan_hoa_title_va_text(title="C# Can't Process.Start a Python script on Ubuntu with Unity", body="for my project I need to create a TTS voice using a library that only has Python support. I already have a functioning Python script, but for whatever reason when I run my C# script in Unity it just skips right over the code for running the script. I don't get any errors from it, it just doesn't do anything with the script. Here's my current code: Process.Start(@/usr/bin/python3.10, /full/path/to/script.py); It doesn't seem to matter if I enable UseShellExecute or not, or use a ProcessStartInfo with it instead, and same thing with setting the working directory. I also triple checked that my Python script isn't throwing errors.")
-df222 = pd.DataFrame({"question": [question]})
-sample_tfidf = vectorizer.transform(df222['question'])
-
-sample_pred = clf.predict(sample_tfidf)
-
-
-'''for Logistic method'''
-# sample_pred = clf.predict_proba(sample_tfidf)
-# Sort probabilities in descending order
-# sorted_indices = np.argsort(sample_pred, axis=1)[:, ::-1]
-# # Get the corresponding label names from the model
-# # labels = y_vectorizer.get_feature_names_out()
-# # Get the label names in sorted probability order
-# sorted_labels = labels[sorted_indices]
-
-
-def process_question_with_logistic():
-    title = title_entry.get()
-    question = question_entry.get()
-    question = chuan_hoa_title_va_text(title, question)
-    df222 = pd.DataFrame({"question": [question]})
-    sample_tfidf = vectorizer.transform(df222['question'])
-    
-    # Predict label probabilities for the vectorized question
-    sample_pred = clf.predict_proba(sample_tfidf)
-    # Sort probabilities in descending order
-    sorted_indices = np.argsort(sample_pred, axis=1)[:, ::-1]
-    # Get the corresponding label names from the model
-    labels = y_vectorizer.get_feature_names_out()
-    # Get the label names in sorted probability order
-    sorted_labels = labels[sorted_indices]
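-    # e.g. (hypothetical): sorted_labels[0][:3] might be ["python", "c#", "unity3d"]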
-    output = ""
-    for j in range(0, 6):
-        print(f"Nhãn: {sorted_labels[0][j]}, Xác suất: {sample_pred[0][sorted_indices[0][j]]}")
-        if sorted_labels[0][j] != "selenium-chromedriver":
-            output += f"Nhãn: {sorted_labels[0][j]}, Xác suất: {sample_pred[0][sorted_indices[0][j]]}" + "\n"
-    
-    output_label.config(text=output)
-
-def process_question():
-    title = title_entry.get()
-    question = question_entry.get()
-    print("tieu de cau hoi: ", title)
-    print("-----------------------------")
-    print("cau hoi", question)
-    question = chuan_hoa_title_va_text(title, question)
-    print("-----------------------------")
-    print("question sau chuan hoa: ", question)
-    df222 = pd.DataFrame({"question": [question]})
-    sample_tfidf = vectorizer.transform(df222['question'])
-    print("-----------------------------")
-    print("cau hoi sau khi ma hoa: ", sample_tfidf)
-    # Predict labels for the vectorized question
-    sample_pred = clf.predict(sample_tfidf)
-    # Sort probabilities in descending order
-    # sorted_indices = np.argsort(sample_pred, axis=1)[:, ::-1]
-    # Get the corresponding label names from the model
-    labels = y_vectorizer.get_feature_names_out()
-    # Get the label names in sorted probability order
-    # sorted_labels = labels[sorted_indices]
-    output = ""
-    print(y_vectorizer.inverse_transform(sample_pred))
-    # for j in range(len(sample_pred)):
-    #     # print(f"Nhãn: {sorted_labels[0][j]}, Xác suất: {sample_pred[0][sorted_indices[0][j]]}")
-    #     # if sorted_labels[0][j] != "selenium-chromedriver":
-    #         output += f"Nhãn: {y_vectorizer.inverse_transform(sample_pred)}" + "\n"
-    
-    output_label.config(text=y_vectorizer.inverse_transform(sample_pred))
-# Create the main window
-window = tk.Tk()
-window.title("Simple GUI Example")
-
-# Create the title label and entry field
-title_label = tk.Label(window, text="Title:")
-title_label.pack()
-
-title_entry = tk.Entry(window)
-title_entry.pack()
-
-# Create the question label and entry field
-question_label = tk.Label(window, text="Question:")
-question_label.pack()
-
-question_entry = tk.Entry(window)
-question_entry.pack()
-
-# Create the process button
-process_button = tk.Button(window, text="Process", command=process_question)
-process_button.pack()
-
-# Create the output label
-output_label = tk.Label(window, text="")
-output_label.pack()
-
-# Start the main event loop
-window.mainloop()
\ No newline at end of file
diff --git a/spaces/JiaoFa/bert-base-chinese/app.py b/spaces/JiaoFa/bert-base-chinese/app.py
deleted file mode 100644
index 3d9f36ff130c71e3850b7c5662c5967b767ef0ab..0000000000000000000000000000000000000000
--- a/spaces/JiaoFa/bert-base-chinese/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/bert-base-chinese").launch()
\ No newline at end of file
diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/callbacks/ckpt.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/callbacks/ckpt.py
deleted file mode 100644
index e01d53e8c10a56c530ef1e8b28f69abc579ce1b1..0000000000000000000000000000000000000000
--- a/spaces/JohnnyPittt/audio-styling/deepafx_st/callbacks/ckpt.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import os
-import sys
-import shutil
-import pytorch_lightning as pl
-
-
-class CopyPretrainedCheckpoints(pl.callbacks.Callback):
-    def __init__(self):
-        super().__init__()
-
-    def on_fit_start(self, trainer, pl_module):
-        """Before training, move the pre-trained checkpoints
-        to the current checkpoint directory.
-
-        """
-        # copy any pre-trained checkpoints to new directory
-        if pl_module.hparams.processor_model == "proxy":
-            pretrained_ckpt_dir = os.path.join(
-                pl_module.logger.experiment.log_dir, "pretrained_checkpoints"
-            )
-            if not os.path.isdir(pretrained_ckpt_dir):
-                os.makedirs(pretrained_ckpt_dir)
-            cp_proxy_ckpts = []
-            for proxy_ckpt in pl_module.hparams.proxy_ckpts:
-                new_ckpt = shutil.copy(
-                    proxy_ckpt,
-                    pretrained_ckpt_dir,
-                )
-                cp_proxy_ckpts.append(new_ckpt)
-                print(f"Moved checkpoint to {new_ckpt}.")
-            # overwrite to the paths in current experiment logs
-            pl_module.hparams.proxy_ckpts = cp_proxy_ckpts
-            print(pl_module.hparams.proxy_ckpts)
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/argutils.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/argutils.py
deleted file mode 100644
index db41683027173517c910e3b259f8da48207dcb38..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/argutils.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from pathlib import Path
-import numpy as np
-import argparse
-
-_type_priorities = [    # In decreasing order
-    Path,
-    str,
-    int,
-    float,
-    bool,
-]
-
-def _priority(o):
-    p = next((i for i, t in enumerate(_type_priorities) if type(o) is t), None) 
-    if p is not None:
-        return p
-    p = next((i for i, t in enumerate(_type_priorities) if isinstance(o, t)), None) 
-    if p is not None:
-        return p
-    return len(_type_priorities)
-
-def print_args(args: argparse.Namespace, parser=None):
-    args = vars(args)
-    if parser is None:
-        priorities = list(map(_priority, args.values()))
-    else:
-        all_params = [a.dest for g in parser._action_groups for a in g._group_actions ]
-        priority = lambda p: all_params.index(p) if p in all_params else len(all_params)
-        priorities = list(map(priority, args.keys()))
-    
-    pad = max(map(len, args.keys())) + 3
-    indices = np.lexsort((list(args.keys()), priorities))
-    items = list(args.items())
-    
-    print("Arguments:")
-    for i in indices:
-        param, value = items[i]
-        print("    {0}:{1}{2}".format(param, ' ' * (pad - len(param)), value))
-    print("")
-    
\ No newline at end of file
diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/common.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/common.py
deleted file mode 100644
index feff2e790d709f859da975b2d11e338eb91d943c..0000000000000000000000000000000000000000
--- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/common.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# 
-#  Copyright (c) 2013-present, Anoop Kunchukuttan
-#  All rights reserved.
-#  
-#  This source code is licensed under the MIT license found in the
-#  LICENSE file in the root directory of this source tree.
-# 
-
-import os
-
-"""
-Path to the Indic NLP Resources directory
-"""
-INDIC_RESOURCES_PATH=''
-
-def init():
-    """
-    Initialize the module. The following actions are performed:
-
-    - Checks if the INDIC_RESOURCES_PATH variable is set. If not, checks if it can be initialized from
-        the INDIC_RESOURCES_PATH environment variable. If that fails, an exception is raised.
-    """
-    global INDIC_RESOURCES_PATH 
-    try: 
-        if INDIC_RESOURCES_PATH=='':
-            INDIC_RESOURCES_PATH=os.environ['INDIC_RESOURCES_PATH']
-    except Exception as e: 
-        raise IndicNlpException('INDIC_RESOURCES_PATH not set')
-
-    if INDIC_RESOURCES_PATH=='': 
-        raise IndicNlpException('INDIC_RESOURCES_PATH not set')
-
-
-
-def get_resources_path(): 
-    """
-        Get the path to the Indic NLP Resources directory
-    """
-    return INDIC_RESOURCES_PATH
-
-def set_resources_path(resources_path): 
-    """
-        Set the path to the Indic NLP Resources directory
-    """
-    global INDIC_RESOURCES_PATH 
-    INDIC_RESOURCES_PATH=resources_path
-
-class IndicNlpException(Exception):
-    """
-        Exceptions thrown by Indic NLP Library components are instances of this class.
-        The 'msg' attribute contains exception details.
-    """
-    def __init__(self, msg):
-        self.msg = msg 
-
-    def __str__(self):
-        return repr(self.msg)
-
diff --git a/spaces/KushJaggi/YOLOv8/README.md b/spaces/KushJaggi/YOLOv8/README.md
deleted file mode 100644
index 10f91242b875b3e5850807f1a8acfc52c34ef5dd..0000000000000000000000000000000000000000
--- a/spaces/KushJaggi/YOLOv8/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 📷 Object Recognition- YOLOv8
-emoji: 🚀
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/transformer_decoder/transformer.py b/spaces/Laihiujin/OneFormer/oneformer/modeling/transformer_decoder/transformer.py
deleted file mode 100644
index cd07525673b9b1165e1fdd0c9990a8f29c84f199..0000000000000000000000000000000000000000
--- a/spaces/Laihiujin/OneFormer/oneformer/modeling/transformer_decoder/transformer.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# ------------------------------------------------------------------------------
-# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/transformer_decoder/transformer.py
-# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
-# ------------------------------------------------------------------------------
-
-"""
-Transformer class.
-
-Copy-paste from torch.nn.Transformer with modifications:
-    * positional encodings are passed in MHattention
-    * extra LN at the end of encoder is removed
-    * decoder returns a stack of activations from all decoding layers
-"""
-import copy
-from typing import List, Optional
-
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-
-class Transformer(nn.Module):
-    def __init__(
-        self,
-        d_model=512,
-        nhead=8,
-        num_encoder_layers=6,
-        num_decoder_layers=6,
-        dim_feedforward=2048,
-        dropout=0.1,
-        activation="relu",
-        normalize_before=False,
-        return_intermediate_dec=False,
-    ):
-        super().__init__()
-
-        encoder_layer = TransformerEncoderLayer(
-            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
-        )
-        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
-        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
-
-        decoder_layer = TransformerDecoderLayer(
-            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
-        )
-        decoder_norm = nn.LayerNorm(d_model)
-        self.decoder = TransformerDecoder(
-            decoder_layer,
-            num_decoder_layers,
-            decoder_norm,
-            return_intermediate=return_intermediate_dec,
-        )
-
-        self._reset_parameters()
-
-        self.d_model = d_model
-        self.nhead = nhead
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def forward(self, src, mask, query_embed, pos_embed, task_token=None):
-        # flatten NxCxHxW to HWxNxC
-        bs, c, h, w = src.shape
-        src = src.flatten(2).permute(2, 0, 1)
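-        # e.g. (hypothetical sizes): src (2, 256, 32, 32) -> (1024, 2, 256)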
-        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
-        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
-        if mask is not None:
-            mask = mask.flatten(1)
-            
-        if task_token is None:
-            tgt = torch.zeros_like(query_embed)
-        else:
-            tgt = task_token.repeat(query_embed.shape[0], 1, 1)
-   
-        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
-        hs = self.decoder(
-            tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
-        )
-        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
-
-
-class TransformerEncoder(nn.Module):
-    def __init__(self, encoder_layer, num_layers, norm=None):
-        super().__init__()
-        self.layers = _get_clones(encoder_layer, num_layers)
-        self.num_layers = num_layers
-        self.norm = norm
-
-    def forward(
-        self,
-        src,
-        mask: Optional[Tensor] = None,
-        src_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-    ):
-        output = src
-
-        for layer in self.layers:
-            output = layer(
-                output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
-            )
-
-        if self.norm is not None:
-            output = self.norm(output)
-
-        return output
-
-
-class TransformerDecoder(nn.Module):
-    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
-        super().__init__()
-        self.layers = _get_clones(decoder_layer, num_layers)
-        self.num_layers = num_layers
-        self.norm = norm
-        self.return_intermediate = return_intermediate
-
-    def forward(
-        self,
-        tgt,
-        memory,
-        tgt_mask: Optional[Tensor] = None,
-        memory_mask: Optional[Tensor] = None,
-        tgt_key_padding_mask: Optional[Tensor] = None,
-        memory_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-        query_pos: Optional[Tensor] = None,
-    ):
-        output = tgt
-
-        intermediate = []
-
-        for layer in self.layers:
-            output = layer(
-                output,
-                memory,
-                tgt_mask=tgt_mask,
-                memory_mask=memory_mask,
-                tgt_key_padding_mask=tgt_key_padding_mask,
-                memory_key_padding_mask=memory_key_padding_mask,
-                pos=pos,
-                query_pos=query_pos,
-            )
-            if self.return_intermediate:
-                intermediate.append(self.norm(output))
-
-        if self.norm is not None:
-            output = self.norm(output)
-            if self.return_intermediate:
-                intermediate.pop()
-                intermediate.append(output)
-
-        if self.return_intermediate:
-            return torch.stack(intermediate)
-
-        return output.unsqueeze(0)
-
-
-class TransformerEncoderLayer(nn.Module):
-    def __init__(
-        self,
-        d_model,
-        nhead,
-        dim_feedforward=2048,
-        dropout=0.1,
-        activation="relu",
-        normalize_before=False,
-    ):
-        super().__init__()
-        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-        # Implementation of Feedforward model
-        self.linear1 = nn.Linear(d_model, dim_feedforward)
-        self.dropout = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(dim_feedforward, d_model)
-
-        self.norm1 = nn.LayerNorm(d_model)
-        self.norm2 = nn.LayerNorm(d_model)
-        self.dropout1 = nn.Dropout(dropout)
-        self.dropout2 = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(
-        self,
-        src,
-        src_mask: Optional[Tensor] = None,
-        src_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-    ):
-        q = k = self.with_pos_embed(src, pos)
-        src2 = self.self_attn(
-            q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
-        )[0]
-        src = src + self.dropout1(src2)
-        src = self.norm1(src)
-        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
-        src = src + self.dropout2(src2)
-        src = self.norm2(src)
-        return src
-
-    def forward_pre(
-        self,
-        src,
-        src_mask: Optional[Tensor] = None,
-        src_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-    ):
-        src2 = self.norm1(src)
-        q = k = self.with_pos_embed(src2, pos)
-        src2 = self.self_attn(
-            q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
-        )[0]
-        src = src + self.dropout1(src2)
-        src2 = self.norm2(src)
-        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
-        src = src + self.dropout2(src2)
-        return src
-
-    def forward(
-        self,
-        src,
-        src_mask: Optional[Tensor] = None,
-        src_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-    ):
-        if self.normalize_before:
-            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
-        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
-
-
-class TransformerDecoderLayer(nn.Module):
-    def __init__(
-        self,
-        d_model,
-        nhead,
-        dim_feedforward=2048,
-        dropout=0.1,
-        activation="relu",
-        normalize_before=False,
-    ):
-        super().__init__()
-        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-        # Implementation of Feedforward model
-        self.linear1 = nn.Linear(d_model, dim_feedforward)
-        self.dropout = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(dim_feedforward, d_model)
-
-        self.norm1 = nn.LayerNorm(d_model)
-        self.norm2 = nn.LayerNorm(d_model)
-        self.norm3 = nn.LayerNorm(d_model)
-        self.dropout1 = nn.Dropout(dropout)
-        self.dropout2 = nn.Dropout(dropout)
-        self.dropout3 = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(
-        self,
-        tgt,
-        memory,
-        tgt_mask: Optional[Tensor] = None,
-        memory_mask: Optional[Tensor] = None,
-        tgt_key_padding_mask: Optional[Tensor] = None,
-        memory_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-        query_pos: Optional[Tensor] = None,
-    ):
-        q = k = self.with_pos_embed(tgt, query_pos)
-        tgt2 = self.self_attn(
-            q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
-        )[0]
-        tgt = tgt + self.dropout1(tgt2)
-        tgt = self.norm1(tgt)
-        tgt2 = self.multihead_attn(
-            query=self.with_pos_embed(tgt, query_pos),
-            key=self.with_pos_embed(memory, pos),
-            value=memory,
-            attn_mask=memory_mask,
-            key_padding_mask=memory_key_padding_mask,
-        )[0]
-        tgt = tgt + self.dropout2(tgt2)
-        tgt = self.norm2(tgt)
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
-        tgt = tgt + self.dropout3(tgt2)
-        tgt = self.norm3(tgt)
-        return tgt
-
-    def forward_pre(
-        self,
-        tgt,
-        memory,
-        tgt_mask: Optional[Tensor] = None,
-        memory_mask: Optional[Tensor] = None,
-        tgt_key_padding_mask: Optional[Tensor] = None,
-        memory_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-        query_pos: Optional[Tensor] = None,
-    ):
-        tgt2 = self.norm1(tgt)
-        q = k = self.with_pos_embed(tgt2, query_pos)
-        tgt2 = self.self_attn(
-            q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
-        )[0]
-        tgt = tgt + self.dropout1(tgt2)
-        tgt2 = self.norm2(tgt)
-        tgt2 = self.multihead_attn(
-            query=self.with_pos_embed(tgt2, query_pos),
-            key=self.with_pos_embed(memory, pos),
-            value=memory,
-            attn_mask=memory_mask,
-            key_padding_mask=memory_key_padding_mask,
-        )[0]
-        tgt = tgt + self.dropout2(tgt2)
-        tgt2 = self.norm3(tgt)
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
-        tgt = tgt + self.dropout3(tgt2)
-        return tgt
-
-    def forward(
-        self,
-        tgt,
-        memory,
-        tgt_mask: Optional[Tensor] = None,
-        memory_mask: Optional[Tensor] = None,
-        tgt_key_padding_mask: Optional[Tensor] = None,
-        memory_key_padding_mask: Optional[Tensor] = None,
-        pos: Optional[Tensor] = None,
-        query_pos: Optional[Tensor] = None,
-    ):
-        if self.normalize_before:
-            return self.forward_pre(
-                tgt,
-                memory,
-                tgt_mask,
-                memory_mask,
-                tgt_key_padding_mask,
-                memory_key_padding_mask,
-                pos,
-                query_pos,
-            )
-        return self.forward_post(
-            tgt,
-            memory,
-            tgt_mask,
-            memory_mask,
-            tgt_key_padding_mask,
-            memory_key_padding_mask,
-            pos,
-            query_pos,
-        )
-
-
-def _get_clones(module, N):
-    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
-
-
-def _get_activation_fn(activation):
-    """Return an activation function given a string"""
-    if activation == "relu":
-        return F.relu
-    if activation == "gelu":
-        return F.gelu
-    if activation == "glu":
-        return F.glu
-    raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
diff --git a/spaces/Legal-ease/legal-ease/app.py b/spaces/Legal-ease/legal-ease/app.py
deleted file mode 100644
index 16f3344792ca52344678f8ec034c31c821c08228..0000000000000000000000000000000000000000
--- a/spaces/Legal-ease/legal-ease/app.py
+++ /dev/null
@@ -1,395 +0,0 @@
-from __future__ import annotations
-from typing import Iterable
-import gradio as gr
-from base.legal_document_utils import (
-    summarize,
-    question_answer,
-    load_gpl_license,
-    load_pokemon_license,
-)
-from base.document_search import cross_lingual_document_search, translate_search_result
-from gradio.themes.base import Base
-from gradio.themes import Soft
-from gradio.themes.utils import colors, fonts, sizes
-
-
-class CustomTheme(Base):
-    def __init__(
-        self,
-        *,
-        primary_hue: colors.Color | str = colors.blue,
-        secondary_hue: colors.Color | str = colors.cyan,
-        neutral_hue: colors.Color | str = colors.zinc,
-        spacing_size: sizes.Size | str = sizes.spacing_md,
-        radius_size: sizes.Size | str = sizes.radius_md,
-        text_size: sizes.Size | str = sizes.text_md,
-    ):
-        super().__init__(
-            primary_hue=primary_hue,
-            secondary_hue=secondary_hue,
-            neutral_hue=neutral_hue,
-            spacing_size=spacing_size,
-            radius_size=radius_size,
-            text_size=text_size,
-        )
-        self.name = "custom_theme"
-        super().set(
-            # Colors
-            background_fill_primary="*neutral_50",
-            slider_color="*primary_500",
-            slider_color_dark="*primary_600",
-            # Shadows
-            shadow_drop="0 1px 4px 0 rgb(0 0 0 / 0.1)",
-            shadow_drop_lg="0 2px 5px 0 rgb(0 0 0 / 0.1)",
-            # Block Labels
-            block_background_fill="white",
-            block_label_padding="*spacing_sm *spacing_md",
-            block_label_background_fill="*primary_100",
-            block_label_background_fill_dark="*primary_600",
-            block_label_radius="*radius_md",
-            block_label_text_size="*text_md",
-            block_label_text_weight="600",
-            block_label_text_color="*primary_500",
-            block_label_text_color_dark="*white",
-            block_title_radius="*block_label_radius",
-            block_title_padding="*block_label_padding",
-            block_title_background_fill="*block_label_background_fill",
-            block_title_text_weight="600",
-            block_title_text_color="*primary_500",
-            block_title_text_color_dark="*white",
-            block_label_margin="*spacing_md",
-            block_shadow="*shadow_drop_lg",
-            # Inputs
-            input_border_color="*neutral_50",
-            input_shadow="*shadow_drop",
-            input_shadow_focus="*shadow_drop_lg",
-            checkbox_shadow="none",
-            # Buttons
-            shadow_spread="6px",
-            button_shadow="*shadow_drop_lg",
-            button_shadow_hover="*shadow_drop_lg",
-            button_shadow_active="*shadow_inset",
-            button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
-            button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
-            button_primary_text_color="white",
-            button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
-            button_primary_background_fill_hover_dark="*primary_500",
-            button_secondary_background_fill="white",
-            button_secondary_background_fill_hover="*neutral_100",
-            button_secondary_background_fill_hover_dark="*primary_500",
-            button_secondary_text_color="*neutral_800",
-            button_cancel_background_fill="*button_secondary_background_fill",
-            button_cancel_background_fill_hover="*button_secondary_background_fill_hover",
-            button_cancel_background_fill_hover_dark="*button_secondary_background_fill_hover",
-            button_cancel_text_color="*button_secondary_text_color",
-            # Checkboxes
-            checkbox_label_shadow="*shadow_drop_lg",
-            checkbox_label_background_fill_selected="*primary_500",
-            checkbox_label_background_fill_selected_dark="*primary_600",
-            checkbox_border_width="1px",
-            checkbox_border_color="*neutral_100",
-            checkbox_border_color_dark="*neutral_600",
-            checkbox_background_color_selected="*primary_600",
-            checkbox_background_color_selected_dark="*primary_700",
-            checkbox_border_color_focus="*primary_500",
-            checkbox_border_color_focus_dark="*primary_600",
-            checkbox_border_color_selected="*primary_600",
-            checkbox_border_color_selected_dark="*primary_700",
-            checkbox_label_text_color_selected="white",
-            # Borders
-            block_border_width="0px",
-            panel_border_width="1px",
-        )
-
-
-custom_theme = CustomTheme()
-
-
-max_search_results = 3
-
-
-def reset_chatbot():
-    return gr.update(value="")
-
-
-def get_user_input(input_question, history):
-    return "", history + [[input_question, None]]
-
-
-def legal_doc_qa_bot(input_document, history):
-    bot_message = question_answer(input_document, history)
-    history[-1][1] = bot_message
-    return history
-
-
-with gr.Blocks(theme=custom_theme) as demo:
-    gr.HTML(
-        """<html><center><img src='file/logo/flc_design4.png', alt='Legal-ease logo', width=250, height=250 /></center><br></html>"""
-    )
-
-    qa_bot_state = gr.State(value=[])
-
-    with gr.Tabs():
-        with gr.TabItem("Q&A"):
-            gr.HTML(
-                """<p style="text-align:center;"><b>Legal documents can be difficult to comprehend and understand. Add a legal document below and ask any questions related to it.</p>"""
-            )
-
-            with gr.Row():
-                with gr.Column():
-                    input_document = gr.Text(label="Copy your document here", lines=10)
-
-                with gr.Column():
-                    chatbot = gr.Chatbot(label="Chat History")
-                    input_question = gr.Text(
-                        label="Ask a question",
-                        placeholder="Type a question here and hit enter.",
-                    )
-                    clear = gr.Button("Clear", variant="primary")
-
-            with gr.Row():
-                with gr.Accordion("Show example inputs I can load:", open=False):
-                    example_1 = gr.Button(
-                        "Load GPL License Document", variant="primary"
-                    )
-                    example_2 = gr.Button(
-                        "Load Pokemon Go Terms of Service", variant="primary"
-                    )
-
-        with gr.TabItem("Summarize"):
-            gr.HTML(
-                """<p style="text-align:center;"><b>Legal documents can be very lengthy. Add a legal document below and generate a quick summary for it.</p>"""
-            )
-
-            with gr.Row():
-                with gr.Column():
-                    summary_input = gr.Text(label="Document", lines=10)
-                    generate_summary = gr.Button("Generate Summary", variant="primary")
-
-                with gr.Column():
-                    summary_output = gr.Text(label="Summary", lines=10)
-                    invisible_comp = gr.Text(label="Dummy Component", visible=False)
-
-            with gr.Row():
-                with gr.Accordion("Advanced Settings:", open=False):
-                    summary_length = gr.Radio(
-                        ["short", "medium", "long"],
-                        label="Summary Length",
-                        value="long",
-                    )
-                    summary_format = gr.Radio(
-                        ["paragraph", "bullets"],
-                        label="Summary Format",
-                        value="bullets",
-                    )
-                    extractiveness = gr.Radio(
-                        ["low", "medium", "high"],
-                        label="Extractiveness",
-                        info="Controls how close to the original text the summary is.",
-                        visible=False,
-                        value="high",
-                    )
-                    temperature = gr.Slider(
-                        minimum=0,
-                        maximum=5.0,
-                        value=0.64,
-                        step=0.1,
-                        interactive=True,
-                        visible=False,
-                        label="Temperature",
-                        info="Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output.",
-                    )
-
-            with gr.Row():
-                with gr.Accordion("Show example inputs I can load:", open=False):
-                    example_3 = gr.Button(
-                        "Load GPL License Document", variant="primary"
-                    )
-                    example_4 = gr.Button(
-                        "Load Pokemon Go Terms of Service", variant="primary"
-                    )
-
-        with gr.TabItem("Document Search"):
-            gr.HTML(
-                """<p style="text-align:center;"><b>Search across a set of legal documents in any language or even a mix of languages. Query them using any one of over 100 supported languages.</p>"""
-            )
-            gr.HTML(
-                """<p style="text-align:center; font-style:italic;">Get started with a pre-indexed set of documents from eight European countries (Belgium, France, Hungary, Italy, Netherlands, Norway, Poland, UK) in seven languages, outlining legislation passed during the COVID-19 pandemic.</p>"""
-            )
-
-            with gr.Row():
-                text_match = gr.CheckboxGroup(
-                    ["Full Text Search"], label="find exact text in documents"
-                )
-
-            with gr.Row():
-                lang_choices = gr.CheckboxGroup(
-                    [
-                        "English",
-                        "French",
-                        "Italian",
-                        "Dutch",
-                        "Polish",
-                        "Hungarian",
-                        "Norwegian",
-                    ],
-                    label="Filter results based on language",
-                )
-
-            with gr.Row():
-                with gr.Column():
-                    user_query = gr.Text(
-                        label="Enter query here",
-                        placeholder="Search through all your documents",
-                    )
-
-                    num_search_results = gr.Slider(
-                        1,
-                        max_search_results,
-                        visible=False,
-                        value=max_search_results,
-                        step=1,
-                        interactive=True,
-                        label="How many search results to show:",
-                    )
-
-                    with gr.Row():
-                        with gr.Column():
-                            query_match_out_1 = gr.Textbox(label="Search Result 1")
-
-                        with gr.Column():
-                            with gr.Accordion("Translate Search Result", open=False):
-                                translate_1 = gr.Button(
-                                    value="Translate",
-                                    variant="primary",
-                                )
-                                translate_res_1 = gr.Textbox(
-                                    label="Translation Result 1"
-                                )
-
-                    with gr.Row():
-                        with gr.Column():
-                            query_match_out_2 = gr.Textbox(label="Search Result 2")
-
-                        with gr.Column():
-                            with gr.Accordion("Translate Search Result", open=False):
-                                translate_2 = gr.Button(
-                                    value="Translate",
-                                    variant="primary",
-                                )
-                                translate_res_2 = gr.Textbox(
-                                    label="Translation Result 2"
-                                )
-
-                    with gr.Row():
-                        with gr.Column():
-                            query_match_out_3 = gr.Textbox(label="Search Result 3")
-
-                        with gr.Column():
-                            with gr.Accordion("Translate Search Result", open=False):
-                                translate_3 = gr.Button(
-                                    value="Translate",
-                                    variant="primary",
-                                )
-                                translate_res_3 = gr.Textbox(
-                                    label="Translation Result 3"
-                                )
-
-    # fetch answer for submitted question corresponding to input document
-    input_question.submit(
-        get_user_input,
-        [input_question, chatbot],
-        [input_question, chatbot],
-        queue=False,
-    ).then(legal_doc_qa_bot, [input_document, chatbot], chatbot)
-
-    # reset the chatbot Q&A history when input document changes
-    input_document.change(fn=reset_chatbot, inputs=[], outputs=chatbot)
-
-    # Loading examples on click for Q&A module
-    example_1.click(
-        load_gpl_license,
-        [],
-        [input_document, input_question],
-        queue=False,
-    )
-
-    example_2.click(
-        load_pokemon_license,
-        [],
-        [input_document, input_question],
-        queue=False,
-    )
-
-    # Loading examples on click for Summarize module
-    example_3.click(
-        load_gpl_license,
-        [],
-        [summary_input, invisible_comp],
-        queue=False,
-    )
-
-    example_4.click(
-        load_pokemon_license,
-        [],
-        [summary_input, invisible_comp],
-        queue=False,
-    )
-
-    # generate summary corresponding to document submitted by the user.
-    generate_summary.click(
-        summarize,
-        [summary_input, summary_length, summary_format, extractiveness, temperature],
-        [summary_output],
-        queue=False,
-    )
-
-    # clear the chatbot Q&A history when this button is clicked by the user
-    clear.click(lambda: None, None, chatbot, queue=False)
-
-    # run search as user is typing the query
-    user_query.change(
-        cross_lingual_document_search,
-        [user_query, num_search_results, lang_choices, text_match],
-        [query_match_out_1, query_match_out_2, query_match_out_3],
-        queue=False,
-    )
-
-    # run search if user submits query
-    user_query.submit(
-        cross_lingual_document_search,
-        [user_query, num_search_results, lang_choices, text_match],
-        [query_match_out_1, query_match_out_2, query_match_out_3],
-        queue=False,
-    )
-
-    # translate results corresponding to 1st search result obtained if user clicks 'Translate'
-    translate_1.click(
-        translate_search_result,
-        [query_match_out_1, user_query],
-        [translate_res_1],
-        queue=False,
-    )
-
-    # translate results corresponding to 2nd search result obtained if user clicks 'Translate'
-    translate_2.click(
-        translate_search_result,
-        [query_match_out_2, user_query],
-        [translate_res_2],
-        queue=False,
-    )
-
-    # translate results corresponding to 3rd search result obtained if user clicks 'Translate'
-    translate_3.click(
-        translate_search_result,
-        [query_match_out_3, user_query],
-        [translate_res_3],
-        queue=False,
-    )
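-
-    # NOTE (editor): every handler above passes queue=False, i.e. these events
-    # bypass Gradio's request queue and run as plain HTTP calls; long-running
-    # model calls may need queueing (or batching) under real traffic.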
-
-
-if __name__ == "__main__":
-    demo.launch(debug=True)
diff --git a/spaces/Letheoricien/demo/app.py b/spaces/Letheoricien/demo/app.py
deleted file mode 100644
index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000
--- a/spaces/Letheoricien/demo/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Luelll/ChuanhuChatGPT/modules/webui_locale.py b/spaces/Luelll/ChuanhuChatGPT/modules/webui_locale.py
deleted file mode 100644
index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000
--- a/spaces/Luelll/ChuanhuChatGPT/modules/webui_locale.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import locale
-import commentjson as json
-
-class I18nAuto:
-    def __init__(self):
-        if os.path.exists("config.json"):
-            with open("config.json", "r", encoding='utf-8') as f:
-                config = json.load(f)
-        else:
-            config = {}
-        lang_config = config.get("language", "auto")
-        language = os.environ.get("LANGUAGE", lang_config)
-        if language == "auto":
-            language = locale.getdefaultlocale()[0]  # get the system language code (e.g. zh_CN)
-        self.language_map = {}
-        self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
-        if self.file_is_exists:
-            with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
-                self.language_map.update(json.load(f))
-
-    def __call__(self, key):
-        if self.file_is_exists and key in self.language_map:
-            return self.language_map[key]
-        else:
-            return key
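-
-
-if __name__ == "__main__":
-    # Editor's sketch (not original code): I18nAuto falls back to returning
-    # the key itself when no ./locale/<lang>.json mapping exists for the
-    # detected language, so this runs even without any locale files.
-    i18n = I18nAuto()
-    print(i18n("upload"))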
diff --git a/spaces/LuxOAI/GPT4-30b/README.md b/spaces/LuxOAI/GPT4-30b/README.md
deleted file mode 100644
index dd281af6cf25c45cefcf7d62c41caf44e497143c..0000000000000000000000000000000000000000
--- a/spaces/LuxOAI/GPT4-30b/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: MetaIX GPT4 X Alpasta 30b
-emoji: 📚
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-duplicated_from: wisoner/MetaIX-GPT4-X-Alpasta-30b
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ML701G7/taim-gan/src/models/losses.py b/spaces/ML701G7/taim-gan/src/models/losses.py
deleted file mode 100644
index 6579694ae4e96921a49a53689efad6fc9b6f2bd4..0000000000000000000000000000000000000000
--- a/spaces/ML701G7/taim-gan/src/models/losses.py
+++ /dev/null
@@ -1,344 +0,0 @@
-"""Module containing the loss functions for the GANs."""
-from typing import Any, Dict
-
-import torch
-from torch import nn
-
-# pylint: disable=too-many-arguments
-# pylint: disable=too-many-locals
-
-
-def generator_loss(
-    logits: Dict[str, Dict[str, torch.Tensor]],
-    local_fake_incept_feat: torch.Tensor,
-    global_fake_incept_feat: torch.Tensor,
-    real_labels: torch.Tensor,
-    words_emb: torch.Tensor,
-    sent_emb: torch.Tensor,
-    match_labels: torch.Tensor,
-    cap_lens: torch.Tensor,
-    class_ids: torch.Tensor,
-    real_vgg_feat: torch.Tensor,
-    fake_vgg_feat: torch.Tensor,
-    const_dict: Dict[str, float],
-) -> Any:
-    """Calculate the loss for the generator.
-
-    Args:
-        logits: Dictionary with fake/real and word-level/uncond/cond logits
-
-        local_fake_incept_feat: The local inception features for the fake images.
-
-        global_fake_incept_feat: The global inception features for the fake images.
-
-        real_labels: Labels for "real" images as predicted by the discriminator;
-        this is a tensor of ones. [shape: (batch_size, 1)].
-
-        words_emb: The embeddings for all the words in the captions.
-        shape: (batch_size, embedding_size, max_caption_length)
-
-        sent_emb: The embeddings for the sentences.
-        shape: (batch_size, embedding_size)
-
-        match_labels: Tensor of shape: (batch_size, 1).
-        This is of the form torch.tensor([0, 1, 2, ..., batch-1])
-
-        cap_lens: The length of the 'actual' captions in the batch [without padding]
-        shape: (batch_size, 1)
-
-        class_ids: The class ids for the instance. shape: (batch_size, 1)
-
-        real_vgg_feat: The vgg features for the real images. shape: (batch_size, 128, 128, 128)
-        fake_vgg_feat: The vgg features for the fake images. shape: (batch_size, 128, 128, 128)
-
-        const_dict: The dictionary containing the constants.
-    """
-    lambda1 = const_dict["lambda1"]
-    total_error_g = 0.0
-
-    cond_logits = logits["fake"]["cond"]
-    cond_err_g = nn.BCEWithLogitsLoss()(cond_logits, real_labels)
-
-    uncond_logits = logits["fake"]["uncond"]
-    uncond_err_g = nn.BCEWithLogitsLoss()(uncond_logits, real_labels)
-
-    # add up the conditional and unconditional losses
-    loss_g = cond_err_g + uncond_err_g
-    total_error_g += loss_g
-
-    # DAMSM Loss from attnGAN.
-    loss_damsm = damsm_loss(
-        local_fake_incept_feat,
-        global_fake_incept_feat,
-        words_emb,
-        sent_emb,
-        match_labels,
-        cap_lens,
-        class_ids,
-        const_dict,
-    )
-
-    total_error_g += loss_damsm
-
-    loss_per = 0.5 * nn.MSELoss()(real_vgg_feat, fake_vgg_feat)  # perceptual loss
-
-    total_error_g += lambda1 * loss_per
-
-    return total_error_g
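-
-
-# Editor's note: the const_dict consumed above is not defined in this file; the
-# sketch below shows the keys generator_loss/damsm_loss expect. The numeric
-# values are assumptions (attnGAN-style defaults), not the authors' settings.
-EXAMPLE_CONST_DICT = {
-    "lambda1": 1.0,  # weight of the VGG perceptual loss
-    "lambda3": 5.0,  # weight of the DAMSM term
-    "gamma1": 4.0,   # attention temperature in compute_region_context_vector
-    "gamma2": 5.0,   # word-level relevance temperature
-    "gamma3": 10.0,  # sentence-image matching temperature
-}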
-
-
-def damsm_loss(
-    local_incept_feat: torch.Tensor,
-    global_incept_feat: torch.Tensor,
-    words_emb: torch.Tensor,
-    sent_emb: torch.Tensor,
-    match_labels: torch.Tensor,
-    cap_lens: torch.Tensor,
-    class_ids: torch.Tensor,
-    const_dict: Dict[str, float],
-) -> Any:
-    """Calculate the DAMSM loss from the attnGAN paper.
-
-    Args:
-        local_incept_feat: The local inception features. [shape: (batch, D, 17, 17)]
-
-        global_incept_feat: The global inception features. [shape: (batch, D)]
-
-        words_emb: The embeddings for all the words in the captions.
-        shape: (batch, D, max_caption_length)
-
-        sent_emb: The embeddings for the sentences. shape: (batch_size, D)
-
-        match_labels: Tensor of shape: (batch_size, 1).
-        This is of the form torch.tensor([0, 1, 2, ..., batch-1])
-
-        cap_lens: The length of the 'actual' captions in the batch [without padding]
-        shape: (batch_size, 1)
-
-        class_ids: The class ids for the instance. shape: (batch, 1)
-
-        const_dict: The dictionary containing the constants.
-    """
-    batch_size = match_labels.size(0)
-    # Mask mismatched samples that come from the same class as the real sample
-    masks = []
-
-    match_scores = []
-    gamma1 = const_dict["gamma1"]
-    gamma2 = const_dict["gamma2"]
-    gamma3 = const_dict["gamma3"]
-    lambda3 = const_dict["lambda3"]
-
-    for i in range(batch_size):
-        mask = (class_ids == class_ids[i]).int()
-        # This ensures that "correct class" index is not included in the mask.
-        mask[i] = 0
-        masks.append(mask.reshape(1, -1))  # shape: (1, batch)
-
-        numb_words = int(cap_lens[i])
-        # shape: (1, D, L), this picks the caption at ith batch index.
-        query_words = words_emb[i, :, :numb_words].unsqueeze(0)
-        # shape: (batch, D, L), this expands the same caption for all batch indices.
-        query_words = query_words.repeat(batch_size, 1, 1)
-
-        c_i = compute_region_context_vector(
-            local_incept_feat, query_words, gamma1
-        )  # Taken from attnGAN paper. shape: (batch, D, L)
-
-        query_words = query_words.transpose(1, 2)  # shape: (batch, L, D)
-        c_i = c_i.transpose(1, 2)  # shape: (batch, L, D)
-        query_words = query_words.reshape(
-            batch_size * numb_words, -1
-        )  # shape: (batch * L, D)
-        c_i = c_i.reshape(batch_size * numb_words, -1)  # shape: (batch * L, D)
-
-        r_i = compute_relevance(
-            c_i, query_words
-        )  # cosine similarity, or R(c_i, e_i) from attnGAN paper. shape: (batch * L, 1)
-        r_i = r_i.view(batch_size, numb_words)  # shape: (batch, L)
-        r_i = torch.exp(r_i * gamma2)  # shape: (batch, L)
-        r_i = r_i.sum(dim=1, keepdim=True)  # shape: (batch, 1)
-        r_i = torch.log(
-            r_i
-        )  # This is image-text matching score b/w whole image and caption, shape: (batch, 1)
-        match_scores.append(r_i)
-
-    masks = torch.cat(masks, dim=0).bool()  # type: ignore
-    match_scores = torch.cat(match_scores, dim=1)  # type: ignore
-
-    # This corresponds to P(D|Q) from attnGAN.
-    match_scores = gamma3 * match_scores  # type: ignore
-    match_scores.data.masked_fill_(  # type: ignore
-        masks, -float("inf")
-    )  # mask out the scores for mis-matched samples
-
-    match_scores_t = match_scores.transpose(  # type: ignore
-        0, 1
-    )  # This corresponds to P(Q|D) from attnGAN.
-
-    # This corresponds to L1_w from attnGAN.
-    l1_w = nn.CrossEntropyLoss()(match_scores, match_labels)
-    # This corresponds to L2_w from attnGAN.
-    l2_w = nn.CrossEntropyLoss()(match_scores_t, match_labels)
-
-    incept_feat_norm = torch.linalg.norm(global_incept_feat, dim=1)
-    sent_emb_norm = torch.linalg.norm(sent_emb, dim=1)
-
-    # shape: (batch, batch)
-    global_match_score = global_incept_feat @ (sent_emb.T)
-
-    global_match_score = (
-        global_match_score / torch.outer(incept_feat_norm, sent_emb_norm)
-    ).clamp(min=1e-8)
-    global_match_score = gamma3 * global_match_score
-
-    # mask out the scores for mis-matched samples
-    global_match_score.data.masked_fill_(masks, -float("inf"))  # type: ignore
-
-    global_match_t = global_match_score.T  # shape: (batch, batch)
-
-    # This corresponds to L1_s from attnGAN.
-    l1_s = nn.CrossEntropyLoss()(global_match_score, match_labels)
-    # This corresponds to L2_s from attnGAN.
-    l2_s = nn.CrossEntropyLoss()(global_match_t, match_labels)
-
-    loss_damsm = lambda3 * (l1_w + l2_w + l1_s + l2_s)
-
-    return loss_damsm
-
-
-def compute_relevance(c_i: torch.Tensor, query_words: torch.Tensor) -> Any:
-    """Computes the cosine similarity between the region context vector and the query words.
-
-    Args:
-        c_i: The region context vector. shape: (batch * L, D)
-        query_words: The query words. shape: (batch * L, D)
-    """
-    prod = c_i * query_words  # shape: (batch * L, D)
-    numr = torch.sum(prod, dim=1)  # shape: (batch * L,)
-    norm_c = torch.linalg.norm(c_i, ord=2, dim=1)
-    norm_q = torch.linalg.norm(query_words, ord=2, dim=1)
-    denr = norm_c * norm_q
-    r_i = (numr / denr).clamp(min=1e-8).squeeze()  # shape: (batch * L,)
-    return r_i
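-
-
-def _relevance_sanity_check(batch_l: int = 6, dim: int = 32) -> bool:
-    """Editor's sketch (not original code): compute_relevance should match
-    torch's cosine similarity once both results are clamped at 1e-8."""
-    c = torch.randn(batch_l, dim)
-    q = torch.randn(batch_l, dim)
-    expected = nn.functional.cosine_similarity(c, q, dim=1).clamp(min=1e-8)
-    return torch.allclose(compute_relevance(c, q), expected, atol=1e-6)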
-
-
-def compute_region_context_vector(
-    local_incept_feat: torch.Tensor, query_words: torch.Tensor, gamma1: float
-) -> Any:
-    """Compute the region context vector (c_i) from attnGAN paper.
-
-    Args:
-        local_incept_feat: The local inception features. [shape: (batch, D, 17, 17)]
-        query_words: The embeddings for all the words in the captions. shape: (batch, D, L)
-        gamma1: The gamma1 value from attnGAN paper.
-    """
-    batch, L = query_words.size(0), query_words.size(2)  # pylint: disable=invalid-name
-
-    feat_height, feat_width = local_incept_feat.size(2), local_incept_feat.size(3)
-    N = feat_height * feat_width  # pylint: disable=invalid-name
-
-    # Reshape the local inception features to (batch, D, N)
-    local_incept_feat = local_incept_feat.view(batch, -1, N)
-    # shape: (batch, N, D)
-    incept_feat_t = local_incept_feat.transpose(1, 2)
-
-    sim_matrix = incept_feat_t @ query_words  # shape: (batch, N, L)
-    sim_matrix = sim_matrix.view(batch * N, L)  # shape: (batch * N, L)
-
-    sim_matrix = nn.Softmax(dim=1)(sim_matrix)  # shape: (batch * N, L)
-    sim_matrix = sim_matrix.view(batch, N, L)  # shape: (batch, N, L)
-
-    sim_matrix = torch.transpose(sim_matrix, 1, 2)  # shape: (batch, L, N)
-    sim_matrix = sim_matrix.reshape(batch * L, N)  # shape: (batch * L, N)
-
-    alpha_j = gamma1 * sim_matrix  # shape: (batch * L, N)
-    alpha_j = nn.Softmax(dim=1)(alpha_j)  # shape: (batch * L, N)
-    alpha_j = alpha_j.view(batch, L, N)  # shape: (batch, L, N)
-    alpha_j_t = torch.transpose(alpha_j, 1, 2)  # shape: (batch, N, L)
-
-    c_i = (
-        local_incept_feat @ alpha_j_t
-    )  # shape: (batch, D, L) [summing over N dimension in paper, so we multiply like this]
-    return c_i
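-
-
-def _region_context_shape_check() -> torch.Size:
-    """Editor's sketch (not original code): for (batch, D, 17, 17) image
-    features and (batch, D, L) word embeddings, c_i comes back as
-    (batch, D, L); the sizes below are arbitrary test values."""
-    feat = torch.randn(2, 256, 17, 17)
-    words = torch.randn(2, 256, 5)
-    return compute_region_context_vector(feat, words, gamma1=4.0).shape  # torch.Size([2, 256, 5])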
-
-
-def discriminator_loss(
-    logits: Dict[str, Dict[str, torch.Tensor]],
-    labels: Dict[str, Dict[str, torch.Tensor]],
-) -> Any:
-    """
-    Calculate discriminator objective
-
-    :param dict[str, dict[str, torch.Tensor]] logits:
-        Dictionary with fake/real and word-level/uncond/cond logits
-
-        Example:
-
-        logits = {
-            "fake": {
-                "word_level": torch.Tensor (BxL)
-                "uncond": torch.Tensor (Bx1)
-                "cond": torch.Tensor (Bx1)
-            },
-            "real": {
-                "word_level": torch.Tensor (BxL)
-                "uncond": torch.Tensor (Bx1)
-                "cond": torch.Tensor (Bx1)
-            },
-        }
-    :param dict[str, dict[str, torch.Tensor]] labels:
-        Dictionary with fake/real and word-level/image labels
-
-        Example:
-
-        labels = {
-            "fake": {
-                "word_level": torch.Tensor (BxL)
-                "image": torch.Tensor (Bx1)
-            },
-            "real": {
-                "word_level": torch.Tensor (BxL)
-                "image": torch.Tensor (Bx1)
-            },
-        }
-    :return: Discriminator objective loss
-    :rtype: Any
-    """
-    # define main loss functions for logit losses
-    tot_loss = 0.0
-    bce_logits = nn.BCEWithLogitsLoss()
-    bce = nn.BCELoss()
-    # calculate word-level loss
-    word_loss = bce(logits["real"]["word_level"], labels["real"]["word_level"])
-    # calculate unconditional adversarial loss
-    uncond_loss = bce_logits(logits["real"]["uncond"], labels["real"]["image"])
-
-    # calculate conditional adversarial loss
-    cond_loss = bce_logits(logits["real"]["cond"], labels["real"]["image"])
-
-    tot_loss = (uncond_loss + cond_loss) / 2.0
-
-    fake_uncond_loss = bce_logits(logits["fake"]["uncond"], labels["fake"]["image"])
-    fake_cond_loss = bce_logits(logits["fake"]["cond"], labels["fake"]["image"])
-
-    tot_loss += (fake_uncond_loss + fake_cond_loss) / 3.0
-    tot_loss += word_loss
-
-    return tot_loss
-
-
-def kl_loss(mu_tensor: torch.Tensor, logvar: torch.Tensor) -> Any:
-    """
-    Calculate KL loss
-
-    :param torch.Tensor mu_tensor: Mean of latent distribution
-    :param torch.Tensor logvar: Log variance of latent distribution
-    :return: KL loss [-0.5 * (1 + log(sigma) - mu^2 - sigma^2)]
-    :rtype: Any
-    """
-    return torch.mean(-0.5 * (1 + logvar - mu_tensor.pow(2) - torch.exp(logvar)))
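-
-
-def _kl_loss_reference_check() -> bool:
-    """Editor's sketch (not original code): kl_loss should equal the mean
-    closed-form KL(N(mu, sigma^2) || N(0, 1)) from torch.distributions,
-    assuming the standard unit-Gaussian prior the docstring formula implies."""
-    mu = torch.randn(4, 8)
-    logvar = torch.randn(4, 8)
-    post = torch.distributions.Normal(mu, torch.exp(0.5 * logvar))
-    prior = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(mu))
-    reference = torch.distributions.kl_divergence(post, prior).mean()
-    return torch.allclose(kl_loss(mu, logvar), reference, atol=1e-6)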
diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/text/english.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/text/english.py
deleted file mode 100644
index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/text/english.py
+++ /dev/null
@@ -1,188 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
-  1. "english_cleaners" for English text
-  2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
-     the Unidecode library (https://pypi.python.org/pypi/Unidecode)
-  3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
-     the symbols in symbols.py to match your data).
-'''
-
-
-# Regular expression matching whitespace:
-
-
-import re
-import inflect
-from unidecode import unidecode
-import eng_to_ipa as ipa
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
-_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
-_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
-_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
-_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
-_number_re = re.compile(r'[0-9]+')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-    ('mrs', 'misess'),
-    ('mr', 'mister'),
-    ('dr', 'doctor'),
-    ('st', 'saint'),
-    ('co', 'company'),
-    ('jr', 'junior'),
-    ('maj', 'major'),
-    ('gen', 'general'),
-    ('drs', 'doctors'),
-    ('rev', 'reverend'),
-    ('lt', 'lieutenant'),
-    ('hon', 'honorable'),
-    ('sgt', 'sergeant'),
-    ('capt', 'captain'),
-    ('esq', 'esquire'),
-    ('ltd', 'limited'),
-    ('col', 'colonel'),
-    ('ft', 'fort'),
-]]
-
-
-# List of (ipa, lazy ipa) pairs:
-_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('r', 'ɹ'),
-    ('æ', 'e'),
-    ('ɑ', 'a'),
-    ('ɔ', 'o'),
-    ('ð', 'z'),
-    ('θ', 's'),
-    ('ɛ', 'e'),
-    ('ɪ', 'i'),
-    ('ʊ', 'u'),
-    ('ʒ', 'ʥ'),
-    ('ʤ', 'ʥ'),
-    ('ˈ', '↓'),
-]]
-
-# List of (ipa, lazy ipa2) pairs:
-_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('r', 'ɹ'),
-    ('ð', 'z'),
-    ('θ', 's'),
-    ('ʒ', 'ʑ'),
-    ('ʤ', 'dʑ'),
-    ('ˈ', '↓'),
-]]
-
-# List of (ipa, ipa2) pairs
-_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('r', 'ɹ'),
-    ('ʤ', 'dʒ'),
-    ('ʧ', 'tʃ')
-]]
-
-
-def expand_abbreviations(text):
-    for regex, replacement in _abbreviations:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def collapse_whitespace(text):
-    return re.sub(r'\s+', ' ', text)
-
-
-def _remove_commas(m):
-    return m.group(1).replace(',', '')
-
-
-def _expand_decimal_point(m):
-    return m.group(1).replace('.', ' point ')
-
-
-def _expand_dollars(m):
-    match = m.group(1)
-    parts = match.split('.')
-    if len(parts) > 2:
-        return match + ' dollars'  # Unexpected format
-    dollars = int(parts[0]) if parts[0] else 0
-    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
-    if dollars and cents:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
-    elif dollars:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        return '%s %s' % (dollars, dollar_unit)
-    elif cents:
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        return '%s %s' % (cents, cent_unit)
-    else:
-        return 'zero dollars'
-
-
-def _expand_ordinal(m):
-    return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
-    num = int(m.group(0))
-    if num > 1000 and num < 3000:
-        if num == 2000:
-            return 'two thousand'
-        elif num > 2000 and num < 2010:
-            return 'two thousand ' + _inflect.number_to_words(num % 100)
-        elif num % 100 == 0:
-            return _inflect.number_to_words(num // 100) + ' hundred'
-        else:
-            return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
-    else:
-        return _inflect.number_to_words(num, andword='')
-
-
-def normalize_numbers(text):
-    text = re.sub(_comma_number_re, _remove_commas, text)
-    text = re.sub(_pounds_re, r'\1 pounds', text)
-    text = re.sub(_dollars_re, _expand_dollars, text)
-    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
-    text = re.sub(_ordinal_re, _expand_ordinal, text)
-    text = re.sub(_number_re, _expand_number, text)
-    return text
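-
-
-def _normalize_numbers_examples():
-    """Editor's sketch (not original code): expected behaviour, derived from
-    the regexes above -- note that the final _number_re pass also rewrites the
-    digits produced by the dollar expansion."""
-    assert normalize_numbers("I paid $1.50 on the 2nd") == "I paid one dollar, fifty cents on the second"
-    assert normalize_numbers("over 1,000 items") == "over one thousand items"
-    assert normalize_numbers("in 2008") == "in two thousand eight"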
-
-
-def mark_dark_l(text):
-    return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
-
-
-def english_to_ipa(text):
-    text = unidecode(text).lower()
-    text = expand_abbreviations(text)
-    text = normalize_numbers(text)
-    phonemes = ipa.convert(text)
-    phonemes = collapse_whitespace(phonemes)
-    return phonemes
-
-
-def english_to_lazy_ipa(text):
-    text = english_to_ipa(text)
-    for regex, replacement in _lazy_ipa:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def english_to_ipa2(text):
-    text = english_to_ipa(text)
-    text = mark_dark_l(text)
-    for regex, replacement in _ipa_to_ipa2:
-        text = re.sub(regex, replacement, text)
-    return text.replace('...', '…')
-
-
-def english_to_lazy_ipa2(text):
-    text = english_to_ipa(text)
-    for regex, replacement in _lazy_ipa2:
-        text = re.sub(regex, replacement, text)
-    return text
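-
-
-# Editor's note (assumption): english_to_ipa2 / english_to_lazy_ipa2 look like
-# the entry points a phoneme-based TTS frontend would call, e.g.:
-#
-#     english_to_ipa2("Dr. Smith paid $1.50")
-#     # -> IPA via eng_to_ipa, with abbreviations and numbers expanded first,
-#     #    dark l marked as 'ɫ', and ʤ/ʧ rewritten as dʒ/tʃ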
diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/preprocess_text.py b/spaces/Mahiruoshi/MyGO_VIts-bert/preprocess_text.py
deleted file mode 100644
index afa1d2829764105404ef7f30a50ec5db2261ee7a..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/MyGO_VIts-bert/preprocess_text.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import json
-from collections import defaultdict
-from random import shuffle
-from typing import Optional
-
-from tqdm import tqdm
-import click
-from text.cleaner import clean_text
-
-
-@click.command()
-@click.option(
-    "--transcription-path",
-    default="filelists/Mygo.list",
-    type=click.Path(exists=True, file_okay=True, dir_okay=False),
-)
-@click.option("--cleaned-path", default=None)
-@click.option("--train-path", default="filelists/train.list")
-@click.option("--val-path", default="filelists/val.list")
-@click.option(
-    "--config-path",
-    default="configs/config.json",
-    type=click.Path(exists=True, file_okay=True, dir_okay=False),
-)
-@click.option("--val-per-spk", default=4)
-@click.option("--max-val-total", default=8)
-@click.option("--clean/--no-clean", default=True)
-def main(
-    transcription_path: str,
-    cleaned_path: Optional[str],
-    train_path: str,
-    val_path: str,
-    config_path: str,
-    val_per_spk: int,
-    max_val_total: int,
-    clean: bool,
-):
-    if cleaned_path is None:
-        cleaned_path = transcription_path + ".cleaned"
-
-    if clean:
-        out_file = open(cleaned_path, "w", encoding="utf-8")
-        for line in tqdm(open(transcription_path, encoding="utf-8").readlines()):
-            try:
-                utt, spk, language, text = line.strip().split("|")
-                norm_text, phones, tones, word2ph = clean_text(text, language)
-                out_file.write(
-                    "{}|{}|{}|{}|{}|{}|{}\n".format(
-                        utt,
-                        spk,
-                        language,
-                        norm_text,
-                        " ".join(phones),
-                        " ".join([str(i) for i in tones]),
-                        " ".join([str(i) for i in word2ph]),
-                    )
-                )
-            except Exception as error:
-                print("err!", line, error)
-
-        out_file.close()
-
-        transcription_path = cleaned_path
-
-    spk_utt_map = defaultdict(list)
-    spk_id_map = {}
-    current_sid = 0
-
-    with open(transcription_path, encoding="utf-8") as f:
-        for line in f.readlines():
-            utt, spk, language, text, phones, tones, word2ph = line.strip().split("|")
-            spk_utt_map[spk].append(line)
-
-            if spk not in spk_id_map.keys():
-                spk_id_map[spk] = current_sid
-                current_sid += 1
-
-    train_list = []
-    val_list = []
-
-    for spk, utts in spk_utt_map.items():
-        shuffle(utts)
-        val_list += utts[:val_per_spk]
-        train_list += utts[val_per_spk:]
-
-    if len(val_list) > max_val_total:
-        train_list += val_list[max_val_total:]
-        val_list = val_list[:max_val_total]
-
-    with open(train_path, "w", encoding="utf-8") as f:
-        for line in train_list:
-            f.write(line)
-
-    with open(val_path, "w", encoding="utf-8") as f:
-        for line in val_list:
-            f.write(line)
-
-    config = json.load(open(config_path, encoding="utf-8"))
-    config["data"]["spk2id"] = spk_id_map
-    with open(config_path, "w", encoding="utf-8") as f:
-        json.dump(config, f, indent=2, ensure_ascii=False)
-
-
-if __name__ == "__main__":
-    main()
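-
-
-# Editor's note: an illustrative (made-up) transcription line and the cleaned
-# line this script writes, matching the "|"-separated format above:
-#
-#   in : wavs/utt_001.wav|spk1|EN|Hello world.
-#   out: wavs/utt_001.wav|spk1|EN|<norm_text>|<phones>|<tones>|<word2ph>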
diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/dataset/dataset.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/dataset/dataset.py
deleted file mode 100644
index eb453c9018e9d31e1771b937a615ee0ba684a62a..0000000000000000000000000000000000000000
--- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/dataset/dataset.py
+++ /dev/null
@@ -1,160 +0,0 @@
-
-import torch
-from torch.utils.data import Dataset
-import torch.nn.functional as F
-
-from einops import rearrange
-import os
-import os.path as osp
-from glob import glob
-import imageio
-import cv2
-import numpy as np
-import random
-from typing import List, Optional, Union
-
-class MakeAProtagonistDataset(Dataset):
-    def __init__(
-            self,
-            video_dir: str,
-            prompt: str,
-            condition: Union[str, List[str]] = 'openpose',  ## type of condition used
-            video_suffix: str = '.jpg',
-            condition_suffix: str = '.png',
-            width: int = 512,
-            height: int = 512,
-            n_sample_frames: int = 8,
-            sample_start_idx: int = 0,
-            sample_frame_rate: int = 1,
-            random_sample: bool = False,
-            mask_dir: Optional[str] = None,
-            **kwargs,
-    ):
-        self.video_dir = video_dir ## path to the video dir
-        self.video_path = osp.join(self.video_dir, 'images')
-
-        self.condition = condition
-        if isinstance(condition, str):
-            condition = [condition]
-        self.condition_path = {_condition: osp.join(self.video_dir, _condition) for _condition in condition}
-        self.video_suffix = video_suffix
-        self.condition_suffix = condition_suffix
-        self.random_sample = random_sample
-        self.mask_dir = mask_dir
-        if mask_dir:
-            self.mask_dir = osp.join(self.video_dir, mask_dir)
-
-        ## get frame path
-        frame_list_path = osp.join(self.video_dir, 'frame_list.txt')
-        if not osp.isfile(frame_list_path):
-            all_frames = sorted(glob(osp.join(self.video_path, '*')))
-            self.frame_list = []
-            with open(frame_list_path, 'w') as f:
-                for _frame_path in all_frames:
-                    _frame_name = osp.basename(_frame_path).split('.')[0]
-                    self.frame_list.append(_frame_name)
-                    f.write(_frame_name + '\n')
-        
-        else:
-            with open(frame_list_path, 'r') as f:
-                self.frame_list = f.read().splitlines()
-        
-        self.video_length = len(self.frame_list)
-
-        self.prompt = prompt
-        self.prompt_ids = None
-
-        self.width = width
-        self.height = height
-        self.n_sample_frames = n_sample_frames
-        self.sample_start_idx = sample_start_idx
-        self.sample_frame_rate = sample_frame_rate
-        self.img_embeddings = []
-
-        print('Training on video {}\t{} frames in total'.format(self.video_dir.split('/')[-1], self.video_length))
-    
-    @torch.no_grad()
-    def preprocess_img_embedding(self, feature_extractor, image_encoder):
-        for f_name in self.frame_list:
-            image = imageio.imread(osp.join(self.video_path, f_name + self.video_suffix))
-            image = feature_extractor(images=image, return_tensors="pt").pixel_values
-            image_embeds = image_encoder(image).image_embeds
-            self.img_embeddings.append(image_embeds[0]) # 1,768 --> 768
-
-
-    def __len__(self):
-        return 1
-
-    def __getitem__(self, index):
-        # load and sample video frames
-        video_indices = list(range(self.sample_start_idx, self.video_length, self.sample_frame_rate))
-        video = []
-        conditions = {_condition: [] for _condition in self.condition}
-
-        mask = []
-        if self.random_sample:
-            start_index = random.randint(0,len(video_indices) - self.n_sample_frames) ## [a,b] include both
-        else:
-            start_index = 0
-        sample_index = video_indices[start_index:start_index+self.n_sample_frames]
-        for _f_idx in sample_index:
-            _frame = imageio.imread(osp.join(self.video_path, self.frame_list[_f_idx] + self.video_suffix))
-            if self.mask_dir:
-                _mask = imageio.imread(osp.join(self.mask_dir, self.frame_list[_f_idx] + '.png')).astype(np.float32) ## H,W 0 and 255
-                _mask /= 255 # 0 and 1
-            else:
-                _mask = np.ones(_frame.shape[:2])
-            video.append(_frame)
-            mask.append(_mask)
-
-            for _control_type, _control_path in self.condition_path.items():
-                _condition = imageio.imread(osp.join(_control_path, self.frame_list[_f_idx] + self.condition_suffix)) ## 
-                conditions[_control_type].append(_condition)
-        
-        ref_idx = random.choice(sample_index)  # randomly sample one reference image index from the selected video clip
-
-        video = torch.from_numpy(np.stack(video, axis=0)).float() # f,h,w,c
-        
-        video = rearrange(video, "f h w c -> f c h w")
-        video = F.interpolate(video, size=(self.height, self.width), mode='bilinear')
-
-        conditions_transform = {}
-        for _control_type, condition in conditions.items():
-            condition = torch.from_numpy(np.stack(condition, axis=0)).float() # f,h,w,c
-            condition = rearrange(condition, "f h w c -> f c h w")
-            condition = F.interpolate(condition, size=(self.height, self.width), mode='bilinear')
-            conditions_transform[_control_type] = condition / 255
-
-        mask = torch.from_numpy(np.stack(mask, axis=0)).float() # f,h,w
-        mask = rearrange(mask[:,:,:,None], "f h w c -> f c h w")
-        mask = F.interpolate(mask, size=(self.height, self.width), mode='nearest')
-
-        ref_img = imageio.imread(osp.join(self.video_path, self.frame_list[ref_idx] + self.video_suffix)) # read ref image
-        ref_img = torch.from_numpy(ref_img).float() # h,w,c convert to tensor
-        ref_img = ref_img.permute(2,0,1).unsqueeze(0).repeat(self.n_sample_frames,1,1,1)  ## h,w,c -> c,h,w -> 1,c,h,w -> f,c,h,w
-        ref_img = F.interpolate(ref_img, size=(self.height, self.width), mode='bilinear')
-
-        ref_condition = torch.zeros_like(ref_img)
-        example = {
-            "pixel_values": (video / 127.5 - 1.0),
-            "conditions": conditions_transform,
-            # "prompt_ids": self.prompt_ids,
-            "ref_img": (ref_img / 127.5 - 1.0),
-            "ref_condition": ref_condition / 255,
-            "masks": mask, 
-            "sample_indices": torch.LongTensor(sample_index),  
-
-        }
-
-        ref_imbed = None
-        if len(self.img_embeddings):
-            ref_imbed = self.img_embeddings[ref_idx]
-            example["ref_imbed"] = ref_imbed
-
-
-        return example
-
-
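-# Minimal usage sketch (editor's addition): the directory layout, prompt and
-# condition types below are hypothetical -- the dataset expects
-# <video_dir>/images plus one sub-folder per condition type.
-if __name__ == "__main__":
-    dataset = MakeAProtagonistDataset(
-        video_dir="data/example_video",  # hypothetical path
-        prompt="a man is surfing",       # hypothetical prompt
-        condition=["openpose", "depth"],
-        n_sample_frames=8,
-        random_sample=True,
-    )
-    loader = torch.utils.data.DataLoader(dataset, batch_size=1)
-    batch = next(iter(loader))
-    print(batch["pixel_values"].shape)  # -> torch.Size([1, 8, 3, 512, 512])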
diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/hmm.c b/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/hmm.c
deleted file mode 100644
index b5d023818c5287ac4e1d86f40d20c3a29182d6ba..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/hmm.c
+++ /dev/null
@@ -1,32430 +0,0 @@
-/* Generated by Cython 0.29.22 */
-
-/* BEGIN: Cython Metadata
-{
-    "distutils": {
-        "depends": [
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h",
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayscalars.h",
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h",
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h",
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/npy_math.h",
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h"
-        ],
-        "include_dirs": [
-            "/home/guillefix/.local/lib/python3.8/site-packages/numpy/core/include"
-        ],
-        "name": "madmom.ml.hmm",
-        "sources": [
-            "madmom/ml/hmm.pyx"
-        ]
-    },
-    "module_name": "madmom.ml.hmm"
-}
-END: Cython Metadata */
-
-#define PY_SSIZE_T_CLEAN
-#include "Python.h"
-#ifndef Py_PYTHON_H
-    #error Python headers needed to compile C extensions, please install development version of Python.
-#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
-    #error Cython requires Python 2.6+ or Python 3.3+.
-#else
-#define CYTHON_ABI "0_29_22"
-#define CYTHON_HEX_VERSION 0x001D16F0
-#define CYTHON_FUTURE_DIVISION 1
-#include <stddef.h>
-#ifndef offsetof
-  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
-#endif
-#if !defined(WIN32) && !defined(MS_WINDOWS)
-  #ifndef __stdcall
-    #define __stdcall
-  #endif
-  #ifndef __cdecl
-    #define __cdecl
-  #endif
-  #ifndef __fastcall
-    #define __fastcall
-  #endif
-#endif
-#ifndef DL_IMPORT
-  #define DL_IMPORT(t) t
-#endif
-#ifndef DL_EXPORT
-  #define DL_EXPORT(t) t
-#endif
-#define __PYX_COMMA ,
-#ifndef HAVE_LONG_LONG
-  #if PY_VERSION_HEX >= 0x02070000
-    #define HAVE_LONG_LONG
-  #endif
-#endif
-#ifndef PY_LONG_LONG
-  #define PY_LONG_LONG LONG_LONG
-#endif
-#ifndef Py_HUGE_VAL
-  #define Py_HUGE_VAL HUGE_VAL
-#endif
-#ifdef PYPY_VERSION
-  #define CYTHON_COMPILING_IN_PYPY 1
-  #define CYTHON_COMPILING_IN_PYSTON 0
-  #define CYTHON_COMPILING_IN_CPYTHON 0
-  #undef CYTHON_USE_TYPE_SLOTS
-  #define CYTHON_USE_TYPE_SLOTS 0
-  #undef CYTHON_USE_PYTYPE_LOOKUP
-  #define CYTHON_USE_PYTYPE_LOOKUP 0
-  #if PY_VERSION_HEX < 0x03050000
-    #undef CYTHON_USE_ASYNC_SLOTS
-    #define CYTHON_USE_ASYNC_SLOTS 0
-  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
-    #define CYTHON_USE_ASYNC_SLOTS 1
-  #endif
-  #undef CYTHON_USE_PYLIST_INTERNALS
-  #define CYTHON_USE_PYLIST_INTERNALS 0
-  #undef CYTHON_USE_UNICODE_INTERNALS
-  #define CYTHON_USE_UNICODE_INTERNALS 0
-  #undef CYTHON_USE_UNICODE_WRITER
-  #define CYTHON_USE_UNICODE_WRITER 0
-  #undef CYTHON_USE_PYLONG_INTERNALS
-  #define CYTHON_USE_PYLONG_INTERNALS 0
-  #undef CYTHON_AVOID_BORROWED_REFS
-  #define CYTHON_AVOID_BORROWED_REFS 1
-  #undef CYTHON_ASSUME_SAFE_MACROS
-  #define CYTHON_ASSUME_SAFE_MACROS 0
-  #undef CYTHON_UNPACK_METHODS
-  #define CYTHON_UNPACK_METHODS 0
-  #undef CYTHON_FAST_THREAD_STATE
-  #define CYTHON_FAST_THREAD_STATE 0
-  #undef CYTHON_FAST_PYCALL
-  #define CYTHON_FAST_PYCALL 0
-  #undef CYTHON_PEP489_MULTI_PHASE_INIT
-  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
-  #undef CYTHON_USE_TP_FINALIZE
-  #define CYTHON_USE_TP_FINALIZE 0
-  #undef CYTHON_USE_DICT_VERSIONS
-  #define CYTHON_USE_DICT_VERSIONS 0
-  #undef CYTHON_USE_EXC_INFO_STACK
-  #define CYTHON_USE_EXC_INFO_STACK 0
-#elif defined(PYSTON_VERSION)
-  #define CYTHON_COMPILING_IN_PYPY 0
-  #define CYTHON_COMPILING_IN_PYSTON 1
-  #define CYTHON_COMPILING_IN_CPYTHON 0
-  #ifndef CYTHON_USE_TYPE_SLOTS
-    #define CYTHON_USE_TYPE_SLOTS 1
-  #endif
-  #undef CYTHON_USE_PYTYPE_LOOKUP
-  #define CYTHON_USE_PYTYPE_LOOKUP 0
-  #undef CYTHON_USE_ASYNC_SLOTS
-  #define CYTHON_USE_ASYNC_SLOTS 0
-  #undef CYTHON_USE_PYLIST_INTERNALS
-  #define CYTHON_USE_PYLIST_INTERNALS 0
-  #ifndef CYTHON_USE_UNICODE_INTERNALS
-    #define CYTHON_USE_UNICODE_INTERNALS 1
-  #endif
-  #undef CYTHON_USE_UNICODE_WRITER
-  #define CYTHON_USE_UNICODE_WRITER 0
-  #undef CYTHON_USE_PYLONG_INTERNALS
-  #define CYTHON_USE_PYLONG_INTERNALS 0
-  #ifndef CYTHON_AVOID_BORROWED_REFS
-    #define CYTHON_AVOID_BORROWED_REFS 0
-  #endif
-  #ifndef CYTHON_ASSUME_SAFE_MACROS
-    #define CYTHON_ASSUME_SAFE_MACROS 1
-  #endif
-  #ifndef CYTHON_UNPACK_METHODS
-    #define CYTHON_UNPACK_METHODS 1
-  #endif
-  #undef CYTHON_FAST_THREAD_STATE
-  #define CYTHON_FAST_THREAD_STATE 0
-  #undef CYTHON_FAST_PYCALL
-  #define CYTHON_FAST_PYCALL 0
-  #undef CYTHON_PEP489_MULTI_PHASE_INIT
-  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
-  #undef CYTHON_USE_TP_FINALIZE
-  #define CYTHON_USE_TP_FINALIZE 0
-  #undef CYTHON_USE_DICT_VERSIONS
-  #define CYTHON_USE_DICT_VERSIONS 0
-  #undef CYTHON_USE_EXC_INFO_STACK
-  #define CYTHON_USE_EXC_INFO_STACK 0
-#else
-  #define CYTHON_COMPILING_IN_PYPY 0
-  #define CYTHON_COMPILING_IN_PYSTON 0
-  #define CYTHON_COMPILING_IN_CPYTHON 1
-  #ifndef CYTHON_USE_TYPE_SLOTS
-    #define CYTHON_USE_TYPE_SLOTS 1
-  #endif
-  #if PY_VERSION_HEX < 0x02070000
-    #undef CYTHON_USE_PYTYPE_LOOKUP
-    #define CYTHON_USE_PYTYPE_LOOKUP 0
-  #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
-    #define CYTHON_USE_PYTYPE_LOOKUP 1
-  #endif
-  #if PY_MAJOR_VERSION < 3
-    #undef CYTHON_USE_ASYNC_SLOTS
-    #define CYTHON_USE_ASYNC_SLOTS 0
-  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
-    #define CYTHON_USE_ASYNC_SLOTS 1
-  #endif
-  #if PY_VERSION_HEX < 0x02070000
-    #undef CYTHON_USE_PYLONG_INTERNALS
-    #define CYTHON_USE_PYLONG_INTERNALS 0
-  #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
-    #define CYTHON_USE_PYLONG_INTERNALS 1
-  #endif
-  #ifndef CYTHON_USE_PYLIST_INTERNALS
-    #define CYTHON_USE_PYLIST_INTERNALS 1
-  #endif
-  #ifndef CYTHON_USE_UNICODE_INTERNALS
-    #define CYTHON_USE_UNICODE_INTERNALS 1
-  #endif
-  #if PY_VERSION_HEX < 0x030300F0
-    #undef CYTHON_USE_UNICODE_WRITER
-    #define CYTHON_USE_UNICODE_WRITER 0
-  #elif !defined(CYTHON_USE_UNICODE_WRITER)
-    #define CYTHON_USE_UNICODE_WRITER 1
-  #endif
-  #ifndef CYTHON_AVOID_BORROWED_REFS
-    #define CYTHON_AVOID_BORROWED_REFS 0
-  #endif
-  #ifndef CYTHON_ASSUME_SAFE_MACROS
-    #define CYTHON_ASSUME_SAFE_MACROS 1
-  #endif
-  #ifndef CYTHON_UNPACK_METHODS
-    #define CYTHON_UNPACK_METHODS 1
-  #endif
-  #ifndef CYTHON_FAST_THREAD_STATE
-    #define CYTHON_FAST_THREAD_STATE 1
-  #endif
-  #ifndef CYTHON_FAST_PYCALL
-    #define CYTHON_FAST_PYCALL 1
-  #endif
-  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
-    #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
-  #endif
-  #ifndef CYTHON_USE_TP_FINALIZE
-    #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
-  #endif
-  #ifndef CYTHON_USE_DICT_VERSIONS
-    #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
-  #endif
-  #ifndef CYTHON_USE_EXC_INFO_STACK
-    #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
-  #endif
-#endif
-#if !defined(CYTHON_FAST_PYCCALL)
-#define CYTHON_FAST_PYCCALL  (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
-#endif
-#if CYTHON_USE_PYLONG_INTERNALS
-  #include "longintrepr.h"
-  #undef SHIFT
-  #undef BASE
-  #undef MASK
-  #ifdef SIZEOF_VOID_P
-    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
-  #endif
-#endif
-#ifndef __has_attribute
-  #define __has_attribute(x) 0
-#endif
-#ifndef __has_cpp_attribute
-  #define __has_cpp_attribute(x) 0
-#endif
-#ifndef CYTHON_RESTRICT
-  #if defined(__GNUC__)
-    #define CYTHON_RESTRICT __restrict__
-  #elif defined(_MSC_VER) && _MSC_VER >= 1400
-    #define CYTHON_RESTRICT __restrict
-  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define CYTHON_RESTRICT restrict
-  #else
-    #define CYTHON_RESTRICT
-  #endif
-#endif
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-#     define CYTHON_UNUSED __attribute__ ((__unused__))
-#   else
-#     define CYTHON_UNUSED
-#   endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-#   define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-#   define CYTHON_UNUSED
-# endif
-#endif
-#ifndef CYTHON_MAYBE_UNUSED_VAR
-#  if defined(__cplusplus)
-     template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
-#  else
-#    define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
-#  endif
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-#  define CYTHON_NCP_UNUSED
-# else
-#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
-#ifdef _MSC_VER
-    #ifndef _MSC_STDINT_H_
-        #if _MSC_VER < 1300
-           typedef unsigned char     uint8_t;
-           typedef unsigned int      uint32_t;
-        #else
-           typedef unsigned __int8   uint8_t;
-           typedef unsigned __int32  uint32_t;
-        #endif
-    #endif
-#else
-   #include <stdint.h>
-#endif
-#ifndef CYTHON_FALLTHROUGH
-  #if defined(__cplusplus) && __cplusplus >= 201103L
-    #if __has_cpp_attribute(fallthrough)
-      #define CYTHON_FALLTHROUGH [[fallthrough]]
-    #elif __has_cpp_attribute(clang::fallthrough)
-      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
-    #elif __has_cpp_attribute(gnu::fallthrough)
-      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
-    #endif
-  #endif
-  #ifndef CYTHON_FALLTHROUGH
-    #if __has_attribute(fallthrough)
-      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
-    #else
-      #define CYTHON_FALLTHROUGH
-    #endif
-  #endif
-  #if defined(__clang__ ) && defined(__apple_build_version__)
-    #if __apple_build_version__ < 7000000
-      #undef  CYTHON_FALLTHROUGH
-      #define CYTHON_FALLTHROUGH
-    #endif
-  #endif
-#endif
-
-#ifndef CYTHON_INLINE
-  #if defined(__clang__)
-    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
-  #elif defined(__GNUC__)
-    #define CYTHON_INLINE __inline__
-  #elif defined(_MSC_VER)
-    #define CYTHON_INLINE __inline
-  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define CYTHON_INLINE inline
-  #else
-    #define CYTHON_INLINE
-  #endif
-#endif
-
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
-  #define Py_OptimizeFlag 0
-#endif
-#define __PYX_BUILD_PY_SSIZE_T "n"
-#define CYTHON_FORMAT_SSIZE_T "z"
-#if PY_MAJOR_VERSION < 3
-  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
-          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-  #define __Pyx_DefaultClassType PyClass_Type
-#else
-  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
-          PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
-  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
-          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#endif
-  #define __Pyx_DefaultClassType PyType_Type
-#endif
-#ifndef Py_TPFLAGS_CHECKTYPES
-  #define Py_TPFLAGS_CHECKTYPES 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_INDEX
-  #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
-  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_FINALIZE
-  #define Py_TPFLAGS_HAVE_FINALIZE 0
-#endif
-#ifndef METH_STACKLESS
-  #define METH_STACKLESS 0
-#endif
-#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
-  #ifndef METH_FASTCALL
-     #define METH_FASTCALL 0x80
-  #endif
-  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
-  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
-                                                          Py_ssize_t nargs, PyObject *kwnames);
-#else
-  #define __Pyx_PyCFunctionFast _PyCFunctionFast
-  #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
-#endif
-#if CYTHON_FAST_PYCCALL
-#define __Pyx_PyFastCFunction_Check(func)\
-    ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
-#else
-#define __Pyx_PyFastCFunction_Check(func) 0
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
-  #define PyObject_Malloc(s)   PyMem_Malloc(s)
-  #define PyObject_Free(p)     PyMem_Free(p)
-  #define PyObject_Realloc(p)  PyMem_Realloc(p)
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
-  #define PyMem_RawMalloc(n)           PyMem_Malloc(n)
-  #define PyMem_RawRealloc(p, n)       PyMem_Realloc(p, n)
-  #define PyMem_RawFree(p)             PyMem_Free(p)
-#endif
-#if CYTHON_COMPILING_IN_PYSTON
-  #define __Pyx_PyCode_HasFreeVars(co)  PyCode_HasFreeVars(co)
-  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
-#else
-  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
-  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)  (frame)->f_lineno = (lineno)
-#endif
-#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
-  #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#elif PY_VERSION_HEX >= 0x03060000
-  #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
-#elif PY_VERSION_HEX >= 0x03000000
-  #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#else
-  #define __Pyx_PyThreadState_Current _PyThreadState_Current
-#endif
-#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
-#include "pythread.h"
-#define Py_tss_NEEDS_INIT 0
-typedef int Py_tss_t;
-static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
-  *key = PyThread_create_key();
-  return 0;
-}
-static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
-  Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
-  *key = Py_tss_NEEDS_INIT;
-  return key;
-}
-static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
-  PyObject_Free(key);
-}
-static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
-  return *key != Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
-  PyThread_delete_key(*key);
-  *key = Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
-  return PyThread_set_key_value(*key, value);
-}
-static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
-  return PyThread_get_key_value(*key);
-}
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
-#define __Pyx_PyDict_NewPresized(n)  ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
-#else
-#define __Pyx_PyDict_NewPresized(n)  PyDict_New()
-#endif
-#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
-  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
-  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
-#else
-  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_Divide(x,y)
-  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceDivide(x,y)
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
-#define __Pyx_PyDict_GetItemStr(dict, name)  _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
-#else
-#define __Pyx_PyDict_GetItemStr(dict, name)  PyDict_GetItem(dict, name)
-#endif
-#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
-  #define CYTHON_PEP393_ENABLED 1
-  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
-                                              0 : _PyUnicode_Ready((PyObject *)(op)))
-  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)
-  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
-  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   PyUnicode_MAX_CHAR_VALUE(u)
-  #define __Pyx_PyUnicode_KIND(u)         PyUnicode_KIND(u)
-  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)
-  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)
-  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  PyUnicode_WRITE(k, d, i, ch)
-  #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
-  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
-  #else
-  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_LENGTH(u))
-  #endif
-#else
-  #define CYTHON_PEP393_ENABLED 0
-  #define PyUnicode_1BYTE_KIND  1
-  #define PyUnicode_2BYTE_KIND  2
-  #define PyUnicode_4BYTE_KIND  4
-  #define __Pyx_PyUnicode_READY(op)       (0)
-  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_SIZE(u)
-  #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
-  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
-  #define __Pyx_PyUnicode_KIND(u)         (sizeof(Py_UNICODE))
-  #define __Pyx_PyUnicode_DATA(u)         ((void*)PyUnicode_AS_UNICODE(u))
-  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
-  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
-  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_SIZE(u))
-#endif
-#if CYTHON_COMPILING_IN_PYPY
-  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
-  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
-#else
-  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
-  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
-      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
-  #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
-  #define PyByteArray_Check(obj)  PyObject_TypeCheck(obj, &PyByteArray_Type)
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
-  #define PyObject_Format(obj, fmt)  PyObject_CallMethod(obj, "__format__", "O", fmt)
-#endif
-#define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
-#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
-#if PY_MAJOR_VERSION >= 3
-  #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)
-#else
-  #define __Pyx_PyString_Format(a, b)  PyString_Format(a, b)
-#endif
-#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
-  #define PyObject_ASCII(o)            PyObject_Repr(o)
-#endif
-#if PY_MAJOR_VERSION >= 3
-  #define PyBaseString_Type            PyUnicode_Type
-  #define PyStringObject               PyUnicodeObject
-  #define PyString_Type                PyUnicode_Type
-  #define PyString_Check               PyUnicode_Check
-  #define PyString_CheckExact          PyUnicode_CheckExact
-#ifndef PyObject_Unicode
-  #define PyObject_Unicode             PyObject_Str
-#endif
-#endif
-#if PY_MAJOR_VERSION >= 3
-  #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
-  #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
-#else
-  #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
-  #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
-#endif
-#ifndef PySet_CheckExact
-  #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)
-#endif
-#if PY_VERSION_HEX >= 0x030900A4
-  #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
-  #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
-#else
-  #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
-  #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
-#endif
-#if CYTHON_ASSUME_SAFE_MACROS
-  #define __Pyx_PySequence_SIZE(seq)  Py_SIZE(seq)
-#else
-  #define __Pyx_PySequence_SIZE(seq)  PySequence_Size(seq)
-#endif
-#if PY_MAJOR_VERSION >= 3
-  #define PyIntObject                  PyLongObject
-  #define PyInt_Type                   PyLong_Type
-  #define PyInt_Check(op)              PyLong_Check(op)
-  #define PyInt_CheckExact(op)         PyLong_CheckExact(op)
-  #define PyInt_FromString             PyLong_FromString
-  #define PyInt_FromUnicode            PyLong_FromUnicode
-  #define PyInt_FromLong               PyLong_FromLong
-  #define PyInt_FromSize_t             PyLong_FromSize_t
-  #define PyInt_FromSsize_t            PyLong_FromSsize_t
-  #define PyInt_AsLong                 PyLong_AsLong
-  #define PyInt_AS_LONG                PyLong_AS_LONG
-  #define PyInt_AsSsize_t              PyLong_AsSsize_t
-  #define PyInt_AsUnsignedLongMask     PyLong_AsUnsignedLongMask
-  #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
-  #define PyNumber_Int                 PyNumber_Long
-#endif
-#if PY_MAJOR_VERSION >= 3
-  #define PyBoolObject                 PyLongObject
-#endif
-#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
-  #ifndef PyUnicode_InternFromString
-    #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
-  #endif
-#endif
-#if PY_VERSION_HEX < 0x030200A4
-  typedef long Py_hash_t;
-  #define __Pyx_PyInt_FromHash_t PyInt_FromLong
-  #define __Pyx_PyInt_AsHash_t   PyInt_AsLong
-#else
-  #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
-  #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
-#endif
-#if PY_MAJOR_VERSION >= 3
-  #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
-#else
-  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
-#endif
-#if CYTHON_USE_ASYNC_SLOTS
-  #if PY_VERSION_HEX >= 0x030500B1
-    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
-    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
-  #else
-    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
-  #endif
-#else
-  #define __Pyx_PyType_AsAsync(obj) NULL
-#endif
-#ifndef __Pyx_PyAsyncMethodsStruct
-    typedef struct {
-        unaryfunc am_await;
-        unaryfunc am_aiter;
-        unaryfunc am_anext;
-    } __Pyx_PyAsyncMethodsStruct;
-#endif
-
-#if defined(WIN32) || defined(MS_WINDOWS)
-  #define _USE_MATH_DEFINES
-#endif
-#include <math.h>
-#ifdef NAN
-#define __PYX_NAN() ((float) NAN)
-#else
-static CYTHON_INLINE float __PYX_NAN() {
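-  /* No NAN macro available: fabricate a NaN by setting every bit of a
-     float (all-ones exponent plus a non-zero mantissa is always NaN). */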
-  float value;
-  memset(&value, 0xFF, sizeof(value));
-  return value;
-}
-#endif
-#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
-#define __Pyx_truncl trunc
-#else
-#define __Pyx_truncl truncl
-#endif
-
-#define __PYX_MARK_ERR_POS(f_index, lineno) \
-    { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
-#define __PYX_ERR(f_index, lineno, Ln_error) \
-    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
-
-#ifndef __PYX_EXTERN_C
-  #ifdef __cplusplus
-    #define __PYX_EXTERN_C extern "C"
-  #else
-    #define __PYX_EXTERN_C extern
-  #endif
-#endif
-
-#define __PYX_HAVE__madmom__ml__hmm
-#define __PYX_HAVE_API__madmom__ml__hmm
-/* Early includes */
-#include <string.h>
-#include <stdio.h>
-#include "numpy/arrayobject.h"
-#include "numpy/ndarrayobject.h"
-#include "numpy/ndarraytypes.h"
-#include "numpy/arrayscalars.h"
-#include "numpy/ufuncobject.h"
-
-    /* NumPy API declarations from "numpy/__init__.pxd" */
-    
-#include "numpy/npy_math.h"
-#include "pythread.h"
-#include <stdlib.h>
-#include "pystate.h"
-#ifdef _OPENMP
-#include <omp.h>
-#endif /* _OPENMP */
-
-#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
-#define CYTHON_WITHOUT_ASSERTIONS
-#endif
-
-typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
-                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
-
-#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
-#define __PYX_DEFAULT_STRING_ENCODING ""
-#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
-#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_uchar_cast(c) ((unsigned char)c)
-#define __Pyx_long_cast(x) ((long)x)
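-/* Check whether a value of `type` fits into Py_ssize_t, honouring
-   signedness; the sizeof comparisons fold away at compile time. */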
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
-    (sizeof(type) < sizeof(Py_ssize_t))  ||\
-    (sizeof(type) > sizeof(Py_ssize_t) &&\
-          likely(v < (type)PY_SSIZE_T_MAX ||\
-                 v == (type)PY_SSIZE_T_MAX)  &&\
-          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
-                                v == (type)PY_SSIZE_T_MIN)))  ||\
-    (sizeof(type) == sizeof(Py_ssize_t) &&\
-          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
-                               v == (type)PY_SSIZE_T_MAX)))  )
-static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
-    return (size_t) i < (size_t) limit;
-}
-#if defined (__cplusplus) && __cplusplus >= 201103L
-    #include <cstdlib>
-    #define __Pyx_sst_abs(value) std::abs(value)
-#elif SIZEOF_INT >= SIZEOF_SIZE_T
-    #define __Pyx_sst_abs(value) abs(value)
-#elif SIZEOF_LONG >= SIZEOF_SIZE_T
-    #define __Pyx_sst_abs(value) labs(value)
-#elif defined (_MSC_VER)
-    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
-#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-    #define __Pyx_sst_abs(value) llabs(value)
-#elif defined (__GNUC__)
-    #define __Pyx_sst_abs(value) __builtin_llabs(value)
-#else
-    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
-#endif
-static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
-static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
-#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
-#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
-#define __Pyx_PyBytes_FromString        PyBytes_FromString
-#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
-#if PY_MAJOR_VERSION < 3
-    #define __Pyx_PyStr_FromString        __Pyx_PyBytes_FromString
-    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#else
-    #define __Pyx_PyStr_FromString        __Pyx_PyUnicode_FromString
-    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
-#endif
-#define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyObject_AsWritableString(s)    ((char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableUString(s)    ((unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsSString(s)    ((const signed char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsUString(s)    ((const unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)
-#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)
-#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)
-#define __Pyx_PyStr_FromCString(s)     __Pyx_PyStr_FromString((const char*)s)
-#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
-static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
-    const Py_UNICODE *u_end = u;
-    while (*u_end++) ;
-    return (size_t)(u_end - u - 1);
-}
-#define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
-#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
-#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
-#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
-#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
-static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
-static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
-#define __Pyx_PySequence_Tuple(obj)\
-    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
-static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
-static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
-#if CYTHON_ASSUME_SAFE_MACROS
-#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
-#else
-#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
-#endif
-#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
-#else
-#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
-#endif
-#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
-#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-static int __Pyx_sys_getdefaultencoding_not_ascii;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
-    PyObject* sys;
-    PyObject* default_encoding = NULL;
-    PyObject* ascii_chars_u = NULL;
-    PyObject* ascii_chars_b = NULL;
-    const char* default_encoding_c;
-    sys = PyImport_ImportModule("sys");
-    if (!sys) goto bad;
-    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
-    Py_DECREF(sys);
-    if (!default_encoding) goto bad;
-    default_encoding_c = PyBytes_AsString(default_encoding);
-    if (!default_encoding_c) goto bad;
-    if (strcmp(default_encoding_c, "ascii") == 0) {
-        __Pyx_sys_getdefaultencoding_not_ascii = 0;
-    } else {
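-        /* Round-trip the 128 ASCII code points through the runtime default
-           encoding; if they do not come back byte-identical, the encoding is
-           not an ASCII superset and c_string_encoding=ascii would mis-decode. */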
-        char ascii_chars[128];
-        int c;
-        for (c = 0; c < 128; c++) {
-            ascii_chars[c] = c;
-        }
-        __Pyx_sys_getdefaultencoding_not_ascii = 1;
-        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
-        if (!ascii_chars_u) goto bad;
-        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
-        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
-            PyErr_Format(
-                PyExc_ValueError,
-                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
-                default_encoding_c);
-            goto bad;
-        }
-        Py_DECREF(ascii_chars_u);
-        Py_DECREF(ascii_chars_b);
-    }
-    Py_DECREF(default_encoding);
-    return 0;
-bad:
-    Py_XDECREF(default_encoding);
-    Py_XDECREF(ascii_chars_u);
-    Py_XDECREF(ascii_chars_b);
-    return -1;
-}
-#endif
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
-#else
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
-static char* __PYX_DEFAULT_STRING_ENCODING;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
-    PyObject* sys;
-    PyObject* default_encoding = NULL;
-    char* default_encoding_c;
-    sys = PyImport_ImportModule("sys");
-    if (!sys) goto bad;
-    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
-    Py_DECREF(sys);
-    if (!default_encoding) goto bad;
-    default_encoding_c = PyBytes_AsString(default_encoding);
-    if (!default_encoding_c) goto bad;
-    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
-    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
-    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
-    Py_DECREF(default_encoding);
-    return 0;
-bad:
-    Py_XDECREF(default_encoding);
-    return -1;
-}
-#endif
-#endif
-
-
-/* Test for GCC > 2.95 */
-#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
-  #define likely(x)   __builtin_expect(!!(x), 1)
-  #define unlikely(x) __builtin_expect(!!(x), 0)
-#else /* !__GNUC__ or GCC < 2.95 */
-  #define likely(x)   (x)
-  #define unlikely(x) (x)
-#endif /* __GNUC__ */
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
-
-static PyObject *__pyx_m = NULL;
-static PyObject *__pyx_d;
-static PyObject *__pyx_b;
-static PyObject *__pyx_cython_runtime = NULL;
-static PyObject *__pyx_empty_tuple;
-static PyObject *__pyx_empty_bytes;
-static PyObject *__pyx_empty_unicode;
-static int __pyx_lineno;
-static int __pyx_clineno = 0;
-static const char * __pyx_cfilenm= __FILE__;
-static const char *__pyx_filename;
-
-/* Header.proto */
-#if !defined(CYTHON_CCOMPLEX)
-  #if defined(__cplusplus)
-    #define CYTHON_CCOMPLEX 1
-  #elif defined(_Complex_I)
-    #define CYTHON_CCOMPLEX 1
-  #else
-    #define CYTHON_CCOMPLEX 0
-  #endif
-#endif
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    #include <complex>
-  #else
-    #include <complex.h>
-  #endif
-#endif
-#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
-  #undef _Complex_I
-  #define _Complex_I 1.0fj
-#endif
-
-
-static const char *__pyx_f[] = {
-  "madmom/ml/hmm.pyx",
-  "__init__.pxd",
-  "stringsource",
-  "type.pxd",
-};
-/* MemviewSliceStruct.proto */
-struct __pyx_memoryview_obj;
-typedef struct {
-  struct __pyx_memoryview_obj *memview;
-  char *data;
-  Py_ssize_t shape[8];
-  Py_ssize_t strides[8];
-  Py_ssize_t suboffsets[8];
-} __Pyx_memviewslice;
-#define __Pyx_MemoryView_Len(m)  (m.shape[0])
-
-/* Atomics.proto */
-#include <pythread.h>
-#ifndef CYTHON_ATOMICS
-    #define CYTHON_ATOMICS 1
-#endif
-#define __pyx_atomic_int_type int
-#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
-                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
-                    !defined(__i386__)
-    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
-    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
-    #ifdef __PYX_DEBUG_ATOMICS
-        #warning "Using GNU atomics"
-    #endif
-#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
-    #include <Windows.h>
-    #undef __pyx_atomic_int_type
-    #define __pyx_atomic_int_type LONG
-    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
-    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
-    #ifdef __PYX_DEBUG_ATOMICS
-        #pragma message ("Using MSVC atomics")
-    #endif
-#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
-    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
-    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
-    #ifdef __PYX_DEBUG_ATOMICS
-        #warning "Using Intel atomics"
-    #endif
-#else
-    #undef CYTHON_ATOMICS
-    #define CYTHON_ATOMICS 0
-    #ifdef __PYX_DEBUG_ATOMICS
-        #warning "Not using atomics"
-    #endif
-#endif
-typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
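-/* Each memoryview keeps an acquisition count for its buffer; it is updated
-   with compiler atomics when available, otherwise under the slice lock. */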
-#if CYTHON_ATOMICS
-    #define __pyx_add_acquisition_count(memview)\
-             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
-    #define __pyx_sub_acquisition_count(memview)\
-            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
-#else
-    #define __pyx_add_acquisition_count(memview)\
-            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
-    #define __pyx_sub_acquisition_count(memview)\
-            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
-#endif
-
-/* ForceInitThreads.proto */
-#ifndef __PYX_FORCE_INIT_THREADS
-  #define __PYX_FORCE_INIT_THREADS 0
-#endif
-
-/* NoFastGil.proto */
-#define __Pyx_PyGILState_Ensure PyGILState_Ensure
-#define __Pyx_PyGILState_Release PyGILState_Release
-#define __Pyx_FastGIL_Remember()
-#define __Pyx_FastGIL_Forget()
-#define __Pyx_FastGilFuncInit()
-
-/* BufferFormatStructs.proto */
-#define IS_UNSIGNED(type) (((type) -1) > 0)
-struct __Pyx_StructField_;
-#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
-typedef struct {
-  const char* name;
-  struct __Pyx_StructField_* fields;
-  size_t size;
-  size_t arraysize[8];
-  int ndim;
-  char typegroup;
-  char is_unsigned;
-  int flags;
-} __Pyx_TypeInfo;
-typedef struct __Pyx_StructField_ {
-  __Pyx_TypeInfo* type;
-  const char* name;
-  size_t offset;
-} __Pyx_StructField;
-typedef struct {
-  __Pyx_StructField* field;
-  size_t parent_offset;
-} __Pyx_BufFmt_StackElem;
-typedef struct {
-  __Pyx_StructField root;
-  __Pyx_BufFmt_StackElem* head;
-  size_t fmt_offset;
-  size_t new_count, enc_count;
-  size_t struct_alignment;
-  int is_complex;
-  char enc_type;
-  char new_packmode;
-  char enc_packmode;
-  char is_valid_array;
-} __Pyx_BufFmt_Context;
-
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":690
- * # in Cython to enable them only on the right systems.
- * 
- * ctypedef npy_int8       int8_t             # <<<<<<<<<<<<<<
- * ctypedef npy_int16      int16_t
- * ctypedef npy_int32      int32_t
- */
-typedef npy_int8 __pyx_t_5numpy_int8_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":691
- * 
- * ctypedef npy_int8       int8_t
- * ctypedef npy_int16      int16_t             # <<<<<<<<<<<<<<
- * ctypedef npy_int32      int32_t
- * ctypedef npy_int64      int64_t
- */
-typedef npy_int16 __pyx_t_5numpy_int16_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":692
- * ctypedef npy_int8       int8_t
- * ctypedef npy_int16      int16_t
- * ctypedef npy_int32      int32_t             # <<<<<<<<<<<<<<
- * ctypedef npy_int64      int64_t
- * #ctypedef npy_int96      int96_t
- */
-typedef npy_int32 __pyx_t_5numpy_int32_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":693
- * ctypedef npy_int16      int16_t
- * ctypedef npy_int32      int32_t
- * ctypedef npy_int64      int64_t             # <<<<<<<<<<<<<<
- * #ctypedef npy_int96      int96_t
- * #ctypedef npy_int128     int128_t
- */
-typedef npy_int64 __pyx_t_5numpy_int64_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":697
- * #ctypedef npy_int128     int128_t
- * 
- * ctypedef npy_uint8      uint8_t             # <<<<<<<<<<<<<<
- * ctypedef npy_uint16     uint16_t
- * ctypedef npy_uint32     uint32_t
- */
-typedef npy_uint8 __pyx_t_5numpy_uint8_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":698
- * 
- * ctypedef npy_uint8      uint8_t
- * ctypedef npy_uint16     uint16_t             # <<<<<<<<<<<<<<
- * ctypedef npy_uint32     uint32_t
- * ctypedef npy_uint64     uint64_t
- */
-typedef npy_uint16 __pyx_t_5numpy_uint16_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":699
- * ctypedef npy_uint8      uint8_t
- * ctypedef npy_uint16     uint16_t
- * ctypedef npy_uint32     uint32_t             # <<<<<<<<<<<<<<
- * ctypedef npy_uint64     uint64_t
- * #ctypedef npy_uint96     uint96_t
- */
-typedef npy_uint32 __pyx_t_5numpy_uint32_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":700
- * ctypedef npy_uint16     uint16_t
- * ctypedef npy_uint32     uint32_t
- * ctypedef npy_uint64     uint64_t             # <<<<<<<<<<<<<<
- * #ctypedef npy_uint96     uint96_t
- * #ctypedef npy_uint128    uint128_t
- */
-typedef npy_uint64 __pyx_t_5numpy_uint64_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":704
- * #ctypedef npy_uint128    uint128_t
- * 
- * ctypedef npy_float32    float32_t             # <<<<<<<<<<<<<<
- * ctypedef npy_float64    float64_t
- * #ctypedef npy_float80    float80_t
- */
-typedef npy_float32 __pyx_t_5numpy_float32_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":705
- * 
- * ctypedef npy_float32    float32_t
- * ctypedef npy_float64    float64_t             # <<<<<<<<<<<<<<
- * #ctypedef npy_float80    float80_t
- * #ctypedef npy_float128   float128_t
- */
-typedef npy_float64 __pyx_t_5numpy_float64_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":714
- * # The int types are mapped a bit surprising --
- * # numpy.int corresponds to 'l' and numpy.long to 'q'
- * ctypedef npy_long       int_t             # <<<<<<<<<<<<<<
- * ctypedef npy_longlong   long_t
- * ctypedef npy_longlong   longlong_t
- */
-typedef npy_long __pyx_t_5numpy_int_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":715
- * # numpy.int corresponds to 'l' and numpy.long to 'q'
- * ctypedef npy_long       int_t
- * ctypedef npy_longlong   long_t             # <<<<<<<<<<<<<<
- * ctypedef npy_longlong   longlong_t
- * 
- */
-typedef npy_longlong __pyx_t_5numpy_long_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":716
- * ctypedef npy_long       int_t
- * ctypedef npy_longlong   long_t
- * ctypedef npy_longlong   longlong_t             # <<<<<<<<<<<<<<
- * 
- * ctypedef npy_ulong      uint_t
- */
-typedef npy_longlong __pyx_t_5numpy_longlong_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":718
- * ctypedef npy_longlong   longlong_t
- * 
- * ctypedef npy_ulong      uint_t             # <<<<<<<<<<<<<<
- * ctypedef npy_ulonglong  ulong_t
- * ctypedef npy_ulonglong  ulonglong_t
- */
-typedef npy_ulong __pyx_t_5numpy_uint_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":719
- * 
- * ctypedef npy_ulong      uint_t
- * ctypedef npy_ulonglong  ulong_t             # <<<<<<<<<<<<<<
- * ctypedef npy_ulonglong  ulonglong_t
- * 
- */
-typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":720
- * ctypedef npy_ulong      uint_t
- * ctypedef npy_ulonglong  ulong_t
- * ctypedef npy_ulonglong  ulonglong_t             # <<<<<<<<<<<<<<
- * 
- * ctypedef npy_intp       intp_t
- */
-typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":722
- * ctypedef npy_ulonglong  ulonglong_t
- * 
- * ctypedef npy_intp       intp_t             # <<<<<<<<<<<<<<
- * ctypedef npy_uintp      uintp_t
- * 
- */
-typedef npy_intp __pyx_t_5numpy_intp_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":723
- * 
- * ctypedef npy_intp       intp_t
- * ctypedef npy_uintp      uintp_t             # <<<<<<<<<<<<<<
- * 
- * ctypedef npy_double     float_t
- */
-typedef npy_uintp __pyx_t_5numpy_uintp_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":725
- * ctypedef npy_uintp      uintp_t
- * 
- * ctypedef npy_double     float_t             # <<<<<<<<<<<<<<
- * ctypedef npy_double     double_t
- * ctypedef npy_longdouble longdouble_t
- */
-typedef npy_double __pyx_t_5numpy_float_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":726
- * 
- * ctypedef npy_double     float_t
- * ctypedef npy_double     double_t             # <<<<<<<<<<<<<<
- * ctypedef npy_longdouble longdouble_t
- * 
- */
-typedef npy_double __pyx_t_5numpy_double_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":727
- * ctypedef npy_double     float_t
- * ctypedef npy_double     double_t
- * ctypedef npy_longdouble longdouble_t             # <<<<<<<<<<<<<<
- * 
- * ctypedef npy_cfloat      cfloat_t
- */
-typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
-
-/* "madmom/ml/hmm.pyx":28
- * 
- * 
- * ctypedef np.uint32_t uint32_t             # <<<<<<<<<<<<<<
- * 
- * 
- */
-typedef __pyx_t_5numpy_uint32_t __pyx_t_6madmom_2ml_3hmm_uint32_t;
-/* Declarations.proto */
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    typedef ::std::complex< float > __pyx_t_float_complex;
-  #else
-    typedef float _Complex __pyx_t_float_complex;
-  #endif
-#else
-    typedef struct { float real, imag; } __pyx_t_float_complex;
-#endif
-static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
-
-/* Declarations.proto */
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    typedef ::std::complex< double > __pyx_t_double_complex;
-  #else
-    typedef double _Complex __pyx_t_double_complex;
-  #endif
-#else
-    typedef struct { double real, imag; } __pyx_t_double_complex;
-#endif
-static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
-
-
-/*--- Type declarations ---*/
-struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator;
-struct __pyx_array_obj;
-struct __pyx_MemviewEnum_obj;
-struct __pyx_memoryview_obj;
-struct __pyx_memoryviewslice_obj;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":729
- * ctypedef npy_longdouble longdouble_t
- * 
- * ctypedef npy_cfloat      cfloat_t             # <<<<<<<<<<<<<<
- * ctypedef npy_cdouble     cdouble_t
- * ctypedef npy_clongdouble clongdouble_t
- */
-typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":730
- * 
- * ctypedef npy_cfloat      cfloat_t
- * ctypedef npy_cdouble     cdouble_t             # <<<<<<<<<<<<<<
- * ctypedef npy_clongdouble clongdouble_t
- * 
- */
-typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":731
- * ctypedef npy_cfloat      cfloat_t
- * ctypedef npy_cdouble     cdouble_t
- * ctypedef npy_clongdouble clongdouble_t             # <<<<<<<<<<<<<<
- * 
- * ctypedef npy_cdouble     complex_t
- */
-typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":733
- * ctypedef npy_clongdouble clongdouble_t
- * 
- * ctypedef npy_cdouble     complex_t             # <<<<<<<<<<<<<<
- * 
- * cdef inline object PyArray_MultiIterNew1(a):
- */
-typedef npy_cdouble __pyx_t_5numpy_complex_t;
-
-/* "madmom/ml/hmm.pyx":664
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward_generator(self, observations, block_size=None):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator {
-  PyObject_HEAD
-  PyObject *__pyx_v_block_size;
-  unsigned int __pyx_v_block_sz;
-  unsigned int __pyx_v_frame;
-  __Pyx_memviewslice __pyx_v_fwd_cur;
-  __Pyx_memviewslice __pyx_v_fwd_prev;
-  double __pyx_v_norm_factor;
-  unsigned int __pyx_v_num_observations;
-  unsigned int __pyx_v_num_states;
-  unsigned int __pyx_v_obs_end;
-  unsigned int __pyx_v_obs_start;
-  PyObject *__pyx_v_observations;
-  PyObject *__pyx_v_om;
-  __Pyx_memviewslice __pyx_v_om_densities;
-  __Pyx_memviewslice __pyx_v_om_pointers;
-  unsigned int __pyx_v_prev_pointer;
-  double __pyx_v_prob_sum;
-  PyObject *__pyx_v_self;
-  unsigned int __pyx_v_state;
-  PyObject *__pyx_v_tm;
-  __Pyx_memviewslice __pyx_v_tm_probabilities;
-  __Pyx_memviewslice __pyx_v_tm_ptrs;
-  __Pyx_memviewslice __pyx_v_tm_states;
-  unsigned int __pyx_t_0;
-  unsigned int __pyx_t_1;
-  unsigned int __pyx_t_2;
-};
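-
-/* The scope struct above carries the per-frame state of the normalised HMM
-   forward recursion. A minimal sketch of one time step, assuming the
-   CSR-style transition layout suggested by the tm_states / tm_ptrs /
-   tm_probabilities fields; forward_step_sketch is a hypothetical helper
-   shown for illustration, not part of the generated module: */
-static void forward_step_sketch(const double *fwd_prev, double *fwd_cur,
-                                const unsigned int *tm_states,
-                                const unsigned int *tm_ptrs,
-                                const double *tm_probabilities,
-                                const unsigned int *om_pointers,
-                                const double *om_densities,
-                                unsigned int num_states)
-{
-    unsigned int state, prev_pointer;
-    double prob_sum = 0.0;
-    for (state = 0; state < num_states; state++) {
-        double acc = 0.0;
-        /* transitions into `state` live in [tm_ptrs[state], tm_ptrs[state+1]) */
-        for (prev_pointer = tm_ptrs[state]; prev_pointer < tm_ptrs[state + 1]; prev_pointer++)
-            acc += fwd_prev[tm_states[prev_pointer]] * tm_probabilities[prev_pointer];
-        /* weight by the observation density selected for this state */
-        fwd_cur[state] = acc * om_densities[om_pointers[state]];
-        prob_sum += fwd_cur[state];
-    }
-    /* rescale so the forward variables do not underflow on long sequences */
-    for (state = 0; state < num_states; state++)
-        fwd_cur[state] /= prob_sum;
-}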
-
-
-/* "View.MemoryView":105
- * 
- * @cname("__pyx_array")
- * cdef class array:             # <<<<<<<<<<<<<<
- * 
- *     cdef:
- */
-struct __pyx_array_obj {
-  PyObject_HEAD
-  struct __pyx_vtabstruct_array *__pyx_vtab;
-  char *data;
-  Py_ssize_t len;
-  char *format;
-  int ndim;
-  Py_ssize_t *_shape;
-  Py_ssize_t *_strides;
-  Py_ssize_t itemsize;
-  PyObject *mode;
-  PyObject *_format;
-  void (*callback_free_data)(void *);
-  int free_data;
-  int dtype_is_object;
-};
-
-
-/* "View.MemoryView":279
- * 
- * @cname('__pyx_MemviewEnum')
- * cdef class Enum(object):             # <<<<<<<<<<<<<<
- *     cdef object name
- *     def __init__(self, name):
- */
-struct __pyx_MemviewEnum_obj {
-  PyObject_HEAD
-  PyObject *name;
-};
-
-
-/* "View.MemoryView":330
- * 
- * @cname('__pyx_memoryview')
- * cdef class memoryview(object):             # <<<<<<<<<<<<<<
- * 
- *     cdef object obj
- */
-struct __pyx_memoryview_obj {
-  PyObject_HEAD
-  struct __pyx_vtabstruct_memoryview *__pyx_vtab;
-  PyObject *obj;
-  PyObject *_size;
-  PyObject *_array_interface;
-  PyThread_type_lock lock;
-  __pyx_atomic_int acquisition_count[2];
-  __pyx_atomic_int *acquisition_count_aligned_p;
-  Py_buffer view;
-  int flags;
-  int dtype_is_object;
-  __Pyx_TypeInfo *typeinfo;
-};
-
-
-/* "View.MemoryView":965
- * 
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
- *     "Internal class for passing memoryview slices to Python"
- * 
- */
-struct __pyx_memoryviewslice_obj {
-  struct __pyx_memoryview_obj __pyx_base;
-  __Pyx_memviewslice from_slice;
-  PyObject *from_object;
-  PyObject *(*to_object_func)(char *);
-  int (*to_dtype_func)(char *, PyObject *);
-};
-
-
-
-/* "View.MemoryView":105
- * 
- * @cname("__pyx_array")
- * cdef class array:             # <<<<<<<<<<<<<<
- * 
- *     cdef:
- */
-
-struct __pyx_vtabstruct_array {
-  PyObject *(*get_memview)(struct __pyx_array_obj *);
-};
-static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
-
-
-/* "View.MemoryView":330
- * 
- * @cname('__pyx_memoryview')
- * cdef class memoryview(object):             # <<<<<<<<<<<<<<
- * 
- *     cdef object obj
- */
-
-struct __pyx_vtabstruct_memoryview {
-  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
-  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
-  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
-  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
-  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
-  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
-  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
-};
-static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
-
-
-/* "View.MemoryView":965
- * 
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
- *     "Internal class for passing memoryview slices to Python"
- * 
- */
-
-struct __pyx_vtabstruct__memoryviewslice {
-  struct __pyx_vtabstruct_memoryview __pyx_base;
-};
-static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
-
-/* --- Runtime support code (head) --- */
-/* Refnanny.proto */
-#ifndef CYTHON_REFNANNY
-  #define CYTHON_REFNANNY 0
-#endif
-#if CYTHON_REFNANNY
-  typedef struct {
-    void (*INCREF)(void*, PyObject*, int);
-    void (*DECREF)(void*, PyObject*, int);
-    void (*GOTREF)(void*, PyObject*, int);
-    void (*GIVEREF)(void*, PyObject*, int);
-    void* (*SetupContext)(const char*, int, const char*);
-    void (*FinishContext)(void**);
-  } __Pyx_RefNannyAPIStruct;
-  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
-  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
-  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
-#ifdef WITH_THREAD
-  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
-          if (acquire_gil) {\
-              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
-              PyGILState_Release(__pyx_gilstate_save);\
-          } else {\
-              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
-          }
-#else
-  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
-          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
-#endif
-  #define __Pyx_RefNannyFinishContext()\
-          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
-  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
-  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
-  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
-  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
-  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
-  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
-  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
-  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
-#else
-  #define __Pyx_RefNannyDeclarations
-  #define __Pyx_RefNannySetupContext(name, acquire_gil)
-  #define __Pyx_RefNannyFinishContext()
-  #define __Pyx_INCREF(r) Py_INCREF(r)
-  #define __Pyx_DECREF(r) Py_DECREF(r)
-  #define __Pyx_GOTREF(r)
-  #define __Pyx_GIVEREF(r)
-  #define __Pyx_XINCREF(r) Py_XINCREF(r)
-  #define __Pyx_XDECREF(r) Py_XDECREF(r)
-  #define __Pyx_XGOTREF(r)
-  #define __Pyx_XGIVEREF(r)
-#endif
-#define __Pyx_XDECREF_SET(r, v) do {\
-        PyObject *tmp = (PyObject *) r;\
-        r = v; __Pyx_XDECREF(tmp);\
-    } while (0)
-#define __Pyx_DECREF_SET(r, v) do {\
-        PyObject *tmp = (PyObject *) r;\
-        r = v; __Pyx_DECREF(tmp);\
-    } while (0)
-#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
-#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
-
-/* PyObjectGetAttrStr.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
-#endif
-
-/* GetBuiltinName.proto */
-static PyObject *__Pyx_GetBuiltinName(PyObject *name);
-
-/* RaiseArgTupleInvalid.proto */
-static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
-    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
-
-/* RaiseDoubleKeywords.proto */
-static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
-
-/* ParseKeywords.proto */
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
-    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
-    const char* function_name);
-
-/* PyObjectSetAttrStr.proto */
-#if CYTHON_USE_TYPE_SLOTS
-#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL)
-static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);
-#else
-#define __Pyx_PyObject_DelAttrStr(o,n)   PyObject_DelAttr(o,n)
-#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
-#endif
-
-/* PyDictVersioning.proto */
-#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
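-/* Cache dict lookups keyed on CPython's ma_version_tag (PEP 509), which is
-   bumped on every mutation; a cached value is reused until the tag changes. */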
-#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)
-#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
-    (version_var) = __PYX_GET_DICT_VERSION(dict);\
-    (cache_var) = (value);
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
-    static PY_UINT64_T __pyx_dict_version = 0;\
-    static PyObject *__pyx_dict_cached_value = NULL;\
-    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
-        (VAR) = __pyx_dict_cached_value;\
-    } else {\
-        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
-        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
-    }\
-}
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
-static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
-#else
-#define __PYX_GET_DICT_VERSION(dict)  (0)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);
-#endif
-
-/* GetModuleGlobalName.proto */
-#if CYTHON_USE_DICT_VERSIONS
-#define __Pyx_GetModuleGlobalName(var, name)  {\
-    static PY_UINT64_T __pyx_dict_version = 0;\
-    static PyObject *__pyx_dict_cached_value = NULL;\
-    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
-        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
-        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-}
-#define __Pyx_GetModuleGlobalNameUncached(var, name)  {\
-    PY_UINT64_T __pyx_dict_version;\
-    PyObject *__pyx_dict_cached_value;\
-    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-}
-static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
-#else
-#define __Pyx_GetModuleGlobalName(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
-#define __Pyx_GetModuleGlobalNameUncached(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
-static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
-#endif
-
-/* PyCFunctionFastCall.proto */
-#if CYTHON_FAST_PYCCALL
-static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
-#else
-#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
-#endif
-
-/* PyFunctionFastCall.proto */
-#if CYTHON_FAST_PYCALL
-#define __Pyx_PyFunction_FastCall(func, args, nargs)\
-    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
-#if 1 || PY_VERSION_HEX < 0x030600B1
-static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
-#else
-#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
-#endif
-#define __Pyx_BUILD_ASSERT_EXPR(cond)\
-    (sizeof(char [1 - 2*!(cond)]) - 1)
-#ifndef Py_MEMBER_SIZE
-#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
-#endif
-  static size_t __pyx_pyframe_localsplus_offset = 0;
-  #include "frameobject.h"
-  #define __Pxy_PyFrame_Initialize_Offsets()\
-    ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
-     (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
-  #define __Pyx_PyFrame_GetLocalsplus(frame)\
-    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
-#endif
-
-/* PyObjectCall.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
-#else
-#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
-#endif
-
-/* PyObjectCall2Args.proto */
-static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
-
-/* PyObjectCallMethO.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
-#endif
-
-/* PyObjectCallOneArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
-
-/* Import.proto */
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
-/* ImportFrom.proto */
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
-
-/* PyObjectCallNoArg.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
-#else
-#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
-#endif
-
-/* RaiseTooManyValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
-
-/* RaiseNeedMoreValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
-
-/* IterFinish.proto */
-static CYTHON_INLINE int __Pyx_IterFinish(void);
-
-/* UnpackItemEndCheck.proto */
-static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
-
-/* PyThreadStateGet.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;
-#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;
-#define __Pyx_PyErr_Occurred()  __pyx_tstate->curexc_type
-#else
-#define __Pyx_PyThreadState_declare
-#define __Pyx_PyThreadState_assign
-#define __Pyx_PyErr_Occurred()  PyErr_Occurred()
-#endif
-
-/* PyErrFetchRestore.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
-#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb)    __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
-#else
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#endif
-#else
-#define __Pyx_PyErr_Clear() PyErr_Clear()
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
-#endif
-
-/* RaiseException.proto */
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
-
-/* PyIntBinop.proto */
-#if !CYTHON_COMPILING_IN_PYPY
-static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
-#else
-#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
-    (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
-#endif
-
-/* GetItemInt.proto */
-#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
-    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
-    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
-               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
-#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
-    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
-    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
-                                                              int wraparound, int boundscheck);
-#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
-    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
-    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
-                                                              int wraparound, int boundscheck);
-static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
-                                                     int is_list, int wraparound, int boundscheck);
-
-/* ObjectGetItem.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
-#else
-#define __Pyx_PyObject_GetItem(obj, key)  PyObject_GetItem(obj, key)
-#endif
-
-/* SetItemInt.proto */
-#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
-    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
-    __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
-    (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
-               __Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
-static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
-                                               int is_list, int wraparound, int boundscheck);
-
-/* MemviewSliceInit.proto */
-#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
-#define __Pyx_MEMVIEW_DIRECT   1
-#define __Pyx_MEMVIEW_PTR      2
-#define __Pyx_MEMVIEW_FULL     4
-#define __Pyx_MEMVIEW_CONTIG   8
-#define __Pyx_MEMVIEW_STRIDED  16
-#define __Pyx_MEMVIEW_FOLLOW   32
-#define __Pyx_IS_C_CONTIG 1
-#define __Pyx_IS_F_CONTIG 2
-static int __Pyx_init_memviewslice(
-                struct __pyx_memoryview_obj *memview,
-                int ndim,
-                __Pyx_memviewslice *memviewslice,
-                int memview_is_new_reference);
-static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
-    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
-static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
-    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
-#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
-#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
-#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
-#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
-static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
-static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
-
-/* SliceObject.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
-        PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
-        PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
-        int has_cstart, int has_cstop, int wraparound);
-
-/* GetTopmostException.proto */
-#if CYTHON_USE_EXC_INFO_STACK
-static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
-#endif
-
-/* SaveResetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSave(type, value, tb)  __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#define __Pyx_ExceptionReset(type, value, tb)  __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-#else
-#define __Pyx_ExceptionSave(type, value, tb)   PyErr_GetExcInfo(type, value, tb)
-#define __Pyx_ExceptionReset(type, value, tb)  PyErr_SetExcInfo(type, value, tb)
-#endif
-
-/* PyErrExceptionMatches.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
-static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
-#else
-#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)
-#endif
-
-/* GetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_GetException(type, value, tb)  __Pyx__GetException(__pyx_tstate, type, value, tb)
-static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* ArgTypeTest.proto */
-#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
-    ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
-        __Pyx__ArgTypeTest(obj, type, name, exact))
-static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
-
-/* IncludeStringH.proto */
-#include <string.h>
-
-/* BytesEquals.proto */
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* UnicodeEquals.proto */
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* StrEquals.proto */
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
-#else
-#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
-#endif
-
-/* None.proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
-
-/* UnaryNegOverflows.proto */
-#define UNARY_NEG_WOULD_OVERFLOW(x)\
-        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
-
-static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
-/* GetAttr.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
-
-/* decode_c_string_utf16.proto */
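-/* byteorder: 0 = native order (honouring a BOM), -1 = little-endian,
-   1 = big-endian, as defined by PyUnicode_DecodeUTF16. */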
-static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
-    int byteorder = 0;
-    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
-}
-static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
-    int byteorder = -1;
-    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
-}
-static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
-    int byteorder = 1;
-    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
-}
-
-/* decode_c_string.proto */
-static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
-         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
-         const char* encoding, const char* errors,
-         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
-
-/* GetAttr3.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
-
-/* RaiseNoneIterError.proto */
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
-
-/* ExtTypeTest.proto */
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
-
-/* SwapException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSwap(type, value, tb)  __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* FastTypeChecks.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
-static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
-#else
-#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
-#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
-#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
-#endif
-#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
-
-static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-/* ListCompAppend.proto */
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
-static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
-    PyListObject* L = (PyListObject*) list;
-    Py_ssize_t len = Py_SIZE(list);
-    if (likely(L->allocated > len)) {
-        Py_INCREF(x);
-        PyList_SET_ITEM(list, len, x);
-        __Pyx_SET_SIZE(list, len + 1);
-        return 0;
-    }
-    return PyList_Append(list, x);
-}
-#else
-#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
-#endif
-
-/* ListExtend.proto */
-static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
-#if CYTHON_COMPILING_IN_CPYTHON
-    PyObject* none = _PyList_Extend((PyListObject*)L, v);
-    if (unlikely(!none))
-        return -1;
-    Py_DECREF(none);
-    return 0;
-#else
-    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
-#endif
-}
-
-/* ListAppend.proto */
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
-static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
-    PyListObject* L = (PyListObject*) list;
-    Py_ssize_t len = Py_SIZE(list);
-    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
-        Py_INCREF(x);
-        PyList_SET_ITEM(list, len, x);
-        __Pyx_SET_SIZE(list, len + 1);
-        return 0;
-    }
-    return PyList_Append(list, x);
-}
-#else
-#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
-#endif
-
-/* None.proto */
-static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
-
-/* None.proto */
-static CYTHON_INLINE long __Pyx_div_long(long, long);
-
-/* HasAttr.proto */
-static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
-
-/* PyObject_GenericGetAttrNoDict.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
-#endif
-
-/* PyObject_GenericGetAttr.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
-#endif
-
-/* SetVTable.proto */
-static int __Pyx_SetVtable(PyObject *dict, void *vtable);
-
-/* PyObjectGetAttrStrNoError.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
-
-/* SetupReduce.proto */
-static int __Pyx_setup_reduce(PyObject* type_obj);
-
-/* TypeImport.proto */
-#ifndef __PYX_HAVE_RT_ImportType_proto
-#define __PYX_HAVE_RT_ImportType_proto
-enum __Pyx_ImportType_CheckSize {
-   __Pyx_ImportType_CheckSize_Error = 0,
-   __Pyx_ImportType_CheckSize_Warn = 1,
-   __Pyx_ImportType_CheckSize_Ignore = 2
-};
-static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
-#endif
-
-/* CalculateMetaclass.proto */
-static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases);
-
-/* FetchCommonType.proto */
-static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
-
-/* CythonFunctionShared.proto */
-#define __Pyx_CyFunction_USED 1
-#define __Pyx_CYFUNCTION_STATICMETHOD  0x01
-#define __Pyx_CYFUNCTION_CLASSMETHOD   0x02
-#define __Pyx_CYFUNCTION_CCLASS        0x04
-#define __Pyx_CyFunction_GetClosure(f)\
-    (((__pyx_CyFunctionObject *) (f))->func_closure)
-#define __Pyx_CyFunction_GetClassObj(f)\
-    (((__pyx_CyFunctionObject *) (f))->func_classobj)
-#define __Pyx_CyFunction_Defaults(type, f)\
-    ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
-#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
-    ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
-typedef struct {
-    PyCFunctionObject func;
-#if PY_VERSION_HEX < 0x030500A0
-    PyObject *func_weakreflist;
-#endif
-    PyObject *func_dict;
-    PyObject *func_name;
-    PyObject *func_qualname;
-    PyObject *func_doc;
-    PyObject *func_globals;
-    PyObject *func_code;
-    PyObject *func_closure;
-    PyObject *func_classobj;
-    void *defaults;
-    int defaults_pyobjects;
-    size_t defaults_size;  // used by FusedFunction for copying defaults
-    int flags;
-    PyObject *defaults_tuple;
-    PyObject *defaults_kwdict;
-    PyObject *(*defaults_getter)(PyObject *);
-    PyObject *func_annotations;
-} __pyx_CyFunctionObject;
-static PyTypeObject *__pyx_CyFunctionType = 0;
-#define __Pyx_CyFunction_Check(obj)  (__Pyx_TypeCheck(obj, __pyx_CyFunctionType))
-static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
-                                      int flags, PyObject* qualname,
-                                      PyObject *self,
-                                      PyObject *module, PyObject *globals,
-                                      PyObject* code);
-static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
-                                                         size_t size,
-                                                         int pyobjects);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
-                                                            PyObject *tuple);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
-                                                             PyObject *dict);
-static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
-                                                              PyObject *dict);
-static int __pyx_CyFunction_init(void);
-
-/* CythonFunction.proto */
-static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
-                                      int flags, PyObject* qualname,
-                                      PyObject *closure,
-                                      PyObject *module, PyObject *globals,
-                                      PyObject* code);
-
-/* SetNameInClass.proto */
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
-#define __Pyx_SetNameInClass(ns, name, value)\
-    (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value))
-#elif CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_SetNameInClass(ns, name, value)\
-    (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value))
-#else
-#define __Pyx_SetNameInClass(ns, name, value)  PyObject_SetItem(ns, name, value)
-#endif
-
-/* ClassMethod.proto */
-#include "descrobject.h"
-static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method);
-
-/* Py3ClassCreate.proto */
-static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname,
-                                           PyObject *mkw, PyObject *modname, PyObject *doc);
-static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict,
-                                      PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass);
-
-/* CLineInTraceback.proto */
-#ifdef CYTHON_CLINE_IN_TRACEBACK
-#define __Pyx_CLineForTraceback(tstate, c_line)  (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
-#else
-static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
-#endif
-
-/* CodeObjectCache.proto */
-typedef struct {
-    PyCodeObject* code_object;
-    int code_line;
-} __Pyx_CodeObjectCacheEntry;
-struct __Pyx_CodeObjectCache {
-    int count;
-    int max_count;
-    __Pyx_CodeObjectCacheEntry* entries;
-};
-static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
-static PyCodeObject *__pyx_find_code_object(int code_line);
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
-
-/* AddTraceback.proto */
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename);
-
-#if PY_MAJOR_VERSION < 3
-    static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
-    static void __Pyx_ReleaseBuffer(Py_buffer *view);
-#else
-    #define __Pyx_GetBuffer PyObject_GetBuffer
-    #define __Pyx_ReleaseBuffer PyBuffer_Release
-#endif
-
-
-/* BufferStructDeclare.proto */
-typedef struct {
-  Py_ssize_t shape, strides, suboffsets;
-} __Pyx_Buf_DimInfo;
-typedef struct {
-  size_t refcount;
-  Py_buffer pybuffer;
-} __Pyx_Buffer;
-typedef struct {
-  __Pyx_Buffer *rcbuffer;
-  char *data;
-  __Pyx_Buf_DimInfo diminfo[8];
-} __Pyx_LocalBuf_ND;
-
-/* MemviewSliceIsContig.proto */
-static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
-
-/* OverlappingSlices.proto */
-static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
-                                __Pyx_memviewslice *slice2,
-                                int ndim, size_t itemsize);
-
-/* Capsule.proto */
-static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
-
-/* GCCDiagnostics.proto */
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
-#define __Pyx_HAS_GCC_DIAGNOSTIC
-#endif
-
-/* MemviewDtypeToObject.proto */
-static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp);
-static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj);
-
-/* RealImag.proto */
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    #define __Pyx_CREAL(z) ((z).real())
-    #define __Pyx_CIMAG(z) ((z).imag())
-  #else
-    #define __Pyx_CREAL(z) (__real__(z))
-    #define __Pyx_CIMAG(z) (__imag__(z))
-  #endif
-#else
-    #define __Pyx_CREAL(z) ((z).real)
-    #define __Pyx_CIMAG(z) ((z).imag)
-#endif
-#if defined(__cplusplus) && CYTHON_CCOMPLEX\
-        && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
-    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
-    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
-#else
-    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
-    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
-#endif
-
-/* Arithmetic.proto */
-#if CYTHON_CCOMPLEX
-    #define __Pyx_c_eq_float(a, b)   ((a)==(b))
-    #define __Pyx_c_sum_float(a, b)  ((a)+(b))
-    #define __Pyx_c_diff_float(a, b) ((a)-(b))
-    #define __Pyx_c_prod_float(a, b) ((a)*(b))
-    #define __Pyx_c_quot_float(a, b) ((a)/(b))
-    #define __Pyx_c_neg_float(a)     (-(a))
-  #ifdef __cplusplus
-    #define __Pyx_c_is_zero_float(z) ((z)==(float)0)
-    #define __Pyx_c_conj_float(z)    (::std::conj(z))
-    #if 1
-        #define __Pyx_c_abs_float(z)     (::std::abs(z))
-        #define __Pyx_c_pow_float(a, b)  (::std::pow(a, b))
-    #endif
-  #else
-    #define __Pyx_c_is_zero_float(z) ((z)==0)
-    #define __Pyx_c_conj_float(z)    (conjf(z))
-    #if 1
-        #define __Pyx_c_abs_float(z)     (cabsf(z))
-        #define __Pyx_c_pow_float(a, b)  (cpowf(a, b))
-    #endif
- #endif
-#else
-    static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
-    static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
-    #if 1
-        static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
-        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
-    #endif
-#endif
-
-/* Arithmetic.proto */
-#if CYTHON_CCOMPLEX
-    #define __Pyx_c_eq_double(a, b)   ((a)==(b))
-    #define __Pyx_c_sum_double(a, b)  ((a)+(b))
-    #define __Pyx_c_diff_double(a, b) ((a)-(b))
-    #define __Pyx_c_prod_double(a, b) ((a)*(b))
-    #define __Pyx_c_quot_double(a, b) ((a)/(b))
-    #define __Pyx_c_neg_double(a)     (-(a))
-  #ifdef __cplusplus
-    #define __Pyx_c_is_zero_double(z) ((z)==(double)0)
-    #define __Pyx_c_conj_double(z)    (::std::conj(z))
-    #if 1
-        #define __Pyx_c_abs_double(z)     (::std::abs(z))
-        #define __Pyx_c_pow_double(a, b)  (::std::pow(a, b))
-    #endif
-  #else
-    #define __Pyx_c_is_zero_double(z) ((z)==0)
-    #define __Pyx_c_conj_double(z)    (conj(z))
-    #if 1
-        #define __Pyx_c_abs_double(z)     (cabs(z))
-        #define __Pyx_c_pow_double(a, b)  (cpow(a, b))
-    #endif
- #endif
-#else
-    static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
-    static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
-    #if 1
-        static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
-        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
-    #endif
-#endif
-
-/* MemviewSliceCopyTemplate.proto */
-static __Pyx_memviewslice
-__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
-                                 const char *mode, int ndim,
-                                 size_t sizeof_dtype, int contig_flag,
-                                 int dtype_is_object);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint32(npy_uint32 value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
-
-/* IsLittleEndian.proto */
-static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
-
-/* BufferFormatCheck.proto */
-static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
-static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
-                              __Pyx_BufFmt_StackElem* stack,
-                              __Pyx_TypeInfo* type);
-
-/* TypeInfoCompare.proto */
-static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
-
-/* MemviewSliceValidateAndInit.proto */
-static int __Pyx_ValidateAndInit_memviewslice(
-                int *axes_specs,
-                int c_or_f_flag,
-                int buf_flags,
-                int ndim,
-                __Pyx_TypeInfo *dtype,
-                __Pyx_BufFmt_StackElem stack[],
-                __Pyx_memviewslice *memviewslice,
-                PyObject *original_obj);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(PyObject *, int writable_flag);
-
-/* PyObjectGetMethod.proto */
-static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
-
-/* PyObjectCallMethod1.proto */
-static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg);
-
-/* CoroutineBase.proto */
-typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *);
-#if CYTHON_USE_EXC_INFO_STACK
-#define __Pyx_ExcInfoStruct  _PyErr_StackItem
-#else
-typedef struct {
-    PyObject *exc_type;
-    PyObject *exc_value;
-    PyObject *exc_traceback;
-} __Pyx_ExcInfoStruct;
-#endif
-typedef struct {
-    PyObject_HEAD
-    __pyx_coroutine_body_t body;
-    PyObject *closure;
-    __Pyx_ExcInfoStruct gi_exc_state;
-    PyObject *gi_weakreflist;
-    PyObject *classobj;
-    PyObject *yieldfrom;
-    PyObject *gi_name;
-    PyObject *gi_qualname;
-    PyObject *gi_modulename;
-    PyObject *gi_code;
-    int resume_label;
-    char is_running;
-} __pyx_CoroutineObject;
-static __pyx_CoroutineObject *__Pyx__Coroutine_New(
-    PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
-    PyObject *name, PyObject *qualname, PyObject *module_name);
-static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
-            __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
-            PyObject *name, PyObject *qualname, PyObject *module_name);
-static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self);
-static int __Pyx_Coroutine_clear(PyObject *self);
-static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value);
-static PyObject *__Pyx_Coroutine_Close(PyObject *self);
-static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args);
-#if CYTHON_USE_EXC_INFO_STACK
-#define __Pyx_Coroutine_SwapException(self)
-#define __Pyx_Coroutine_ResetAndClearException(self)  __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state)
-#else
-#define __Pyx_Coroutine_SwapException(self) {\
-    __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback);\
-    __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state);\
-    }
-#define __Pyx_Coroutine_ResetAndClearException(self) {\
-    __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback);\
-    (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL;\
-    }
-#endif
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\
-    __Pyx_PyGen__FetchStopIterationValue(__pyx_tstate, pvalue)
-#else
-#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\
-    __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue)
-#endif
-static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue);
-static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state);
-
-/* PatchModuleWithCoroutine.proto */
-static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code);
-
-/* PatchGeneratorABC.proto */
-static int __Pyx_patch_abc(void);
-
-/* Generator.proto */
-#define __Pyx_Generator_USED
-static PyTypeObject *__pyx_GeneratorType = 0;
-#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
-#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name)\
-    __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
-static PyObject *__Pyx_Generator_Next(PyObject *self);
-static int __pyx_Generator_init(void);
-
-/* CheckBinaryVersion.proto */
-static int __Pyx_check_binary_version(void);
-
-/* InitStrings.proto */
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
-
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
-static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-
-/* Module declarations from 'cpython.buffer' */
-
-/* Module declarations from 'libc.string' */
-
-/* Module declarations from 'libc.stdio' */
-
-/* Module declarations from '__builtin__' */
-
-/* Module declarations from 'cpython.type' */
-static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
-
-/* Module declarations from 'cpython' */
-
-/* Module declarations from 'cpython.object' */
-
-/* Module declarations from 'cpython.ref' */
-
-/* Module declarations from 'cpython.mem' */
-
-/* Module declarations from 'numpy' */
-
-/* Module declarations from 'numpy' */
-static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
-static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
-static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
-static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
-static PyTypeObject *__pyx_ptype_5numpy_generic = 0;
-static PyTypeObject *__pyx_ptype_5numpy_number = 0;
-static PyTypeObject *__pyx_ptype_5numpy_integer = 0;
-static PyTypeObject *__pyx_ptype_5numpy_signedinteger = 0;
-static PyTypeObject *__pyx_ptype_5numpy_unsignedinteger = 0;
-static PyTypeObject *__pyx_ptype_5numpy_inexact = 0;
-static PyTypeObject *__pyx_ptype_5numpy_floating = 0;
-static PyTypeObject *__pyx_ptype_5numpy_complexfloating = 0;
-static PyTypeObject *__pyx_ptype_5numpy_flexible = 0;
-static PyTypeObject *__pyx_ptype_5numpy_character = 0;
-static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
-
-/* Module declarations from 'cython.view' */
-
-/* Module declarations from 'cython' */
-
-/* Module declarations from 'numpy.math' */
-
-/* Module declarations from 'madmom.ml.hmm' */
-static PyTypeObject *__pyx_ptype_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator = 0;
-static PyTypeObject *__pyx_array_type = 0;
-static PyTypeObject *__pyx_MemviewEnum_type = 0;
-static PyTypeObject *__pyx_memoryview_type = 0;
-static PyTypeObject *__pyx_memoryviewslice_type = 0;
-static PyObject *generic = 0;
-static PyObject *strided = 0;
-static PyObject *indirect = 0;
-static PyObject *contiguous = 0;
-static PyObject *indirect_contiguous = 0;
-static int __pyx_memoryview_thread_locks_used;
-static PyThread_type_lock __pyx_memoryview_thread_locks[8];
-static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
-static void *__pyx_align_pointer(void *, size_t); /*proto*/
-static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
-static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
-static PyObject *_unellipsify(PyObject *, int); /*proto*/
-static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
-static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
-static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
-static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
-static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
-static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
-static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
-static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
-static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
-static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
-static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
-static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
-static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
-static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
-static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
-static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
-static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
-static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_6madmom_2ml_3hmm_uint32_t = { "uint32_t", NULL, sizeof(__pyx_t_6madmom_2ml_3hmm_uint32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_6madmom_2ml_3hmm_uint32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_6madmom_2ml_3hmm_uint32_t), 0 };
-static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
-#define __Pyx_MODULE_NAME "madmom.ml.hmm"
-extern int __pyx_module_is_main_madmom__ml__hmm;
-int __pyx_module_is_main_madmom__ml__hmm = 0;
-
-/* Implementation of 'madmom.ml.hmm' */
-static PyObject *__pyx_builtin_object;
-static PyObject *__pyx_builtin_property;
-static PyObject *__pyx_builtin_staticmethod;
-static PyObject *__pyx_builtin_ValueError;
-static PyObject *__pyx_builtin_max;
-static PyObject *__pyx_builtin_NotImplementedError;
-static PyObject *__pyx_builtin_super;
-static PyObject *__pyx_builtin_range;
-static PyObject *__pyx_builtin_RuntimeWarning;
-static PyObject *__pyx_builtin_ImportError;
-static PyObject *__pyx_builtin_MemoryError;
-static PyObject *__pyx_builtin_enumerate;
-static PyObject *__pyx_builtin_TypeError;
-static PyObject *__pyx_builtin_Ellipsis;
-static PyObject *__pyx_builtin_id;
-static PyObject *__pyx_builtin_IndexError;
-static const char __pyx_k_O[] = "O";
-static const char __pyx_k_T[] = "T";
-static const char __pyx_k_c[] = "c";
-static const char __pyx_k_id[] = "id";
-static const char __pyx_k_np[] = "np";
-static const char __pyx_k_om[] = "om";
-static const char __pyx_k_tm[] = "tm";
-static const char __pyx_k_HMM[] = "HMM";
-static const char __pyx_k_cls[] = "cls";
-static const char __pyx_k_doc[] = "__doc__";
-static const char __pyx_k_exp[] = "exp";
-static const char __pyx_k_fwd[] = "fwd";
-static const char __pyx_k_int[] = "int";
-static const char __pyx_k_log[] = "log";
-static const char __pyx_k_max[] = "max";
-static const char __pyx_k_new[] = "__new__";
-static const char __pyx_k_obj[] = "obj";
-static const char __pyx_k_pop[] = "pop";
-static const char __pyx_k_sum[] = "sum";
-static const char __pyx_k_args[] = "args";
-static const char __pyx_k_axis[] = "axis";
-static const char __pyx_k_base[] = "base";
-static const char __pyx_k_copy[] = "copy";
-static const char __pyx_k_data[] = "data";
-static const char __pyx_k_dict[] = "__dict__";
-static const char __pyx_k_init[] = "__init__";
-static const char __pyx_k_main[] = "__main__";
-static const char __pyx_k_mode[] = "mode";
-static const char __pyx_k_name[] = "name";
-static const char __pyx_k_ndim[] = "ndim";
-static const char __pyx_k_ones[] = "ones";
-static const char __pyx_k_pack[] = "pack";
-static const char __pyx_k_path[] = "path";
-static const char __pyx_k_prev[] = "_prev";
-static const char __pyx_k_self[] = "self";
-static const char __pyx_k_send[] = "send";
-static const char __pyx_k_size[] = "size";
-static const char __pyx_k_step[] = "step";
-static const char __pyx_k_stop[] = "stop";
-static const char __pyx_k_test[] = "__test__";
-static const char __pyx_k_warn[] = "warn";
-static const char __pyx_k_ASCII[] = "ASCII";
-static const char __pyx_k_array[] = "array";
-static const char __pyx_k_class[] = "__class__";
-static const char __pyx_k_close[] = "close";
-static const char __pyx_k_dtype[] = "dtype";
-static const char __pyx_k_empty[] = "empty";
-static const char __pyx_k_error[] = "error";
-static const char __pyx_k_flags[] = "flags";
-static const char __pyx_k_float[] = "float";
-static const char __pyx_k_frame[] = "frame";
-static const char __pyx_k_isinf[] = "isinf";
-static const char __pyx_k_numpy[] = "numpy";
-static const char __pyx_k_range[] = "range";
-static const char __pyx_k_reset[] = "reset";
-static const char __pyx_k_shape[] = "shape";
-static const char __pyx_k_start[] = "start";
-static const char __pyx_k_state[] = "state";
-static const char __pyx_k_super[] = "super";
-static const char __pyx_k_throw[] = "throw";
-static const char __pyx_k_zeros[] = "zeros";
-static const char __pyx_k_arange[] = "arange";
-static const char __pyx_k_argmax[] = "argmax";
-static const char __pyx_k_astype[] = "astype";
-static const char __pyx_k_encode[] = "encode";
-static const char __pyx_k_format[] = "format";
-static const char __pyx_k_import[] = "__import__";
-static const char __pyx_k_indptr[] = "indptr";
-static const char __pyx_k_module[] = "__module__";
-static const char __pyx_k_name_2[] = "__name__";
-static const char __pyx_k_object[] = "object";
-static const char __pyx_k_pickle[] = "pickle";
-static const char __pyx_k_reduce[] = "__reduce__";
-static const char __pyx_k_states[] = "states";
-static const char __pyx_k_struct[] = "struct";
-static const char __pyx_k_uint32[] = "uint32";
-static const char __pyx_k_unpack[] = "unpack";
-static const char __pyx_k_update[] = "update";
-static const char __pyx_k_asarray[] = "asarray";
-static const char __pyx_k_density[] = "density";
-static const char __pyx_k_fortran[] = "fortran";
-static const char __pyx_k_forward[] = "forward";
-static const char __pyx_k_fwd_cur[] = "fwd_cur";
-static const char __pyx_k_indices[] = "indices";
-static const char __pyx_k_memview[] = "memview";
-static const char __pyx_k_nonzero[] = "nonzero";
-static const char __pyx_k_obs_end[] = "obs_end";
-static const char __pyx_k_pointer[] = "pointer";
-static const char __pyx_k_prepare[] = "__prepare__";
-static const char __pyx_k_tm_ptrs[] = "tm_ptrs";
-static const char __pyx_k_viterbi[] = "viterbi";
-static const char __pyx_k_weights[] = "weights";
-static const char __pyx_k_Ellipsis[] = "Ellipsis";
-static const char __pyx_k_allclose[] = "allclose";
-static const char __pyx_k_bincount[] = "bincount";
-static const char __pyx_k_block_sz[] = "block_sz";
-static const char __pyx_k_fwd_prev[] = "fwd_prev";
-static const char __pyx_k_getstate[] = "__getstate__";
-static const char __pyx_k_itemsize[] = "itemsize";
-static const char __pyx_k_pointers[] = "pointers";
-static const char __pyx_k_prob_sum[] = "prob_sum";
-static const char __pyx_k_property[] = "property";
-static const char __pyx_k_pyx_type[] = "__pyx_type";
-static const char __pyx_k_qualname[] = "__qualname__";
-static const char __pyx_k_setstate[] = "__setstate__";
-static const char __pyx_k_warnings[] = "warnings";
-static const char __pyx_k_TypeError[] = "TypeError";
-static const char __pyx_k_densities[] = "densities";
-static const char __pyx_k_enumerate[] = "enumerate";
-static const char __pyx_k_metaclass[] = "__metaclass__";
-static const char __pyx_k_obs_start[] = "obs_start";
-static const char __pyx_k_pyx_state[] = "__pyx_state";
-static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
-static const char __pyx_k_tm_states[] = "tm_states";
-static const char __pyx_k_IndexError[] = "IndexError";
-static const char __pyx_k_ValueError[] = "ValueError";
-static const char __pyx_k_block_size[] = "block_size";
-static const char __pyx_k_csr_matrix[] = "csr_matrix";
-static const char __pyx_k_from_dense[] = "from_dense";
-static const char __pyx_k_make_dense[] = "make_dense";
-static const char __pyx_k_num_states[] = "num_states";
-static const char __pyx_k_prev_state[] = "prev_state";
-static const char __pyx_k_pyx_result[] = "__pyx_result";
-static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
-static const char __pyx_k_ImportError[] = "ImportError";
-static const char __pyx_k_MemoryError[] = "MemoryError";
-static const char __pyx_k_PickleError[] = "PickleError";
-static const char __pyx_k_bt_pointers[] = "bt_pointers";
-static const char __pyx_k_make_sparse[] = "make_sparse";
-static const char __pyx_k_norm_factor[] = "norm_factor";
-static const char __pyx_k_om_pointers[] = "om_pointers";
-static const char __pyx_k_prev_states[] = "prev_states";
-static const char __pyx_k_tm_pointers[] = "tm_pointers";
-static const char __pyx_k_transitions[] = "transitions";
-static const char __pyx_k_observations[] = "observations";
-static const char __pyx_k_om_densities[] = "om_densities";
-static const char __pyx_k_prev_pointer[] = "prev_pointer";
-static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
-static const char __pyx_k_scipy_sparse[] = "scipy.sparse";
-static const char __pyx_k_staticmethod[] = "staticmethod";
-static const char __pyx_k_stringsource[] = "stringsource";
-static const char __pyx_k_log_densities[] = "log_densities";
-static const char __pyx_k_madmom_ml_hmm[] = "madmom.ml.hmm";
-static const char __pyx_k_probabilities[] = "probabilities";
-static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
-static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
-static const char __pyx_k_RuntimeWarning[] = "RuntimeWarning";
-static const char __pyx_k_TransitionModel[] = "TransitionModel";
-static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
-static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
-static const char __pyx_k_current_viterbi[] = "current_viterbi";
-static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
-static const char __pyx_k_log_probability[] = "log_probability";
-static const char __pyx_k_num_transitions[] = "num_transitions";
-static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
-static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
-static const char __pyx_k_transition_prob[] = "transition_prob";
-static const char __pyx_k_ObservationModel[] = "ObservationModel";
-static const char __pyx_k_num_observations[] = "num_observations";
-static const char __pyx_k_previous_viterbi[] = "previous_viterbi";
-static const char __pyx_k_tm_probabilities[] = "tm_probabilities";
-static const char __pyx_k_transition_model[] = "transition_model";
-static const char __pyx_k_HiddenMarkovModel[] = "HiddenMarkovModel";
-static const char __pyx_k_forward_generator[] = "forward_generator";
-static const char __pyx_k_log_probabilities[] = "log_probabilities";
-static const char __pyx_k_madmom_ml_hmm_pyx[] = "madmom/ml/hmm.pyx";
-static const char __pyx_k_observation_model[] = "observation_model";
-static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
-static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
-static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
-static const char __pyx_k_NotImplementedError[] = "NotImplementedError";
-static const char __pyx_k_initial_distribution[] = "initial_distribution";
-static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
-static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
-static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
-static const char __pyx_k_TransitionModel___init[] = "TransitionModel.__init__";
-static const char __pyx_k_HiddenMarkovModel_reset[] = "HiddenMarkovModel.reset";
-static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
-static const char __pyx_k_ObservationModel___init[] = "ObservationModel.__init__";
-static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
-static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
-static const char __pyx_k_DiscreteObservationModel[] = "DiscreteObservationModel";
-static const char __pyx_k_HiddenMarkovModel___init[] = "HiddenMarkovModel.__init__";
-static const char __pyx_k_HiddenMarkovModel_forward[] = "HiddenMarkovModel.forward";
-static const char __pyx_k_HiddenMarkovModel_viterbi[] = "HiddenMarkovModel.viterbi";
-static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
-static const char __pyx_k_observation_probabilities[] = "observation_probabilities";
-static const char __pyx_k_ObservationModel_densities[] = "ObservationModel.densities";
-static const char __pyx_k_TransitionModel_from_dense[] = "TransitionModel.from_dense";
-static const char __pyx_k_TransitionModel_make_dense[] = "TransitionModel.make_dense";
-static const char __pyx_k_TransitionModel_num_states[] = "TransitionModel.num_states";
-static const char __pyx_k_TransitionModel_make_sparse[] = "TransitionModel.make_sparse";
-static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
-static const char __pyx_k_HiddenMarkovModel___getstate[] = "HiddenMarkovModel.__getstate__";
-static const char __pyx_k_HiddenMarkovModel___setstate[] = "HiddenMarkovModel.__setstate__";
-static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
-static const char __pyx_k_Not_a_probability_distribution[] = "Not a probability distribution.";
-static const char __pyx_k_ObservationModel_log_densities[] = "ObservationModel.log_densities";
-static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
-static const char __pyx_k_DiscreteObservationModel___init[] = "DiscreteObservationModel.__init__";
-static const char __pyx_k_Hidden_Markov_Model_To_search_f[] = "\n    Hidden Markov Model\n\n    To search for the best path through the state space with the Viterbi\n    algorithm, the following parameters must be defined.\n\n    Parameters\n    ----------\n    transition_model : :class:`TransitionModel` instance\n        Transition model.\n    observation_model : :class:`ObservationModel` instance\n        Observation model.\n    initial_distribution : numpy array, optional\n        Initial state distribution; if 'None' a uniform distribution is\n        assumed.\n\n    Examples\n    --------\n    Create a simple HMM with two states and three observation types. The\n    initial distribution is uniform.\n\n    >>> tm = TransitionModel.from_dense([0, 1, 0, 1], [0, 0, 1, 1],\n    ...                                 [0.7, 0.3, 0.6, 0.4])\n    >>> om = DiscreteObservationModel(np.array([[0.2, 0.3, 0.5],\n    ...                                         [0.7, 0.1, 0.2]]))\n    >>> hmm = HiddenMarkovModel(tm, om)\n\n    Now we can decode the most probable state sequence and get the\n    log-probability of the sequence\n\n    >>> seq, log_p = hmm.viterbi([0, 0, 1, 1, 0, 0, 0, 2, 2])\n    >>> log_p  #  doctest: +ELLIPSIS\n    -12.87...\n    >>> seq\n    array([1, 1, 0, 0, 1, 1, 1, 0, 0], dtype=uint32)\n\n    Compute the forward variables:\n\n    >>> hmm.forward([0, 0, 1, 1, 0, 0, 0, 2, 2])\n    array([[ 0.34667,  0.65333],\n           [ 0.33171,  0.66829],\n           [ 0.83814,  0.16186],\n           [ 0.86645,  0.13355],\n           [ 0.38502,  0.61498],\n           [ 0.33539,  0.66461],\n           [ 0.33063,  0.66937],\n           [ 0.81179,  0.18821],\n           [ 0.84231,  0.15769]])\n    ";
-static const char __pyx_k_Observation_model_class_for_a_H[] = "\n    Observation model class for a HMM.\n\n    The observation model is defined as a plain 1D numpy arrays `pointers` and\n    the methods `log_densities()` and `densities()` which return 2D numpy\n    arrays with the (log) densities of the observations.\n\n    Parameters\n    ----------\n    pointers : numpy array (num_states,)\n        Pointers from HMM states to the correct densities. The length of the\n        array must be equal to the number of states of the HMM and pointing\n        from each state to the corresponding column of the array returned\n        by one of the `log_densities()` or `densities()` methods. The\n        `pointers` type must be np.uint32.\n\n    See Also\n    --------\n    ObservationModel.log_densities\n    ObservationModel.densities\n\n    ";
-static const char __pyx_k_Simple_discrete_observation_mod[] = "\n    Simple discrete observation model that takes an observation matrix of the\n    form (num_states x num_observations) containing P(observation | state).\n\n    Parameters\n    ----------\n    observation_probabilities : numpy array\n        Observation probabilities as a 2D array of shape (num_observations,\n        num_states). Has to sum to 1 over the second axis, since it\n        represents P(observation | state).\n\n    Examples\n    --------\n    Assuming two states and three observation types, instantiate a discrete\n    observation model:\n\n    >>> om = DiscreteObservationModel(np.array([[0.1, 0.5, 0.4],\n    ...                                         [0.7, 0.2, 0.1]]))\n    >>> om  # doctest: +ELLIPSIS\n    <madmom.ml.hmm.DiscreteObservationModel object at 0x...>\n\n    If the probabilities do not sum to 1, it throws a ValueError:\n\n    >>> om = DiscreteObservationModel(np.array([[0.5, 0.5, 0.5],\n    ...                                         [0.5, 0.5, 0.5]]))\n    ... # doctest: +IGNORE_EXCEPTION_DETAIL\n    Traceback (most recent call last):\n    ...\n    ValueError: Not a probability distribution.\n\n    ";
-static const char __pyx_k_This_module_contains_Hidden_Mar[] = "\nThis module contains Hidden Markov Model (HMM) functionality.\n\nNotes\n-----\nIf you want to change this module and use it interactively, use pyximport.\n\n>>> import pyximport\n>>> pyximport.install(reload_support=True,\n...                   setup_args={'include_dirs': np.get_include()})\n... # doctest: +ELLIPSIS\n(None, <pyximport.pyximport.PyxImporter object at 0x...>)\n";
-static const char __pyx_k_TransitionModel_num_transitions[] = "TransitionModel.num_transitions";
-static const char __pyx_k_Transition_model_class_for_a_HM[] = "\n    Transition model class for a HMM.\n\n    The transition model is defined similar to a scipy compressed sparse row\n    matrix and holds all transition probabilities from one state to an other.\n    This allows an efficient Viterbi decoding of the HMM.\n\n    Parameters\n    ----------\n    states : numpy array\n        All states transitioning to state s are stored in:\n        states[pointers[s]:pointers[s+1]]\n    pointers : numpy array\n        Pointers for the `states` array for state s.\n    probabilities : numpy array\n        The corresponding transition are stored in:\n        probabilities[pointers[s]:pointers[s+1]].\n\n    Notes\n    -----\n    This class should be either used for loading saved transition models or\n    being sub-classed to define a specific transition model.\n\n    See Also\n    --------\n    scipy.sparse.csr_matrix\n\n    Examples\n    --------\n    Create a simple transition model with two states using a list of\n    transitions and their probabilities\n\n    >>> tm = TransitionModel.from_dense([0, 1, 0, 1], [0, 0, 1, 1],\n    ...                                 [0.8, 0.2, 0.3, 0.7])\n    >>> tm  # doctest: +ELLIPSIS\n    <madmom.ml.hmm.TransitionModel object at 0x...>\n\n    TransitionModel.from_dense will check if the supplied probabilities for\n    each state sum to 1 (and thus represent a correct probability distribution)\n\n    >>> tm = TransitionModel.from_dense([0, 1], [1, 0], [0.5, 1.0])\n    ... # doctest: +IGNORE_EXCEPTION_DETAIL\n    Traceback (most recent call last):\n    ...\n    ValueError: Not a probability distribution.\n\n    ";
-static const char __pyx_k_inf_log_probability_during_Vite[] = "-inf log probability during Viterbi decoding cannot find a valid path";
-static const char __pyx_k_must_be_implemented_by_subclass[] = "must be implemented by subclass";
-static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
-static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
-static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
-static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
-static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
-static const char __pyx_k_DiscreteObservationModel_densiti[] = "DiscreteObservationModel.densities";
-static const char __pyx_k_DiscreteObservationModel_log_den[] = "DiscreteObservationModel.log_densities";
-static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
-static const char __pyx_k_HiddenMarkovModel_forward_genera[] = "HiddenMarkovModel.forward_generator";
-static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
-static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
-static const char __pyx_k_Initial_distribution_is_not_a_pr[] = "Initial distribution is not a probability distribution.";
-static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
-static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
-static const char __pyx_k_TransitionModel_log_probabilitie[] = "TransitionModel.log_probabilities";
-static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
-static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
-static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
-static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
-static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
-static PyObject *__pyx_n_s_ASCII;
-static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
-static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
-static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
-static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
-static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
-static PyObject *__pyx_n_s_DiscreteObservationModel;
-static PyObject *__pyx_n_s_DiscreteObservationModel___init;
-static PyObject *__pyx_n_s_DiscreteObservationModel_densiti;
-static PyObject *__pyx_n_s_DiscreteObservationModel_log_den;
-static PyObject *__pyx_n_s_Ellipsis;
-static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
-static PyObject *__pyx_n_s_HMM;
-static PyObject *__pyx_n_s_HiddenMarkovModel;
-static PyObject *__pyx_n_s_HiddenMarkovModel___getstate;
-static PyObject *__pyx_n_s_HiddenMarkovModel___init;
-static PyObject *__pyx_n_s_HiddenMarkovModel___setstate;
-static PyObject *__pyx_n_s_HiddenMarkovModel_forward;
-static PyObject *__pyx_n_s_HiddenMarkovModel_forward_genera;
-static PyObject *__pyx_n_s_HiddenMarkovModel_reset;
-static PyObject *__pyx_n_s_HiddenMarkovModel_viterbi;
-static PyObject *__pyx_kp_s_Hidden_Markov_Model_To_search_f;
-static PyObject *__pyx_n_s_ImportError;
-static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
-static PyObject *__pyx_n_s_IndexError;
-static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
-static PyObject *__pyx_kp_s_Initial_distribution_is_not_a_pr;
-static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
-static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
-static PyObject *__pyx_n_s_MemoryError;
-static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
-static PyObject *__pyx_kp_s_MemoryView_of_r_object;
-static PyObject *__pyx_n_s_NotImplementedError;
-static PyObject *__pyx_kp_s_Not_a_probability_distribution;
-static PyObject *__pyx_n_b_O;
-static PyObject *__pyx_n_s_ObservationModel;
-static PyObject *__pyx_n_s_ObservationModel___init;
-static PyObject *__pyx_n_s_ObservationModel_densities;
-static PyObject *__pyx_n_s_ObservationModel_log_densities;
-static PyObject *__pyx_kp_s_Observation_model_class_for_a_H;
-static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
-static PyObject *__pyx_n_s_PickleError;
-static PyObject *__pyx_n_s_RuntimeWarning;
-static PyObject *__pyx_kp_s_Simple_discrete_observation_mod;
-static PyObject *__pyx_n_s_T;
-static PyObject *__pyx_n_s_TransitionModel;
-static PyObject *__pyx_n_s_TransitionModel___init;
-static PyObject *__pyx_n_s_TransitionModel_from_dense;
-static PyObject *__pyx_n_s_TransitionModel_log_probabilitie;
-static PyObject *__pyx_n_s_TransitionModel_make_dense;
-static PyObject *__pyx_n_s_TransitionModel_make_sparse;
-static PyObject *__pyx_n_s_TransitionModel_num_states;
-static PyObject *__pyx_n_s_TransitionModel_num_transitions;
-static PyObject *__pyx_kp_s_Transition_model_class_for_a_HM;
-static PyObject *__pyx_n_s_TypeError;
-static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
-static PyObject *__pyx_n_s_ValueError;
-static PyObject *__pyx_n_s_View_MemoryView;
-static PyObject *__pyx_n_s_allclose;
-static PyObject *__pyx_n_s_allocate_buffer;
-static PyObject *__pyx_n_s_arange;
-static PyObject *__pyx_n_s_argmax;
-static PyObject *__pyx_n_s_args;
-static PyObject *__pyx_n_s_array;
-static PyObject *__pyx_n_s_asarray;
-static PyObject *__pyx_n_s_astype;
-static PyObject *__pyx_n_s_axis;
-static PyObject *__pyx_n_s_base;
-static PyObject *__pyx_n_s_bincount;
-static PyObject *__pyx_n_s_block_size;
-static PyObject *__pyx_n_s_block_sz;
-static PyObject *__pyx_n_s_bt_pointers;
-static PyObject *__pyx_n_s_c;
-static PyObject *__pyx_n_u_c;
-static PyObject *__pyx_n_s_class;
-static PyObject *__pyx_n_s_cline_in_traceback;
-static PyObject *__pyx_n_s_close;
-static PyObject *__pyx_n_s_cls;
-static PyObject *__pyx_kp_s_contiguous_and_direct;
-static PyObject *__pyx_kp_s_contiguous_and_indirect;
-static PyObject *__pyx_n_s_copy;
-static PyObject *__pyx_n_s_csr_matrix;
-static PyObject *__pyx_n_s_current_viterbi;
-static PyObject *__pyx_n_s_data;
-static PyObject *__pyx_n_s_densities;
-static PyObject *__pyx_n_s_density;
-static PyObject *__pyx_n_s_dict;
-static PyObject *__pyx_n_s_doc;
-static PyObject *__pyx_n_s_dtype;
-static PyObject *__pyx_n_s_dtype_is_object;
-static PyObject *__pyx_n_s_empty;
-static PyObject *__pyx_n_s_encode;
-static PyObject *__pyx_n_s_enumerate;
-static PyObject *__pyx_n_s_error;
-static PyObject *__pyx_n_s_exp;
-static PyObject *__pyx_n_s_flags;
-static PyObject *__pyx_n_s_float;
-static PyObject *__pyx_n_s_format;
-static PyObject *__pyx_n_s_fortran;
-static PyObject *__pyx_n_u_fortran;
-static PyObject *__pyx_n_s_forward;
-static PyObject *__pyx_n_s_forward_generator;
-static PyObject *__pyx_n_s_frame;
-static PyObject *__pyx_n_s_from_dense;
-static PyObject *__pyx_n_s_fwd;
-static PyObject *__pyx_n_s_fwd_cur;
-static PyObject *__pyx_n_s_fwd_prev;
-static PyObject *__pyx_n_s_getstate;
-static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
-static PyObject *__pyx_n_s_id;
-static PyObject *__pyx_n_s_import;
-static PyObject *__pyx_n_s_indices;
-static PyObject *__pyx_n_s_indptr;
-static PyObject *__pyx_kp_s_inf_log_probability_during_Vite;
-static PyObject *__pyx_n_s_init;
-static PyObject *__pyx_n_s_initial_distribution;
-static PyObject *__pyx_n_s_int;
-static PyObject *__pyx_n_s_isinf;
-static PyObject *__pyx_n_s_itemsize;
-static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
-static PyObject *__pyx_n_s_log;
-static PyObject *__pyx_n_s_log_densities;
-static PyObject *__pyx_n_s_log_probabilities;
-static PyObject *__pyx_n_s_log_probability;
-static PyObject *__pyx_n_s_madmom_ml_hmm;
-static PyObject *__pyx_kp_s_madmom_ml_hmm_pyx;
-static PyObject *__pyx_n_s_main;
-static PyObject *__pyx_n_s_make_dense;
-static PyObject *__pyx_n_s_make_sparse;
-static PyObject *__pyx_n_s_max;
-static PyObject *__pyx_n_s_memview;
-static PyObject *__pyx_n_s_metaclass;
-static PyObject *__pyx_n_s_mode;
-static PyObject *__pyx_n_s_module;
-static PyObject *__pyx_kp_s_must_be_implemented_by_subclass;
-static PyObject *__pyx_n_s_name;
-static PyObject *__pyx_n_s_name_2;
-static PyObject *__pyx_n_s_ndim;
-static PyObject *__pyx_n_s_new;
-static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
-static PyObject *__pyx_n_s_nonzero;
-static PyObject *__pyx_n_s_norm_factor;
-static PyObject *__pyx_n_s_np;
-static PyObject *__pyx_n_s_num_observations;
-static PyObject *__pyx_n_s_num_states;
-static PyObject *__pyx_n_s_num_transitions;
-static PyObject *__pyx_n_s_numpy;
-static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
-static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
-static PyObject *__pyx_n_s_obj;
-static PyObject *__pyx_n_s_object;
-static PyObject *__pyx_n_s_obs_end;
-static PyObject *__pyx_n_s_obs_start;
-static PyObject *__pyx_n_s_observation_model;
-static PyObject *__pyx_n_s_observation_probabilities;
-static PyObject *__pyx_n_s_observations;
-static PyObject *__pyx_n_s_om;
-static PyObject *__pyx_n_s_om_densities;
-static PyObject *__pyx_n_s_om_pointers;
-static PyObject *__pyx_n_s_ones;
-static PyObject *__pyx_n_s_pack;
-static PyObject *__pyx_n_s_path;
-static PyObject *__pyx_n_s_pickle;
-static PyObject *__pyx_n_s_pointer;
-static PyObject *__pyx_n_s_pointers;
-static PyObject *__pyx_n_s_pop;
-static PyObject *__pyx_n_s_prepare;
-static PyObject *__pyx_n_s_prev;
-static PyObject *__pyx_n_s_prev_pointer;
-static PyObject *__pyx_n_s_prev_state;
-static PyObject *__pyx_n_s_prev_states;
-static PyObject *__pyx_n_s_previous_viterbi;
-static PyObject *__pyx_n_s_prob_sum;
-static PyObject *__pyx_n_s_probabilities;
-static PyObject *__pyx_n_s_property;
-static PyObject *__pyx_n_s_pyx_PickleError;
-static PyObject *__pyx_n_s_pyx_checksum;
-static PyObject *__pyx_n_s_pyx_getbuffer;
-static PyObject *__pyx_n_s_pyx_result;
-static PyObject *__pyx_n_s_pyx_state;
-static PyObject *__pyx_n_s_pyx_type;
-static PyObject *__pyx_n_s_pyx_unpickle_Enum;
-static PyObject *__pyx_n_s_pyx_vtable;
-static PyObject *__pyx_n_s_qualname;
-static PyObject *__pyx_n_s_range;
-static PyObject *__pyx_n_s_reduce;
-static PyObject *__pyx_n_s_reduce_cython;
-static PyObject *__pyx_n_s_reduce_ex;
-static PyObject *__pyx_n_s_reset;
-static PyObject *__pyx_n_s_scipy_sparse;
-static PyObject *__pyx_n_s_self;
-static PyObject *__pyx_n_s_send;
-static PyObject *__pyx_n_s_setstate;
-static PyObject *__pyx_n_s_setstate_cython;
-static PyObject *__pyx_n_s_shape;
-static PyObject *__pyx_n_s_size;
-static PyObject *__pyx_n_s_start;
-static PyObject *__pyx_n_s_state;
-static PyObject *__pyx_n_s_states;
-static PyObject *__pyx_n_s_staticmethod;
-static PyObject *__pyx_n_s_step;
-static PyObject *__pyx_n_s_stop;
-static PyObject *__pyx_kp_s_strided_and_direct;
-static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
-static PyObject *__pyx_kp_s_strided_and_indirect;
-static PyObject *__pyx_kp_s_stringsource;
-static PyObject *__pyx_n_s_struct;
-static PyObject *__pyx_n_s_sum;
-static PyObject *__pyx_n_s_super;
-static PyObject *__pyx_n_s_test;
-static PyObject *__pyx_n_s_throw;
-static PyObject *__pyx_n_s_tm;
-static PyObject *__pyx_n_s_tm_pointers;
-static PyObject *__pyx_n_s_tm_probabilities;
-static PyObject *__pyx_n_s_tm_ptrs;
-static PyObject *__pyx_n_s_tm_states;
-static PyObject *__pyx_n_s_transition_model;
-static PyObject *__pyx_n_s_transition_prob;
-static PyObject *__pyx_n_s_transitions;
-static PyObject *__pyx_n_s_uint32;
-static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
-static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
-static PyObject *__pyx_n_s_unpack;
-static PyObject *__pyx_n_s_update;
-static PyObject *__pyx_n_s_viterbi;
-static PyObject *__pyx_n_s_warn;
-static PyObject *__pyx_n_s_warnings;
-static PyObject *__pyx_n_s_weights;
-static PyObject *__pyx_n_s_zeros;
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_states, PyObject *__pyx_v_pointers, PyObject *__pyx_v_probabilities); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_2num_states(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_4num_transitions(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_6log_probabilities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_8make_dense(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_states, PyObject *__pyx_v_pointers, PyObject *__pyx_v_probabilities); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_10make_sparse(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_states, PyObject *__pyx_v_prev_states, PyObject *__pyx_v_probabilities); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_12from_dense(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_cls, PyObject *__pyx_v_states, PyObject *__pyx_v_prev_states, PyObject *__pyx_v_probabilities); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_pointers); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel_2log_densities(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_observations); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel_4densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observation_probabilities); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_2densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_4log_densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_transition_model, PyObject *__pyx_v_observation_model, PyObject *__pyx_v_initial_distribution); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_2__getstate__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_4__setstate__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_state); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_6reset(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_initial_distribution); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_8viterbi(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_10forward(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations, PyObject *__pyx_v_reset); /* proto */
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_12forward_generator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations, PyObject *__pyx_v_block_size); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
-static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_tp_new_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_int_0;
-static PyObject *__pyx_int_1;
-static PyObject *__pyx_int_184977713;
-static PyObject *__pyx_int_neg_1;
-static PyObject *__pyx_tuple_;
-static PyObject *__pyx_slice__3;
-static PyObject *__pyx_tuple__2;
-static PyObject *__pyx_tuple__4;
-static PyObject *__pyx_tuple__5;
-static PyObject *__pyx_tuple__6;
-static PyObject *__pyx_tuple__7;
-static PyObject *__pyx_tuple__9;
-static PyObject *__pyx_tuple__10;
-static PyObject *__pyx_tuple__11;
-static PyObject *__pyx_tuple__12;
-static PyObject *__pyx_tuple__13;
-static PyObject *__pyx_tuple__14;
-static PyObject *__pyx_tuple__15;
-static PyObject *__pyx_tuple__16;
-static PyObject *__pyx_tuple__17;
-static PyObject *__pyx_tuple__18;
-static PyObject *__pyx_tuple__19;
-static PyObject *__pyx_tuple__20;
-static PyObject *__pyx_tuple__21;
-static PyObject *__pyx_tuple__22;
-static PyObject *__pyx_tuple__23;
-static PyObject *__pyx_tuple__24;
-static PyObject *__pyx_tuple__25;
-static PyObject *__pyx_tuple__26;
-static PyObject *__pyx_tuple__27;
-static PyObject *__pyx_tuple__28;
-static PyObject *__pyx_tuple__29;
-static PyObject *__pyx_tuple__31;
-static PyObject *__pyx_tuple__33;
-static PyObject *__pyx_tuple__35;
-static PyObject *__pyx_tuple__37;
-static PyObject *__pyx_tuple__39;
-static PyObject *__pyx_tuple__41;
-static PyObject *__pyx_tuple__43;
-static PyObject *__pyx_tuple__44;
-static PyObject *__pyx_tuple__46;
-static PyObject *__pyx_tuple__48;
-static PyObject *__pyx_tuple__50;
-static PyObject *__pyx_tuple__52;
-static PyObject *__pyx_tuple__54;
-static PyObject *__pyx_tuple__56;
-static PyObject *__pyx_tuple__57;
-static PyObject *__pyx_tuple__59;
-static PyObject *__pyx_tuple__60;
-static PyObject *__pyx_tuple__62;
-static PyObject *__pyx_tuple__64;
-static PyObject *__pyx_tuple__66;
-static PyObject *__pyx_tuple__67;
-static PyObject *__pyx_tuple__69;
-static PyObject *__pyx_tuple__71;
-static PyObject *__pyx_tuple__72;
-static PyObject *__pyx_tuple__73;
-static PyObject *__pyx_tuple__74;
-static PyObject *__pyx_tuple__75;
-static PyObject *__pyx_tuple__76;
-static PyObject *__pyx_tuple__77;
-static PyObject *__pyx_tuple__78;
-static PyObject *__pyx_tuple__79;
-static PyObject *__pyx_codeobj__8;
-static PyObject *__pyx_codeobj__30;
-static PyObject *__pyx_codeobj__32;
-static PyObject *__pyx_codeobj__34;
-static PyObject *__pyx_codeobj__36;
-static PyObject *__pyx_codeobj__38;
-static PyObject *__pyx_codeobj__40;
-static PyObject *__pyx_codeobj__42;
-static PyObject *__pyx_codeobj__45;
-static PyObject *__pyx_codeobj__47;
-static PyObject *__pyx_codeobj__49;
-static PyObject *__pyx_codeobj__51;
-static PyObject *__pyx_codeobj__53;
-static PyObject *__pyx_codeobj__55;
-static PyObject *__pyx_codeobj__58;
-static PyObject *__pyx_codeobj__61;
-static PyObject *__pyx_codeobj__63;
-static PyObject *__pyx_codeobj__65;
-static PyObject *__pyx_codeobj__68;
-static PyObject *__pyx_codeobj__70;
-static PyObject *__pyx_codeobj__80;
-/* Late includes */
-
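The declarations above enumerate the public surface of the deleted madmom.ml.hmm module: a TransitionModel storing sparse transitions, an ObservationModel base class with a DiscreteObservationModel subclass, and a HiddenMarkovModel exposing viterbi, forward and forward_generator. As a minimal sketch of how these pieces fit together, assuming the madmom package is installed (the array values, the (num_observations, num_states) orientation of the observation matrix, the uniform default for initial_distribution, and the viterbi return value are illustrative assumptions inferred from the names declared above):

    import numpy as np
    from madmom.ml.hmm import (DiscreteObservationModel, HiddenMarkovModel,
                               TransitionModel)

    # two states; the outgoing probabilities of each state sum to 1
    states      = np.array([0, 1, 0, 1])          # destination states
    prev_states = np.array([0, 0, 1, 1])          # origination states
    probs       = np.array([0.7, 0.3, 0.6, 0.4])  # transition probabilities
    tm = TransitionModel.from_dense(states, prev_states, probs)

    # P(observation | state), assumed shape (num_observations, num_states)
    om = DiscreteObservationModel(np.array([[0.8, 0.1],
                                            [0.1, 0.1],
                                            [0.1, 0.8]]))

    hmm = HiddenMarkovModel(tm, om)  # initial distribution assumed uniform
    # viterbi is assumed to return the best state path and its log probability
    path, log_prob = hmm.viterbi(np.array([0, 0, 2, 2]))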
-/* "madmom/ml/hmm.pyx":80
- *     """
- * 
- *     def __init__(self, states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         # save the parameters
- *         self.states = states
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel___init__[] = "TransitionModel.__init__(self, states, pointers, probabilities)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_1__init__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel___init__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_states = 0;
-  PyObject *__pyx_v_pointers = 0;
-  PyObject *__pyx_v_probabilities = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_states,&__pyx_n_s_pointers,&__pyx_n_s_probabilities,0};
-    PyObject* values[4] = {0,0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_states)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 1, 4, 4, 1); __PYX_ERR(0, 80, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pointers)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 1, 4, 4, 2); __PYX_ERR(0, 80, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  3:
-        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_probabilities)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 1, 4, 4, 3); __PYX_ERR(0, 80, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 80, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_states = values[1];
-    __pyx_v_pointers = values[2];
-    __pyx_v_probabilities = values[3];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__init__", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 80, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel___init__(__pyx_self, __pyx_v_self, __pyx_v_states, __pyx_v_pointers, __pyx_v_probabilities);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_states, PyObject *__pyx_v_pointers, PyObject *__pyx_v_probabilities) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__init__", 0);
-
-  /* "madmom/ml/hmm.pyx":82
- *     def __init__(self, states, pointers, probabilities):
- *         # save the parameters
- *         self.states = states             # <<<<<<<<<<<<<<
- *         self.pointers = pointers
- *         self.probabilities = probabilities
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_states, __pyx_v_states) < 0) __PYX_ERR(0, 82, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":83
- *         # save the parameters
- *         self.states = states
- *         self.pointers = pointers             # <<<<<<<<<<<<<<
- *         self.probabilities = probabilities
- * 
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pointers, __pyx_v_pointers) < 0) __PYX_ERR(0, 83, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":84
- *         self.states = states
- *         self.pointers = pointers
- *         self.probabilities = probabilities             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_probabilities, __pyx_v_probabilities) < 0) __PYX_ERR(0, 84, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":80
- *     """
- * 
- *     def __init__(self, states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         # save the parameters
- *         self.states = states
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
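Every Python-visible method in this generated file comes in two halves: a __pyx_pw_... wrapper that unpacks positional and keyword arguments (the large switch blocks above) and a __pyx_pf_... function holding the translated body. The interleaved /* "madmom/ml/hmm.pyx":NN */ comments quote the original Cython source, so the constructor above is, line for line, nothing more than:

    def __init__(self, states, pointers, probabilities):
        # save the parameters
        self.states = states
        self.pointers = pointers
        self.probabilities = probabilities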
-/* "madmom/ml/hmm.pyx":87
- * 
- *     @property
- *     def num_states(self):             # <<<<<<<<<<<<<<
- *         """Number of states."""
- *         return len(self.pointers) - 1
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_3num_states(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_2num_states[] = "TransitionModel.num_states(self)\nNumber of states.";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_3num_states = {"num_states", (PyCFunction)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_3num_states, METH_O, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_2num_states};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_3num_states(PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("num_states (wrapper)", 0);
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_2num_states(__pyx_self, ((PyObject *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_2num_states(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  Py_ssize_t __pyx_t_2;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("num_states", 0);
-
-  /* "madmom/ml/hmm.pyx":89
- *     def num_states(self):
- *         """Number of states."""
- *         return len(self.pointers) - 1             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 89, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyInt_FromSsize_t((__pyx_t_2 - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":87
- * 
- *     @property
- *     def num_states(self):             # <<<<<<<<<<<<<<
- *         """Number of states."""
- *         return len(self.pointers) - 1
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.num_states", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "madmom/ml/hmm.pyx":92
- * 
- *     @property
- *     def num_transitions(self):             # <<<<<<<<<<<<<<
- *         """Number of transitions."""
- *         return len(self.probabilities)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_5num_transitions(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_4num_transitions[] = "TransitionModel.num_transitions(self)\nNumber of transitions.";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_5num_transitions = {"num_transitions", (PyCFunction)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_5num_transitions, METH_O, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_4num_transitions};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_5num_transitions(PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("num_transitions (wrapper)", 0);
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_4num_transitions(__pyx_self, ((PyObject *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_4num_transitions(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  Py_ssize_t __pyx_t_2;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("num_transitions", 0);
-
-  /* "madmom/ml/hmm.pyx":94
- *     def num_transitions(self):
- *         """Number of transitions."""
- *         return len(self.probabilities)             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_probabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 94, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":92
- * 
- *     @property
- *     def num_transitions(self):             # <<<<<<<<<<<<<<
- *         """Number of transitions."""
- *         return len(self.probabilities)
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.num_transitions", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
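Together, the two properties above pin down the sparse layout: pointers holds one slice boundary per state plus a trailing sentinel, while states and probabilities hold one entry per transition. A worked example with illustrative values (consistent with the docstrings quoted further below):

    pointers      = np.array([0, 2, 4])     # slice bounds, num_states + 1 entries
    states        = np.array([0, 1, 0, 1])  # previous states, grouped by target
    probabilities = np.array([0.7, 0.6, 0.3, 0.4])

    num_states      = len(pointers) - 1     # 2, as num_states computes above
    num_transitions = len(probabilities)    # 4, as num_transitions computes above
    # transitions into state s: states[pointers[s]:pointers[s+1]],
    # with probabilities[pointers[s]:pointers[s+1]]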
-/* "madmom/ml/hmm.pyx":97
- * 
- *     @property
- *     def log_probabilities(self):             # <<<<<<<<<<<<<<
- *         """Transition log probabilities."""
- *         return np.log(self.probabilities)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_7log_probabilities(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_6log_probabilities[] = "TransitionModel.log_probabilities(self)\nTransition log probabilities.";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_7log_probabilities = {"log_probabilities", (PyCFunction)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_7log_probabilities, METH_O, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_6log_probabilities};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_7log_probabilities(PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("log_probabilities (wrapper)", 0);
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_6log_probabilities(__pyx_self, ((PyObject *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_6log_probabilities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("log_probabilities", 0);
-
-  /* "madmom/ml/hmm.pyx":99
- *     def log_probabilities(self):
- *         """Transition log probabilities."""
- *         return np.log(self.probabilities)             # <<<<<<<<<<<<<<
- * 
- *     @staticmethod
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_log); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_probabilities); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":97
- * 
- *     @property
- *     def log_probabilities(self):             # <<<<<<<<<<<<<<
- *         """Transition log probabilities."""
- *         return np.log(self.probabilities)
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.log_probabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
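The property above merely applies np.log to the stored probabilities, so any zero-probability transition becomes -inf; the interned string __pyx_kp_s_inf_log_probability_during_Vite and the warnings machinery declared earlier suggest that this case is warned about during Viterbi decoding. For instance:

    >>> np.log(np.array([0.5, 0.0]))  # log(0) emits a RuntimeWarning
    array([-0.69314718,        -inf])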
-/* "madmom/ml/hmm.pyx":102
- * 
- *     @staticmethod
- *     def make_dense(states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a dense representation of sparse transitions.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_9make_dense(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_8make_dense[] = "TransitionModel.make_dense(states, pointers, probabilities)\n\n        Return a dense representation of sparse transitions.\n\n        Parameters\n        ----------\n        states : numpy array\n            All states transitioning to state s are returned in:\n            states[pointers[s]:pointers[s+1]]\n        pointers : numpy array\n            Pointers for the `states` array for state s.\n        probabilities : numpy array\n            The corresponding transition probabilities are returned in:\n            probabilities[pointers[s]:pointers[s+1]].\n\n        Returns\n        -------\n        states : numpy array, shape (num_transitions,)\n            Array with states (i.e. destination states).\n        prev_states : numpy array, shape (num_transitions,)\n            Array with previous states (i.e. origination states).\n        probabilities : numpy array, shape (num_transitions,)\n            Transition probabilities.\n\n        See Also\n        --------\n        :class:`TransitionModel`\n\n        Notes\n        -----\n        Three 1D numpy arrays of the same length must be given. The indices\n        correspond to each other, i.e. the first entries of all three arrays\n        define the transition from the state defined in prev_states[0] to the\n        one defined in states[0] with the probability defined in\n        probabilities[0].\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_9make_dense = {"make_dense", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_9make_dense, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_8make_dense};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_9make_dense(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_states = 0;
-  PyObject *__pyx_v_pointers = 0;
-  PyObject *__pyx_v_probabilities = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("make_dense (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_states,&__pyx_n_s_pointers,&__pyx_n_s_probabilities,0};
-    PyObject* values[3] = {0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_states)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pointers)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("make_dense", 1, 3, 3, 1); __PYX_ERR(0, 102, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_probabilities)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("make_dense", 1, 3, 3, 2); __PYX_ERR(0, 102, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_dense") < 0)) __PYX_ERR(0, 102, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-    }
-    __pyx_v_states = values[0];
-    __pyx_v_pointers = values[1];
-    __pyx_v_probabilities = values[2];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("make_dense", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 102, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.make_dense", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_8make_dense(__pyx_self, __pyx_v_states, __pyx_v_pointers, __pyx_v_probabilities);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_8make_dense(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_states, PyObject *__pyx_v_pointers, PyObject *__pyx_v_probabilities) {
-  PyObject *__pyx_v_csr_matrix = NULL;
-  PyObject *__pyx_v_transitions = NULL;
-  PyObject *__pyx_v_prev_states = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *(*__pyx_t_7)(PyObject *);
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("make_dense", 0);
-  __Pyx_INCREF(__pyx_v_states);
-
-  /* "madmom/ml/hmm.pyx":138
- * 
- *         """
- *         from scipy.sparse import csr_matrix             # <<<<<<<<<<<<<<
- *         # convert everything into a sparse CSR matrix
- *         transitions = csr_matrix((np.array(probabilities),
- */
-  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_n_s_csr_matrix);
-  __Pyx_GIVEREF(__pyx_n_s_csr_matrix);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_csr_matrix);
-  __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_t_1);
-  __pyx_v_csr_matrix = __pyx_t_1;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":140
- *         from scipy.sparse import csr_matrix
- *         # convert everything into a sparse CSR matrix
- *         transitions = csr_matrix((np.array(probabilities),             # <<<<<<<<<<<<<<
- *                                   np.array(states), np.array(pointers)))
- *         # convert to correct types
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
-    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
-    if (likely(__pyx_t_3)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
-      __Pyx_INCREF(__pyx_t_3);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_4, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_3, __pyx_v_probabilities) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_probabilities);
-  __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":141
- *         # convert everything into a sparse CSR matrix
- *         transitions = csr_matrix((np.array(probabilities),
- *                                   np.array(states), np.array(pointers)))             # <<<<<<<<<<<<<<
- *         # convert to correct types
- *         states, prev_states = transitions.nonzero()
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_3)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_3);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_5, function);
-    }
-  }
-  __pyx_t_4 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_3, __pyx_v_states) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_states);
-  __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_3)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_3);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_5 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_3, __pyx_v_pointers) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_pointers);
-  __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 141, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-
-  /* "madmom/ml/hmm.pyx":140
- *         from scipy.sparse import csr_matrix
- *         # convert everything into a sparse CSR matrix
- *         transitions = csr_matrix((np.array(probabilities),             # <<<<<<<<<<<<<<
- *                                   np.array(states), np.array(pointers)))
- *         # convert to correct types
- */
-  __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 140, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_4);
-  PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_5);
-  PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5);
-  __pyx_t_1 = 0;
-  __pyx_t_4 = 0;
-  __pyx_t_5 = 0;
-  __Pyx_INCREF(__pyx_v_csr_matrix);
-  __pyx_t_5 = __pyx_v_csr_matrix; __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_5, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_4, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_v_transitions = __pyx_t_2;
-  __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":143
- *                                   np.array(states), np.array(pointers)))
- *         # convert to correct types
- *         states, prev_states = transitions.nonzero()             # <<<<<<<<<<<<<<
- *         # return them
- *         return states, prev_states, probabilities
- */
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_transitions, __pyx_n_s_nonzero); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_6 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_6)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_6);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_5, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) {
-    PyObject* sequence = __pyx_t_2;
-    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
-    if (unlikely(size != 2)) {
-      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
-      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-      __PYX_ERR(0, 143, __pyx_L1_error)
-    }
-    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    if (likely(PyTuple_CheckExact(sequence))) {
-      __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); 
-      __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); 
-    } else {
-      __pyx_t_5 = PyList_GET_ITEM(sequence, 0); 
-      __pyx_t_6 = PyList_GET_ITEM(sequence, 1); 
-    }
-    __Pyx_INCREF(__pyx_t_5);
-    __Pyx_INCREF(__pyx_t_6);
-    #else
-    __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 143, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    #endif
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  } else {
-    Py_ssize_t index = -1;
-    __pyx_t_4 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __pyx_t_7 = Py_TYPE(__pyx_t_4)->tp_iternext;
-    index = 0; __pyx_t_5 = __pyx_t_7(__pyx_t_4); if (unlikely(!__pyx_t_5)) goto __pyx_L3_unpacking_failed;
-    __Pyx_GOTREF(__pyx_t_5);
-    index = 1; __pyx_t_6 = __pyx_t_7(__pyx_t_4); if (unlikely(!__pyx_t_6)) goto __pyx_L3_unpacking_failed;
-    __Pyx_GOTREF(__pyx_t_6);
-    if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_4), 2) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
-    __pyx_t_7 = NULL;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    goto __pyx_L4_unpacking_done;
-    __pyx_L3_unpacking_failed:;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_7 = NULL;
-    if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
-    __PYX_ERR(0, 143, __pyx_L1_error)
-    __pyx_L4_unpacking_done:;
-  }
-  __Pyx_DECREF_SET(__pyx_v_states, __pyx_t_5);
-  __pyx_t_5 = 0;
-  __pyx_v_prev_states = __pyx_t_6;
-  __pyx_t_6 = 0;
-
-  /* "madmom/ml/hmm.pyx":145
- *         states, prev_states = transitions.nonzero()
- *         # return them
- *         return states, prev_states, probabilities             # <<<<<<<<<<<<<<
- * 
- *     @staticmethod
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 145, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_v_states);
-  __Pyx_GIVEREF(__pyx_v_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_states);
-  __Pyx_INCREF(__pyx_v_prev_states);
-  __Pyx_GIVEREF(__pyx_v_prev_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_prev_states);
-  __Pyx_INCREF(__pyx_v_probabilities);
-  __Pyx_GIVEREF(__pyx_v_probabilities);
-  PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_probabilities);
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":102
- * 
- *     @staticmethod
- *     def make_dense(states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a dense representation of sparse transitions.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.make_dense", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_csr_matrix);
-  __Pyx_XDECREF(__pyx_v_transitions);
-  __Pyx_XDECREF(__pyx_v_prev_states);
-  __Pyx_XDECREF(__pyx_v_states);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
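Stripped of the reference-counting scaffolding, the make_dense body above corresponds directly to the Cython source quoted in its comments:

    @staticmethod
    def make_dense(states, pointers, probabilities):
        import numpy as np
        from scipy.sparse import csr_matrix
        # interpret (data, indices, indptr) as a CSR matrix whose rows are the
        # destination states and whose columns are the previous states
        transitions = csr_matrix((np.array(probabilities),
                                  np.array(states), np.array(pointers)))
        # nonzero() yields the (row, column) = (state, prev_state) index pairs
        states, prev_states = transitions.nonzero()
        return states, prev_states, probabilities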
-/* "madmom/ml/hmm.pyx":148
- * 
- *     @staticmethod
- *     def make_sparse(states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a sparse representation of dense transitions.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_11make_sparse(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_10make_sparse[] = "TransitionModel.make_sparse(states, prev_states, probabilities)\n\n        Return a sparse representation of dense transitions.\n\n        This method removes all duplicate states and thus allows an efficient\n        Viterbi decoding of the HMM.\n\n        Parameters\n        ----------\n        states : numpy array, shape (num_transitions,)\n            Array with states (i.e. destination states).\n        prev_states : numpy array, shape (num_transitions,)\n            Array with previous states (i.e. origination states).\n        probabilities : numpy array, shape (num_transitions,)\n            Transition probabilities.\n\n        Returns\n        -------\n        states : numpy array\n            All states transitioning to state s are returned in:\n            states[pointers[s]:pointers[s+1]]\n        pointers : numpy array\n            Pointers for the `states` array for state s.\n        probabilities : numpy array\n            The corresponding transition probabilities are returned in:\n            probabilities[pointers[s]:pointers[s+1]].\n\n        See Also\n        --------\n        :class:`TransitionModel`\n\n        Notes\n        -----\n        Three 1D numpy arrays of the same length must be given. The indices\n        correspond to each other, i.e. the first entries of all three arrays\n        define the transition from the state defined in prev_states[0] to the\n        one defined in states[0] with the probability defined in\n        probabilities[0].\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_11make_sparse = {"make_sparse", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_11make_sparse, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_10make_sparse};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_11make_sparse(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_states = 0;
-  PyObject *__pyx_v_prev_states = 0;
-  PyObject *__pyx_v_probabilities = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("make_sparse (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_states,&__pyx_n_s_prev_states,&__pyx_n_s_probabilities,0};
-    PyObject* values[3] = {0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_states)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prev_states)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("make_sparse", 1, 3, 3, 1); __PYX_ERR(0, 148, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_probabilities)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("make_sparse", 1, 3, 3, 2); __PYX_ERR(0, 148, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_sparse") < 0)) __PYX_ERR(0, 148, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-    }
-    __pyx_v_states = values[0];
-    __pyx_v_prev_states = values[1];
-    __pyx_v_probabilities = values[2];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("make_sparse", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 148, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.make_sparse", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_10make_sparse(__pyx_self, __pyx_v_states, __pyx_v_prev_states, __pyx_v_probabilities);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
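Before building the CSR structure, the implementation that follows first normalizes its inputs and validates them: per the quoted source, np.bincount(prev_states, weights=probabilities) must be all ones, i.e. the transition probabilities leaving each previous state must sum to 1. In plain NumPy (the error message is an assumption, and the quoted dtype=np.int relies on the np.int alias removed in NumPy 1.24; plain int would be used today):

    prev_states = np.asarray(prev_states, dtype=int)
    probabilities = np.asarray(probabilities)
    # total outgoing probability per previous state must be 1
    if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
        raise ValueError('not a probability distribution')  # assumed message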
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_10make_sparse(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_states, PyObject *__pyx_v_prev_states, PyObject *__pyx_v_probabilities) {
-  PyObject *__pyx_v_csr_matrix = NULL;
-  PyObject *__pyx_v_num_states = NULL;
-  PyObject *__pyx_v_transitions = NULL;
-  PyObject *__pyx_v_pointers = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  int __pyx_t_7;
-  int __pyx_t_8;
-  int __pyx_t_9;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("make_sparse", 0);
-  __Pyx_INCREF(__pyx_v_states);
-  __Pyx_INCREF(__pyx_v_prev_states);
-  __Pyx_INCREF(__pyx_v_probabilities);
-
-  /* "madmom/ml/hmm.pyx":187
- * 
- *         """
- *         from scipy.sparse import csr_matrix             # <<<<<<<<<<<<<<
- *         # check for a proper probability distribution, i.e. the transition
- *         # probabilities of each prev_state must sum to 1
- */
-  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 187, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_n_s_csr_matrix);
-  __Pyx_GIVEREF(__pyx_n_s_csr_matrix);
-  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_csr_matrix);
-  __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 187, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_csr_matrix); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 187, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_t_1);
-  __pyx_v_csr_matrix = __pyx_t_1;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":190
- *         # check for a proper probability distribution, i.e. the emission
- *         # probabilities of each prev_state must sum to 1
- *         states = np.asarray(states)             # <<<<<<<<<<<<<<
- *         prev_states = np.asarray(prev_states, dtype=np.int)
- *         probabilities = np.asarray(probabilities)
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 190, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 190, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_1)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_1);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_1, __pyx_v_states) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_states);
-  __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 190, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF_SET(__pyx_v_states, __pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":191
- *         # probabilities of each prev_state must sum to 1
- *         states = np.asarray(states)
- *         prev_states = np.asarray(prev_states, dtype=np.int)             # <<<<<<<<<<<<<<
- *         probabilities = np.asarray(probabilities)
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_v_prev_states);
-  __Pyx_GIVEREF(__pyx_v_prev_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_prev_states);
-  __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 191, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF_SET(__pyx_v_prev_states, __pyx_t_5);
-  __pyx_t_5 = 0;
-
-  /* "madmom/ml/hmm.pyx":192
- *         states = np.asarray(states)
- *         prev_states = np.asarray(prev_states, dtype=np.int)
- *         probabilities = np.asarray(probabilities)             # <<<<<<<<<<<<<<
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
- *             raise ValueError('Not a probability distribution.')
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_1)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_1);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-    }
-  }
-  __pyx_t_5 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_1, __pyx_v_probabilities) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_probabilities);
-  __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 192, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF_SET(__pyx_v_probabilities, __pyx_t_5);
-  __pyx_t_5 = 0;
-
-  /* "madmom/ml/hmm.pyx":193
- *         prev_states = np.asarray(prev_states, dtype=np.int)
- *         probabilities = np.asarray(probabilities)
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Not a probability distribution.')
- *         # convert everything into a sparse CSR matrix, make sure it is square.
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_allclose); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_bincount); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_v_prev_states);
-  __Pyx_GIVEREF(__pyx_v_prev_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_prev_states);
-  __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_weights, __pyx_v_probabilities) < 0) __PYX_ERR(0, 193, __pyx_L1_error)
-  __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  __pyx_t_7 = 0;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_1, function);
-      __pyx_t_7 = 1;
-    }
-  }
-  #if CYTHON_FAST_PYCALL
-  if (PyFunction_Check(__pyx_t_1)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_6, __pyx_int_1};
-    __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  } else
-  #endif
-  #if CYTHON_FAST_PYCCALL
-  if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_6, __pyx_int_1};
-    __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  } else
-  #endif
-  {
-    __pyx_t_2 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    if (__pyx_t_4) {
-      __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __pyx_t_4 = NULL;
-    }
-    __Pyx_GIVEREF(__pyx_t_6);
-    PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_7, __pyx_t_6);
-    __Pyx_INCREF(__pyx_int_1);
-    __Pyx_GIVEREF(__pyx_int_1);
-    PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_7, __pyx_int_1);
-    __pyx_t_6 = 0;
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  }
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 193, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_9 = ((!__pyx_t_8) != 0);
-  if (unlikely(__pyx_t_9)) {
-
-    /* "madmom/ml/hmm.pyx":194
- *         probabilities = np.asarray(probabilities)
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
- *             raise ValueError('Not a probability distribution.')             # <<<<<<<<<<<<<<
- *         # convert everything into a sparse CSR matrix, make sure it is square.
- *         # looking through prev_states is enough, because there *must* be a
- */
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 194, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_Raise(__pyx_t_5, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __PYX_ERR(0, 194, __pyx_L1_error)
-
-    /* "madmom/ml/hmm.pyx":193
- *         prev_states = np.asarray(prev_states, dtype=np.int)
- *         probabilities = np.asarray(probabilities)
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Not a probability distribution.')
- *         # convert everything into a sparse CSR matrix, make sure it is square.
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":198
- *         # looking through prev_states is enough, because there *must* be a
- *         # transition *from* every state
- *         num_states = max(prev_states) + 1             # <<<<<<<<<<<<<<
- *         transitions = csr_matrix((probabilities, (states, prev_states)),
- *                                  shape=(num_states, num_states))
- */
-  __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_max, __pyx_v_prev_states); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 198, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_1 = __Pyx_PyInt_AddObjC(__pyx_t_5, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 198, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_v_num_states = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":199
- *         # transition *from* every state
- *         num_states = max(prev_states) + 1
- *         transitions = csr_matrix((probabilities, (states, prev_states)),             # <<<<<<<<<<<<<<
- *                                  shape=(num_states, num_states))
- *         # convert to correct types
- */
-  __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_v_states);
-  __Pyx_GIVEREF(__pyx_v_states);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_states);
-  __Pyx_INCREF(__pyx_v_prev_states);
-  __Pyx_GIVEREF(__pyx_v_prev_states);
-  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_prev_states);
-  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 199, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_INCREF(__pyx_v_probabilities);
-  __Pyx_GIVEREF(__pyx_v_probabilities);
-  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_probabilities);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_5);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
-  __pyx_t_5 = 0;
-
-  /* "madmom/ml/hmm.pyx":200
- *         num_states = max(prev_states) + 1
- *         transitions = csr_matrix((probabilities, (states, prev_states)),
- *                                  shape=(num_states, num_states))             # <<<<<<<<<<<<<<
- *         # convert to correct types
- *         states = transitions.indices.astype(np.uint32)
- */
-  __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 200, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 200, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_v_num_states);
-  __Pyx_GIVEREF(__pyx_v_num_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_num_states);
-  __Pyx_INCREF(__pyx_v_num_states);
-  __Pyx_GIVEREF(__pyx_v_num_states);
-  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_num_states);
-  if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_shape, __pyx_t_2) < 0) __PYX_ERR(0, 200, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":199
- *         # transition *from* every state
- *         num_states = max(prev_states) + 1
- *         transitions = csr_matrix((probabilities, (states, prev_states)),             # <<<<<<<<<<<<<<
- *                                  shape=(num_states, num_states))
- *         # convert to correct types
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_csr_matrix, __pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 199, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_v_transitions = __pyx_t_2;
-  __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":202
- *                                  shape=(num_states, num_states))
- *         # convert to correct types
- *         states = transitions.indices.astype(np.uint32)             # <<<<<<<<<<<<<<
- *         pointers = transitions.indptr.astype(np.uint32)
- *         probabilities = transitions.data.astype(dtype=np.float)
- */
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_transitions, __pyx_n_s_indices); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 202, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_astype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 202, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 202, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_uint32); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 202, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_5 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
-    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1);
-    if (likely(__pyx_t_5)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
-      __Pyx_INCREF(__pyx_t_5);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_1, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 202, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF_SET(__pyx_v_states, __pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":203
- *         # convert to correct types
- *         states = transitions.indices.astype(np.uint32)
- *         pointers = transitions.indptr.astype(np.uint32)             # <<<<<<<<<<<<<<
- *         probabilities = transitions.data.astype(dtype=np.float)
- *         # return them
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_transitions, __pyx_n_s_indptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_astype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 203, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_uint32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 203, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_1)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_1);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_1, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 203, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_v_pointers = __pyx_t_2;
-  __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":204
- *         states = transitions.indices.astype(np.uint32)
- *         pointers = transitions.indptr.astype(np.uint32)
- *         probabilities = transitions.data.astype(dtype=np.float)             # <<<<<<<<<<<<<<
- *         # return them
- *         return states, pointers, probabilities
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_transitions, __pyx_n_s_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_astype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 204, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF_SET(__pyx_v_probabilities, __pyx_t_1);
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":206
- *         probabilities = transitions.data.astype(dtype=np.float)
- *         # return them
- *         return states, pointers, probabilities             # <<<<<<<<<<<<<<
- * 
- *     @classmethod
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 206, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_v_states);
-  __Pyx_GIVEREF(__pyx_v_states);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_states);
-  __Pyx_INCREF(__pyx_v_pointers);
-  __Pyx_GIVEREF(__pyx_v_pointers);
-  PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_pointers);
-  __Pyx_INCREF(__pyx_v_probabilities);
-  __Pyx_GIVEREF(__pyx_v_probabilities);
-  PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_probabilities);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":148
- * 
- *     @staticmethod
- *     def make_sparse(states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a sparse representation of dense transitions.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.make_sparse", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_csr_matrix);
-  __Pyx_XDECREF(__pyx_v_num_states);
-  __Pyx_XDECREF(__pyx_v_transitions);
-  __Pyx_XDECREF(__pyx_v_pointers);
-  __Pyx_XDECREF(__pyx_v_states);
-  __Pyx_XDECREF(__pyx_v_prev_states);
-  __Pyx_XDECREF(__pyx_v_probabilities);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
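
The wrapper and body above compile the `make_sparse` static method whose pyx source is quoted piecewise in the block comments. Assembled from those fragments, a minimal Python sketch of what it does (a reading aid, not the compiled code; the legacy `np.int`/`np.float` aliases used at the time, removed in NumPy >= 1.24, are swapped for the builtins here):

    import numpy as np
    from scipy.sparse import csr_matrix

    def make_sparse(states, prev_states, probabilities):
        # the transition probabilities leaving each prev_state must sum to 1
        # (the original pyx comment calls these "emission" probabilities)
        states = np.asarray(states)
        prev_states = np.asarray(prev_states, dtype=int)   # originally np.int
        probabilities = np.asarray(probabilities)
        if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
            raise ValueError('Not a probability distribution.')
        # sizing via prev_states suffices, since there *must* be a
        # transition *from* every state
        num_states = max(prev_states) + 1
        # build a square CSR matrix from the (data, (row, col)) triplets
        transitions = csr_matrix((probabilities, (states, prev_states)),
                                 shape=(num_states, num_states))
        # the CSR internals are exactly the sparse representation returned
        states = transitions.indices.astype(np.uint32)
        pointers = transitions.indptr.astype(np.uint32)
        probabilities = transitions.data.astype(float)     # originally np.float
        return states, pointers, probabilities
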
-
-/* "madmom/ml/hmm.pyx":209
- * 
- *     @classmethod
- *     def from_dense(cls, states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Instantiate a TransitionModel from dense transitions.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_13from_dense(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_12from_dense[] = "TransitionModel.from_dense(cls, states, prev_states, probabilities)\n\n        Instantiate a TransitionModel from dense transitions.\n\n        Parameters\n        ----------\n        states : numpy array, shape (num_transitions,)\n            Array with states (i.e. destination states).\n        prev_states : numpy array, shape (num_transitions,)\n            Array with previous states (i.e. origination states).\n        probabilities : numpy array, shape (num_transitions,)\n            Transition probabilities.\n\n        Returns\n        -------\n        :class:`TransitionModel` instance\n            TransitionModel instance.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_13from_dense = {"from_dense", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_13from_dense, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_15TransitionModel_12from_dense};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_15TransitionModel_13from_dense(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_cls = 0;
-  PyObject *__pyx_v_states = 0;
-  PyObject *__pyx_v_prev_states = 0;
-  PyObject *__pyx_v_probabilities = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("from_dense (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_cls,&__pyx_n_s_states,&__pyx_n_s_prev_states,&__pyx_n_s_probabilities,0};
-    PyObject* values[4] = {0,0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cls)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_states)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("from_dense", 1, 4, 4, 1); __PYX_ERR(0, 209, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prev_states)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("from_dense", 1, 4, 4, 2); __PYX_ERR(0, 209, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  3:
-        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_probabilities)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("from_dense", 1, 4, 4, 3); __PYX_ERR(0, 209, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "from_dense") < 0)) __PYX_ERR(0, 209, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-    }
-    __pyx_v_cls = values[0];
-    __pyx_v_states = values[1];
-    __pyx_v_prev_states = values[2];
-    __pyx_v_probabilities = values[3];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("from_dense", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 209, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.from_dense", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_15TransitionModel_12from_dense(__pyx_self, __pyx_v_cls, __pyx_v_states, __pyx_v_prev_states, __pyx_v_probabilities);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_15TransitionModel_12from_dense(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_cls, PyObject *__pyx_v_states, PyObject *__pyx_v_prev_states, PyObject *__pyx_v_probabilities) {
-  PyObject *__pyx_v_transitions = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("from_dense", 0);
-
-  /* "madmom/ml/hmm.pyx":229
- *         """
- *         # get a sparse representation of the transitions
- *         transitions = cls.make_sparse(states, prev_states, probabilities)             # <<<<<<<<<<<<<<
- *         # instantiate a new TransitionModel and return it
- *         return cls(*transitions)
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_cls, __pyx_n_s_make_sparse); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 229, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = NULL;
-  __pyx_t_4 = 0;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_3)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_3);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-      __pyx_t_4 = 1;
-    }
-  }
-  #if CYTHON_FAST_PYCALL
-  if (PyFunction_Check(__pyx_t_2)) {
-    PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_v_states, __pyx_v_prev_states, __pyx_v_probabilities};
-    __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
-  } else
-  #endif
-  #if CYTHON_FAST_PYCCALL
-  if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
-    PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_v_states, __pyx_v_prev_states, __pyx_v_probabilities};
-    __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
-  } else
-  #endif
-  {
-    __pyx_t_5 = PyTuple_New(3+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 229, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    if (__pyx_t_3) {
-      __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL;
-    }
-    __Pyx_INCREF(__pyx_v_states);
-    __Pyx_GIVEREF(__pyx_v_states);
-    PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_v_states);
-    __Pyx_INCREF(__pyx_v_prev_states);
-    __Pyx_GIVEREF(__pyx_v_prev_states);
-    PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_prev_states);
-    __Pyx_INCREF(__pyx_v_probabilities);
-    __Pyx_GIVEREF(__pyx_v_probabilities);
-    PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_probabilities);
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 229, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  }
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_v_transitions = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":231
- *         transitions = cls.make_sparse(states, prev_states, probabilities)
- *         # instantiate a new TransitionModel and return it
- *         return cls(*transitions)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PySequence_Tuple(__pyx_v_transitions); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 231, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_cls, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 231, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":209
- * 
- *     @classmethod
- *     def from_dense(cls, states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Instantiate a TransitionModel from dense transitions.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("madmom.ml.hmm.TransitionModel.from_dense", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_transitions);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
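
Per the quoted pyx source, the whole of `from_dense` that the block above compiles is a two-step classmethod; a minimal sketch:

    @classmethod
    def from_dense(cls, states, prev_states, probabilities):
        # get a sparse representation of the transitions
        transitions = cls.make_sparse(states, prev_states, probabilities)
        # instantiate a new TransitionModel from it; cls(*transitions) is
        # what the __Pyx_PySequence_Tuple + call sequence above implements
        return cls(*transitions)
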
-
-/* "madmom/ml/hmm.pyx":258
- *     """
- * 
- *     def __init__(self, pointers):             # <<<<<<<<<<<<<<
- *         # save parameters
- *         self.pointers = pointers
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_16ObservationModel___init__[] = "ObservationModel.__init__(self, pointers)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_1__init__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_16ObservationModel___init__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_pointers = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_pointers,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pointers)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 258, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 258, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_pointers = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 258, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_16ObservationModel___init__(__pyx_self, __pyx_v_self, __pyx_v_pointers);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_pointers) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__init__", 0);
-
-  /* "madmom/ml/hmm.pyx":260
- *     def __init__(self, pointers):
- *         # save parameters
- *         self.pointers = pointers             # <<<<<<<<<<<<<<
- * 
- *     def log_densities(self, observations):
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_pointers, __pyx_v_pointers) < 0) __PYX_ERR(0, 260, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":258
- *     """
- * 
- *     def __init__(self, pointers):             # <<<<<<<<<<<<<<
- *         # save parameters
- *         self.pointers = pointers
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "madmom/ml/hmm.pyx":262
- *         self.pointers = pointers
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities (or probabilities) of the observations for each state.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_3log_densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_16ObservationModel_2log_densities[] = "ObservationModel.log_densities(self, observations)\n\n        Log densities (or probabilities) of the observations for each state.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations.\n\n        Returns\n        -------\n        numpy array\n            Log densities as a 2D numpy array with the number of rows being\n            equal to the number of observations and the columns representing\n            the different observation log probability densities. The type must\n            be np.float.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_3log_densities = {"log_densities", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_3log_densities, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_16ObservationModel_2log_densities};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_3log_densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  CYTHON_UNUSED PyObject *__pyx_v_self = 0;
-  CYTHON_UNUSED PyObject *__pyx_v_observations = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("log_densities (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("log_densities", 1, 2, 2, 1); __PYX_ERR(0, 262, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "log_densities") < 0)) __PYX_ERR(0, 262, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("log_densities", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 262, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.log_densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_16ObservationModel_2log_densities(__pyx_self, __pyx_v_self, __pyx_v_observations);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel_2log_densities(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_observations) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("log_densities", 0);
-
-  /* "madmom/ml/hmm.pyx":280
- * 
- *         """
- *         raise NotImplementedError('must be implemented by subclass')             # <<<<<<<<<<<<<<
- * 
- *     def densities(self, observations):
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_NotImplementedError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 280, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(0, 280, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":262
- *         self.pointers = pointers
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities (or probabilities) of the observations for each state.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.log_densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
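
Taken together, the two methods above define the `ObservationModel` base-class contract quoted in the pyx fragments: `__init__` only stores the `pointers` array, and `log_densities` is abstract. A minimal sketch (the class header is inferred; it is not shown in this hunk):

    class ObservationModel(object):
        def __init__(self, pointers):
            # save parameters
            self.pointers = pointers

        def log_densities(self, observations):
            # subclasses must return a 2D array: one row per observation,
            # one column per distinct observation (log) density
            raise NotImplementedError('must be implemented by subclass')
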
-
-/* "madmom/ml/hmm.pyx":282
- *         raise NotImplementedError('must be implemented by subclass')
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities (or probabilities) of the observations for each state.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_5densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_16ObservationModel_4densities[] = "ObservationModel.densities(self, observations)\n\n        Densities (or probabilities) of the observations for each state.\n\n        This defaults to computing the exp of the `log_densities`.\n        You can provide a special implementation to speed-up everything.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations.\n\n        Returns\n        -------\n        numpy array\n            Densities as a 2D numpy array with the number of rows being equal\n            to the number of observations and the columns representing the\n            different observation log probability densities. The type must be\n            np.float.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_5densities = {"densities", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_5densities, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_16ObservationModel_4densities};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_16ObservationModel_5densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("densities (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("densities", 1, 2, 2, 1); __PYX_ERR(0, 282, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "densities") < 0)) __PYX_ERR(0, 282, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("densities", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 282, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_16ObservationModel_4densities(__pyx_self, __pyx_v_self, __pyx_v_observations);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_16ObservationModel_4densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("densities", 0);
-
-  /* "madmom/ml/hmm.pyx":303
- * 
- *         """
- *         return np.exp(self.log_densities(observations))             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 303, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_exp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 303, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_log_densities); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 303, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_5 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
-    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
-    if (likely(__pyx_t_5)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
-      __Pyx_INCREF(__pyx_t_5);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_4, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_observations) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_observations);
-  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 303, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 303, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":282
- *         raise NotImplementedError('must be implemented by subclass')
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities (or probabilities) of the observations for each state.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("madmom.ml.hmm.ObservationModel.densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
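
As the quoted source shows, `densities` defaults to `np.exp(self.log_densities(observations))`, so a subclass only has to implement `log_densities` (overriding `densities` is purely a speed-up). A small usage sketch with a hypothetical subclass, whose name and behaviour are invented for illustration:

    class UniformObservationModel(ObservationModel):
        def __init__(self, num_states):
            # one density column per state
            super(UniformObservationModel, self).__init__(
                np.arange(num_states, dtype=np.uint32))
            self.num_states = num_states

        def log_densities(self, observations):
            # every state explains every observation equally well
            obs = np.asarray(observations)
            return np.full((len(obs), self.num_states),
                           -np.log(self.num_states))

    om = UniformObservationModel(3)
    om.densities([0.1, 0.7])   # 2x3 array of 1/3, via np.exp of log_densities
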
-
-/* "madmom/ml/hmm.pyx":339
- *     """
- * 
- *     def __init__(self, observation_probabilities):             # <<<<<<<<<<<<<<
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel___init__[] = "DiscreteObservationModel.__init__(self, observation_probabilities)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_1__init__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel___init__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observation_probabilities = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observation_probabilities,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observation_probabilities)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, 1); __PYX_ERR(0, 339, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 339, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observation_probabilities = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__init__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 339, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel___init__(__pyx_self, __pyx_v_self, __pyx_v_observation_probabilities);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observation_probabilities) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_t_6;
-  int __pyx_t_7;
-  int __pyx_t_8;
-  PyObject *__pyx_t_9 = NULL;
-  PyObject *__pyx_t_10 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__init__", 0);
-
-  /* "madmom/ml/hmm.pyx":341
- *     def __init__(self, observation_probabilities):
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Not a probability distribution.')
- *         # instantiate an ObservationModel
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_allclose); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_observation_probabilities, __pyx_n_s_sum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 341, __pyx_L1_error)
-  __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  __pyx_t_6 = 0;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-      __pyx_t_6 = 1;
-    }
-  }
-  #if CYTHON_FAST_PYCALL
-  if (PyFunction_Check(__pyx_t_3)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_5, __pyx_int_1};
-    __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 341, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  } else
-  #endif
-  #if CYTHON_FAST_PYCCALL
-  if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_5, __pyx_int_1};
-    __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 341, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  } else
-  #endif
-  {
-    __pyx_t_2 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 341, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    if (__pyx_t_4) {
-      __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __pyx_t_4 = NULL;
-    }
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_6, __pyx_t_5);
-    __Pyx_INCREF(__pyx_int_1);
-    __Pyx_GIVEREF(__pyx_int_1);
-    PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_6, __pyx_int_1);
-    __pyx_t_5 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 341, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  }
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 341, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_8 = ((!__pyx_t_7) != 0);
-  if (unlikely(__pyx_t_8)) {
-
-    /* "madmom/ml/hmm.pyx":342
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):
- *             raise ValueError('Not a probability distribution.')             # <<<<<<<<<<<<<<
- *         # instantiate an ObservationModel
- *         super(DiscreteObservationModel, self).__init__(
- */
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 342, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __PYX_ERR(0, 342, __pyx_L1_error)
-
-    /* "madmom/ml/hmm.pyx":341
- *     def __init__(self, observation_probabilities):
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Not a probability distribution.')
- *         # instantiate an ObservationModel
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":344
- *             raise ValueError('Not a probability distribution.')
- *         # instantiate an ObservationModel
- *         super(DiscreteObservationModel, self).__init__(             # <<<<<<<<<<<<<<
- *             np.arange(observation_probabilities.shape[0], dtype=np.uint32))
- *         # save the observation probabilities
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_DiscreteObservationModel); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 344, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 344, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
-  __Pyx_INCREF(__pyx_v_self);
-  __Pyx_GIVEREF(__pyx_v_self);
-  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_self);
-  __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_super, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 344, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 344, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":345
- *         # instantiate an ObservationModel
- *         super(DiscreteObservationModel, self).__init__(
- *             np.arange(observation_probabilities.shape[0], dtype=np.uint32))             # <<<<<<<<<<<<<<
- *         # save the observation probabilities
- *         self.observation_probabilities = observation_probabilities
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_arange); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_observation_probabilities, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_GIVEREF(__pyx_t_4);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
-  __pyx_t_4 = 0;
-  __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_10) < 0) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 345, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_10) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_10);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 344, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":347
- *             np.arange(observation_probabilities.shape[0], dtype=np.uint32))
- *         # save the observation probabilities
- *         self.observation_probabilities = observation_probabilities             # <<<<<<<<<<<<<<
- * 
- *     def densities(self, observations):
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_observation_probabilities, __pyx_v_observation_probabilities) < 0) __PYX_ERR(0, 347, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":339
- *     """
- * 
- *     def __init__(self, observation_probabilities):             # <<<<<<<<<<<<<<
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_9);
-  __Pyx_XDECREF(__pyx_t_10);
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "madmom/ml/hmm.pyx":349
- *         self.observation_probabilities = observation_probabilities
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities of the observations.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_3densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel_2densities[] = "DiscreteObservationModel.densities(self, observations)\n\n        Densities of the observations.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations.\n\n        Returns\n        -------\n        numpy array\n            Densities of the observations.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_3densities = {"densities", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_3densities, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel_2densities};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_3densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("densities (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("densities", 1, 2, 2, 1); __PYX_ERR(0, 349, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "densities") < 0)) __PYX_ERR(0, 349, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("densities", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 349, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_2densities(__pyx_self, __pyx_v_self, __pyx_v_observations);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_2densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("densities", 0);
-
-  /* "madmom/ml/hmm.pyx":364
- * 
- *         """
- *         return self.observation_probabilities[:, observations].T             # <<<<<<<<<<<<<<
- * 
- *     def log_densities(self, observations):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_observation_probabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 364, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 364, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_INCREF(__pyx_slice__3);
-  __Pyx_GIVEREF(__pyx_slice__3);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_slice__3);
-  __Pyx_INCREF(__pyx_v_observations);
-  __Pyx_GIVEREF(__pyx_v_observations);
-  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_observations);
-  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 364, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_T); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 364, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":349
- *         self.observation_probabilities = observation_probabilities
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities of the observations.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "madmom/ml/hmm.pyx":366
- *         return self.observation_probabilities[:, observations].T
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities of the observations.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_5log_densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel_4log_densities[] = "DiscreteObservationModel.log_densities(self, observations)\n\n        Log densities of the observations.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations.\n\n        Returns\n        -------\n        numpy array\n            Log densities of the observations.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_5log_densities = {"log_densities", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_5log_densities, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_24DiscreteObservationModel_4log_densities};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_24DiscreteObservationModel_5log_densities(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("log_densities (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("log_densities", 1, 2, 2, 1); __PYX_ERR(0, 366, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "log_densities") < 0)) __PYX_ERR(0, 366, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("log_densities", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 366, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.log_densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_4log_densities(__pyx_self, __pyx_v_self, __pyx_v_observations);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_24DiscreteObservationModel_4log_densities(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("log_densities", 0);
-
-  /* "madmom/ml/hmm.pyx":381
- * 
- *         """
- *         return np.log(self.densities(observations))             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 381, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_log); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 381, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_densities); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 381, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_5 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
-    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
-    if (likely(__pyx_t_5)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
-      __Pyx_INCREF(__pyx_t_5);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_4, function);
-    }
-  }
-  __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_observations) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_observations);
-  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 381, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 381, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":366
- *         return self.observation_probabilities[:, observations].T
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities of the observations.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("madmom.ml.hmm.DiscreteObservationModel.log_densities", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
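Before the HiddenMarkovModel machinery begins below, it is worth noting what the three generated functions above actually compile. Reconstructed from the .pyx fragments quoted in the /* "madmom/ml/hmm.pyx" */ comments, the deleted DiscreteObservationModel reads approximately as the following pure-Python sketch; the ObservationModel stub is a minimal stand-in for the base class defined earlier in the module, not the verbatim deleted source:

    import numpy as np

    class ObservationModel(object):
        # minimal stand-in: stores the per-state pointers into the densities
        def __init__(self, pointers):
            self.pointers = pointers

    class DiscreteObservationModel(ObservationModel):
        def __init__(self, observation_probabilities):
            # check that it is a probability distribution
            if not np.allclose(observation_probabilities.sum(axis=1), 1):
                raise ValueError('Not a probability distribution.')
            # instantiate an ObservationModel (one pointer per state)
            super(DiscreteObservationModel, self).__init__(
                np.arange(observation_probabilities.shape[0], dtype=np.uint32))
            # save the observation probabilities
            self.observation_probabilities = observation_probabilities

        def densities(self, observations):
            # row t of the result holds P(observation_t | state) for all states
            return self.observation_probabilities[:, observations].T

        def log_densities(self, observations):
            return np.log(self.densities(observations))

For example, with two states and three possible discrete observations:

    om = DiscreteObservationModel(np.array([[0.5, 0.3, 0.2],
                                            [0.1, 0.1, 0.8]]))
    om.densities(np.array([0, 2]))  # -> array([[0.5, 0.1], [0.2, 0.8]])
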
-/* "madmom/ml/hmm.pyx":435
- *     """
- * 
- *     def __init__(self, transition_model, observation_model,             # <<<<<<<<<<<<<<
- *                  initial_distribution=None):
- *         # save the parameters
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel___init__[] = "HiddenMarkovModel.__init__(self, transition_model, observation_model, initial_distribution=None)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_1__init__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel___init__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_transition_model = 0;
-  PyObject *__pyx_v_observation_model = 0;
-  PyObject *__pyx_v_initial_distribution = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_transition_model,&__pyx_n_s_observation_model,&__pyx_n_s_initial_distribution,0};
-    PyObject* values[4] = {0,0,0,0};
-
-    /* "madmom/ml/hmm.pyx":436
- * 
- *     def __init__(self, transition_model, observation_model,
- *                  initial_distribution=None):             # <<<<<<<<<<<<<<
- *         # save the parameters
- *         self.transition_model = transition_model
- */
-    values[3] = ((PyObject *)((PyObject *)Py_None));
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_transition_model)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 1); __PYX_ERR(0, 435, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observation_model)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 2); __PYX_ERR(0, 435, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  3:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_distribution);
-          if (value) { values[3] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 435, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_transition_model = values[1];
-    __pyx_v_observation_model = values[2];
-    __pyx_v_initial_distribution = values[3];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 435, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel___init__(__pyx_self, __pyx_v_self, __pyx_v_transition_model, __pyx_v_observation_model, __pyx_v_initial_distribution);
-
-  /* "madmom/ml/hmm.pyx":435
- *     """
- * 
- *     def __init__(self, transition_model, observation_model,             # <<<<<<<<<<<<<<
- *                  initial_distribution=None):
- *         # save the parameters
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_transition_model, PyObject *__pyx_v_observation_model, PyObject *__pyx_v_initial_distribution) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  int __pyx_t_8;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__init__", 0);
-  __Pyx_INCREF(__pyx_v_initial_distribution);
-
-  /* "madmom/ml/hmm.pyx":438
- *                  initial_distribution=None):
- *         # save the parameters
- *         self.transition_model = transition_model             # <<<<<<<<<<<<<<
- *         self.observation_model = observation_model
- *         if initial_distribution is None:
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_transition_model, __pyx_v_transition_model) < 0) __PYX_ERR(0, 438, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":439
- *         # save the parameters
- *         self.transition_model = transition_model
- *         self.observation_model = observation_model             # <<<<<<<<<<<<<<
- *         if initial_distribution is None:
- *             initial_distribution = np.ones(transition_model.num_states,
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_observation_model, __pyx_v_observation_model) < 0) __PYX_ERR(0, 439, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":440
- *         self.transition_model = transition_model
- *         self.observation_model = observation_model
- *         if initial_distribution is None:             # <<<<<<<<<<<<<<
- *             initial_distribution = np.ones(transition_model.num_states,
- *                                            dtype=np.float) / \
- */
-  __pyx_t_1 = (__pyx_v_initial_distribution == Py_None);
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "madmom/ml/hmm.pyx":441
- *         self.observation_model = observation_model
- *         if initial_distribution is None:
- *             initial_distribution = np.ones(transition_model.num_states,             # <<<<<<<<<<<<<<
- *                                            dtype=np.float) / \
- *                                    transition_model.num_states
- */
-    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 441, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ones); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 441, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_transition_model, __pyx_n_s_num_states); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 441, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 441, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
-    __pyx_t_3 = 0;
-
-    /* "madmom/ml/hmm.pyx":442
- *         if initial_distribution is None:
- *             initial_distribution = np.ones(transition_model.num_states,
- *                                            dtype=np.float) / \             # <<<<<<<<<<<<<<
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):
- */
-    __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 442, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 442, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 442, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_7) < 0) __PYX_ERR(0, 442, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-
-    /* "madmom/ml/hmm.pyx":441
- *         self.observation_model = observation_model
- *         if initial_distribution is None:
- *             initial_distribution = np.ones(transition_model.num_states,             # <<<<<<<<<<<<<<
- *                                            dtype=np.float) / \
- *                                    transition_model.num_states
- */
-    __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 441, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-    /* "madmom/ml/hmm.pyx":443
- *             initial_distribution = np.ones(transition_model.num_states,
- *                                            dtype=np.float) / \
- *                                    transition_model.num_states             # <<<<<<<<<<<<<<
- *         if not np.allclose(initial_distribution.sum(), 1):
- *             raise ValueError('Initial distribution is not a probability '
- */
-    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_transition_model, __pyx_n_s_num_states); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 443, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-
-    /* "madmom/ml/hmm.pyx":442
- *         if initial_distribution is None:
- *             initial_distribution = np.ones(transition_model.num_states,
- *                                            dtype=np.float) / \             # <<<<<<<<<<<<<<
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):
- */
-    __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 442, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __Pyx_DECREF_SET(__pyx_v_initial_distribution, __pyx_t_5);
-    __pyx_t_5 = 0;
-
-    /* "madmom/ml/hmm.pyx":440
- *         self.transition_model = transition_model
- *         self.observation_model = observation_model
- *         if initial_distribution is None:             # <<<<<<<<<<<<<<
- *             initial_distribution = np.ones(transition_model.num_states,
- *                                            dtype=np.float) / \
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":444
- *                                            dtype=np.float) / \
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Initial distribution is not a probability '
- *                              'distribution.')
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 444, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_allclose); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 444, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_initial_distribution, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 444, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_6 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
-    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4);
-    if (likely(__pyx_t_6)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
-      __Pyx_INCREF(__pyx_t_6);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_4, function);
-    }
-  }
-  __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 444, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  __pyx_t_8 = 0;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_7, function);
-      __pyx_t_8 = 1;
-    }
-  }
-  #if CYTHON_FAST_PYCALL
-  if (PyFunction_Check(__pyx_t_7)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_int_1};
-    __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 444, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  } else
-  #endif
-  #if CYTHON_FAST_PYCCALL
-  if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
-    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_int_1};
-    __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 444, __pyx_L1_error)
-    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  } else
-  #endif
-  {
-    __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 444, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    if (__pyx_t_4) {
-      __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL;
-    }
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_8, __pyx_t_3);
-    __Pyx_INCREF(__pyx_int_1);
-    __Pyx_GIVEREF(__pyx_int_1);
-    PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_int_1);
-    __pyx_t_3 = 0;
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 444, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  }
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 444, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_1 = ((!__pyx_t_2) != 0);
-  if (unlikely(__pyx_t_1)) {
-
-    /* "madmom/ml/hmm.pyx":445
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):
- *             raise ValueError('Initial distribution is not a probability '             # <<<<<<<<<<<<<<
- *                              'distribution.')
- *         self.initial_distribution = initial_distribution
- */
-    __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 445, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_Raise(__pyx_t_5, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __PYX_ERR(0, 445, __pyx_L1_error)
-
-    /* "madmom/ml/hmm.pyx":444
- *                                            dtype=np.float) / \
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):             # <<<<<<<<<<<<<<
- *             raise ValueError('Initial distribution is not a probability '
- *                              'distribution.')
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":447
- *             raise ValueError('Initial distribution is not a probability '
- *                              'distribution.')
- *         self.initial_distribution = initial_distribution             # <<<<<<<<<<<<<<
- *         # attributes needed for stateful processing (i.e. forward_step())
- *         self._prev = self.initial_distribution.copy()
- */
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_initial_distribution, __pyx_v_initial_distribution) < 0) __PYX_ERR(0, 447, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":449
- *         self.initial_distribution = initial_distribution
- *         # attributes needed for stateful processing (i.e. forward_step())
- *         self._prev = self.initial_distribution.copy()             # <<<<<<<<<<<<<<
- * 
- *     def __getstate__(self):
- */
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 449, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_copy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 449, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_7 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_7)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_7);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_5 = (__pyx_t_7) ? __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7) : __Pyx_PyObject_CallNoArg(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 449, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_prev, __pyx_t_5) < 0) __PYX_ERR(0, 449, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-
-  /* "madmom/ml/hmm.pyx":435
- *     """
- * 
- *     def __init__(self, transition_model, observation_model,             # <<<<<<<<<<<<<<
- *                  initial_distribution=None):
- *         # save the parameters
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_initial_distribution);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
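The block above is the compiled form of HiddenMarkovModel.__init__. Pieced together from the quoted fragments (hmm.pyx:435-449), the deleted method is approximately the sketch below; note it uses np.float, an alias that was removed in NumPy 1.24+, where plain float or np.float64 is required:

    def __init__(self, transition_model, observation_model,
                 initial_distribution=None):
        # save the parameters
        self.transition_model = transition_model
        self.observation_model = observation_model
        if initial_distribution is None:
            # default to a uniform distribution over all states
            initial_distribution = np.ones(transition_model.num_states,
                                           dtype=np.float) / \
                                   transition_model.num_states
        if not np.allclose(initial_distribution.sum(), 1):
            raise ValueError('Initial distribution is not a probability '
                             'distribution.')
        self.initial_distribution = initial_distribution
        # attributes needed for stateful processing (i.e. forward_step())
        self._prev = self.initial_distribution.copy()
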
-/* "madmom/ml/hmm.pyx":451
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def __getstate__(self):             # <<<<<<<<<<<<<<
- *         # copy everything to a picklable object
- *         state = self.__dict__.copy()
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_3__getstate__(PyObject *__pyx_self, PyObject *__pyx_v_self); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_2__getstate__[] = "HiddenMarkovModel.__getstate__(self)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_3__getstate__ = {"__getstate__", (PyCFunction)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_3__getstate__, METH_O, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_2__getstate__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_3__getstate__(PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getstate__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_2__getstate__(__pyx_self, ((PyObject *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_2__getstate__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self) {
-  PyObject *__pyx_v_state = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__getstate__", 0);
-
-  /* "madmom/ml/hmm.pyx":453
- *     def __getstate__(self):
- *         # copy everything to a picklable object
- *         state = self.__dict__.copy()             # <<<<<<<<<<<<<<
- *         # do not pickle attributes needed for stateful processing
- *         state.pop('_prev', None)
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 453, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_copy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 453, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_2)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_2);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 453, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_v_state = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":455
- *         state = self.__dict__.copy()
- *         # do not pickle attributes needed for stateful processing
- *         state.pop('_prev', None)             # <<<<<<<<<<<<<<
- *         return state
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_state, __pyx_n_s_pop); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 455, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 455, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":456
- *         # do not pickle attributes needed for stateful processing
- *         state.pop('_prev', None)
- *         return state             # <<<<<<<<<<<<<<
- * 
- *     def __setstate__(self, state):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_state);
-  __pyx_r = __pyx_v_state;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":451
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def __getstate__(self):             # <<<<<<<<<<<<<<
- *         # copy everything to a picklable object
- *         state = self.__dict__.copy()
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.__getstate__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_state);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "madmom/ml/hmm.pyx":458
- *         return state
- * 
- *     def __setstate__(self, state):             # <<<<<<<<<<<<<<
- *         # restore pickled instance attributes
- *         self.__dict__.update(state)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_5__setstate__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_4__setstate__[] = "HiddenMarkovModel.__setstate__(self, state)";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_5__setstate__ = {"__setstate__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_5__setstate__, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_4__setstate__};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_5__setstate__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_state = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setstate__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_state,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_state)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__setstate__", 1, 2, 2, 1); __PYX_ERR(0, 458, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__setstate__") < 0)) __PYX_ERR(0, 458, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_state = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__setstate__", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 458, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.__setstate__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_4__setstate__(__pyx_self, __pyx_v_self, __pyx_v_state);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_4__setstate__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setstate__", 0);
-
-  /* "madmom/ml/hmm.pyx":460
- *     def __setstate__(self, state):
- *         # restore pickled instance attributes
- *         self.__dict__.update(state)             # <<<<<<<<<<<<<<
- *         # add non-pickled attributes needed for stateful processing
- *         self._prev = self.initial_distribution.copy()
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_dict); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 460, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_update); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 460, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_2)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_2);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_v_state) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_state);
-  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 460, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":462
- *         self.__dict__.update(state)
- *         # add non-pickled attributes needed for stateful processing
- *         self._prev = self.initial_distribution.copy()             # <<<<<<<<<<<<<<
- * 
- *     def reset(self, initial_distribution=None):
- */
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 462, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_copy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 462, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_3)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_3);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 462, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_prev, __pyx_t_1) < 0) __PYX_ERR(0, 462, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":458
- *         return state
- * 
- *     def __setstate__(self, state):             # <<<<<<<<<<<<<<
- *         # restore pickled instance attributes
- *         self.__dict__.update(state)
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.__setstate__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
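The two wrappers above implement pickling support for the same class. Per the quoted fragments (hmm.pyx:451-462), the deleted methods are simply the following pair: the forward variable _prev is dropped on pickling and rebuilt from the initial distribution on unpickling, so a restored model starts from a clean state:

    def __getstate__(self):
        # copy everything to a picklable object
        state = self.__dict__.copy()
        # do not pickle attributes needed for stateful processing
        state.pop('_prev', None)
        return state

    def __setstate__(self, state):
        # restore pickled instance attributes
        self.__dict__.update(state)
        # add non-pickled attributes needed for stateful processing
        self._prev = self.initial_distribution.copy()
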
-/* "madmom/ml/hmm.pyx":464
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def reset(self, initial_distribution=None):             # <<<<<<<<<<<<<<
- *         """
- *         Reset the HMM to its initial state.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_7reset(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_6reset[] = "HiddenMarkovModel.reset(self, initial_distribution=None)\n\n        Reset the HMM to its initial state.\n\n        Parameters\n        ----------\n        initial_distribution : numpy array, optional\n            Reset to this initial state distribution.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_7reset = {"reset", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_7reset, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_6reset};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_7reset(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_initial_distribution = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("reset (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_initial_distribution,0};
-    PyObject* values[2] = {0,0};
-    values[1] = ((PyObject *)((PyObject *)Py_None));
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_distribution);
-          if (value) { values[1] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "reset") < 0)) __PYX_ERR(0, 464, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_initial_distribution = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("reset", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 464, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.reset", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_6reset(__pyx_self, __pyx_v_self, __pyx_v_initial_distribution);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_6reset(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_initial_distribution) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("reset", 0);
-
-  /* "madmom/ml/hmm.pyx":475
- *         """
- *         # reset initial state distribution
- *         self._prev = initial_distribution or self.initial_distribution.copy()             # <<<<<<<<<<<<<<
- * 
- *     @cython.cdivision(True)
- */
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_initial_distribution); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 475, __pyx_L1_error)
-  if (!__pyx_t_2) {
-  } else {
-    __Pyx_INCREF(__pyx_v_initial_distribution);
-    __pyx_t_1 = __pyx_v_initial_distribution;
-    goto __pyx_L3_bool_binop_done;
-  }
-  __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 475, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 475, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_5, function);
-    }
-  }
-  __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 475, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_INCREF(__pyx_t_3);
-  __pyx_t_1 = __pyx_t_3;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_L3_bool_binop_done:;
-  if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_prev, __pyx_t_1) < 0) __PYX_ERR(0, 475, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":464
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def reset(self, initial_distribution=None):             # <<<<<<<<<<<<<<
- *         """
- *         Reset the HMM to its initial state.
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.reset", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
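Stripped of the generated reference counting, the `reset` method quoted in the `hmm.pyx` fragments above is a single assignment. A minimal pure-Python rendering of it (the caveat in the comment is an observation about the quoted source, not something the original spells out):

```python
def reset(self, initial_distribution=None):
    """Reset the HMM to its initial state distribution."""
    # `or` truth-tests its left operand, so a multi-element NumPy array
    # passed here would raise ValueError; callers pass None for the default.
    self._prev = initial_distribution or self.initial_distribution.copy()
```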
-/* "madmom/ml/hmm.pyx":480
- *     @cython.boundscheck(False)
- *     @cython.wraparound(False)
- *     def viterbi(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Determine the best path with the Viterbi algorithm.
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_9viterbi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_8viterbi[] = "HiddenMarkovModel.viterbi(self, observations)\n\n        Determine the best path with the Viterbi algorithm.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations to decode the optimal path for.\n\n        Returns\n        -------\n        path : numpy array\n            Best state-space path sequence.\n        log_prob : float\n            Corresponding log probability.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_9viterbi = {"viterbi", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_9viterbi, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_8viterbi};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_9viterbi(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("viterbi (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,0};
-    PyObject* values[2] = {0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("viterbi", 1, 2, 2, 1); __PYX_ERR(0, 480, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "viterbi") < 0)) __PYX_ERR(0, 480, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("viterbi", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 480, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.viterbi", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_8viterbi(__pyx_self, __pyx_v_self, __pyx_v_observations);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_8viterbi(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations) {
-  PyObject *__pyx_v_tm = NULL;
-  __Pyx_memviewslice __pyx_v_tm_states = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_tm_pointers = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_tm_probabilities = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_v_num_states;
-  PyObject *__pyx_v_om = NULL;
-  unsigned int __pyx_v_num_observations;
-  __Pyx_memviewslice __pyx_v_om_pointers = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_om_densities = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_current_viterbi = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_previous_viterbi = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_bt_pointers = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_v_state;
-  unsigned int __pyx_v_frame;
-  unsigned int __pyx_v_prev_state;
-  unsigned int __pyx_v_pointer;
-  double __pyx_v_density;
-  double __pyx_v_transition_prob;
-  PyObject *__pyx_v_log_probability = NULL;
-  PyObject *__pyx_v_path = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  __Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_t_3 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  PyObject *__pyx_t_9 = NULL;
-  PyObject *__pyx_t_10 = NULL;
-  __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_t_12;
-  unsigned int __pyx_t_13;
-  unsigned int __pyx_t_14;
-  unsigned int __pyx_t_15;
-  unsigned int __pyx_t_16;
-  size_t __pyx_t_17;
-  size_t __pyx_t_18;
-  size_t __pyx_t_19;
-  Py_ssize_t __pyx_t_20;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_21;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_22;
-  unsigned int __pyx_t_23;
-  int __pyx_t_24;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("viterbi", 0);
-
-  /* "madmom/ml/hmm.pyx":498
- *         """
- *         # transition model stuff
- *         tm = self.transition_model             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_transition_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_tm = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":499
- *         # transition model stuff
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.log_probabilities
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 499, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 499, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_states = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":500
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers             # <<<<<<<<<<<<<<
- *         cdef double [::1] tm_probabilities = tm.log_probabilities
- *         cdef unsigned int num_states = tm.num_states
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 500, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 500, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_pointers = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":501
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.log_probabilities             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_states = tm.num_states
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_log_probabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 501, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 501, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_probabilities = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":502
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.log_probabilities
- *         cdef unsigned int num_states = tm.num_states             # <<<<<<<<<<<<<<
- * 
- *         # observation model stuff
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 502, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 502, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_num_states = __pyx_t_4;
-
-  /* "madmom/ml/hmm.pyx":505
- * 
- *         # observation model stuff
- *         om = self.observation_model             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_observations = len(observations)
- *         cdef uint32_t [::1] om_pointers = om.pointers
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_observation_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 505, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_om = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":506
- *         # observation model stuff
- *         om = self.observation_model
- *         cdef unsigned int num_observations = len(observations)             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities = om.log_densities(observations)
- */
-  __pyx_t_5 = PyObject_Length(__pyx_v_observations); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 506, __pyx_L1_error)
-  __pyx_v_num_observations = __pyx_t_5;
-
-  /* "madmom/ml/hmm.pyx":507
- *         om = self.observation_model
- *         cdef unsigned int num_observations = len(observations)
- *         cdef uint32_t [::1] om_pointers = om.pointers             # <<<<<<<<<<<<<<
- *         cdef double [:, ::1] om_densities = om.log_densities(observations)
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_om, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 507, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 507, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_om_pointers = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":508
- *         cdef unsigned int num_observations = len(observations)
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities = om.log_densities(observations)             # <<<<<<<<<<<<<<
- * 
- *         # current viterbi variables
- */
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_om, __pyx_n_s_log_densities); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 508, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __pyx_t_7 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_7)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_7);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_v_observations) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_observations);
-  __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 508, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 508, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_om_densities = __pyx_t_8;
-  __pyx_t_8.memview = NULL;
-  __pyx_t_8.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":511
- * 
- *         # current viterbi variables
- *         cdef double [::1] current_viterbi = np.empty(num_states,             # <<<<<<<<<<<<<<
- *                                                      dtype=np.float)
- * 
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":512
- *         # current viterbi variables
- *         cdef double [::1] current_viterbi = np.empty(num_states,
- *                                                      dtype=np.float)             # <<<<<<<<<<<<<<
- * 
- *         # previous viterbi variables, init with the initial state distribution
- */
-  __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 512, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 512, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_float); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 512, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_10) < 0) __PYX_ERR(0, 512, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-
-  /* "madmom/ml/hmm.pyx":511
- * 
- *         # current viterbi variables
- *         cdef double [::1] current_viterbi = np.empty(num_states,             # <<<<<<<<<<<<<<
- *                                                      dtype=np.float)
- * 
- */
-  __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_10, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 511, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_v_current_viterbi = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":515
- * 
- *         # previous viterbi variables, init with the initial state distribution
- *         cdef double [::1] previous_viterbi = np.log(self.initial_distribution)             # <<<<<<<<<<<<<<
- * 
- *         # back-tracking pointers
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 515, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_log); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 515, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 515, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_6 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
-    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7);
-    if (likely(__pyx_t_6)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-      __Pyx_INCREF(__pyx_t_6);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_7, function);
-    }
-  }
-  __pyx_t_10 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_6, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 515, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_10, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 515, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_v_previous_viterbi = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":518
- * 
- *         # back-tracking pointers
- *         cdef uint32_t [:, ::1] bt_pointers = np.empty((num_observations,             # <<<<<<<<<<<<<<
- *                                                        num_states),
- *                                                       dtype=np.uint32)
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_empty); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_t_10 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_observations); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-
-  /* "madmom/ml/hmm.pyx":519
- *         # back-tracking pointers
- *         cdef uint32_t [:, ::1] bt_pointers = np.empty((num_observations,
- *                                                        num_states),             # <<<<<<<<<<<<<<
- *                                                       dtype=np.uint32)
- *         # define counters etc.
- */
-  __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 519, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-
-  /* "madmom/ml/hmm.pyx":518
- * 
- *         # back-tracking pointers
- *         cdef uint32_t [:, ::1] bt_pointers = np.empty((num_observations,             # <<<<<<<<<<<<<<
- *                                                        num_states),
- *                                                       dtype=np.uint32)
- */
-  __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_GIVEREF(__pyx_t_10);
-  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_10);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1);
-  __pyx_t_10 = 0;
-  __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_6);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
-  __pyx_t_6 = 0;
-
-  /* "madmom/ml/hmm.pyx":520
- *         cdef uint32_t [:, ::1] bt_pointers = np.empty((num_observations,
- *                                                        num_states),
- *                                                       dtype=np.uint32)             # <<<<<<<<<<<<<<
- *         # define counters etc.
- *         cdef unsigned int state, frame, prev_state, pointer
- */
-  __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 520, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 520, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_uint32); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 520, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_9) < 0) __PYX_ERR(0, 520, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-
-  /* "madmom/ml/hmm.pyx":518
- * 
- *         # back-tracking pointers
- *         cdef uint32_t [:, ::1] bt_pointers = np.empty((num_observations,             # <<<<<<<<<<<<<<
- *                                                        num_states),
- *                                                       dtype=np.uint32)
- */
-  __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_9, PyBUF_WRITABLE); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 518, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_v_bt_pointers = __pyx_t_11;
-  __pyx_t_11.memview = NULL;
-  __pyx_t_11.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":526
- * 
- *         # iterate over all observations
- *         for frame in range(num_observations):             # <<<<<<<<<<<<<<
- *             # search for the best transition
- *             for state in range(num_states):
- */
-  __pyx_t_4 = __pyx_v_num_observations;
-  __pyx_t_12 = __pyx_t_4;
-  for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
-    __pyx_v_frame = __pyx_t_13;
-
-    /* "madmom/ml/hmm.pyx":528
- *         for frame in range(num_observations):
- *             # search for the best transition
- *             for state in range(num_states):             # <<<<<<<<<<<<<<
- *                 # reset the current viterbi variable
- *                 current_viterbi[state] = -INFINITY
- */
-    __pyx_t_14 = __pyx_v_num_states;
-    __pyx_t_15 = __pyx_t_14;
-    for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
-      __pyx_v_state = __pyx_t_16;
-
-      /* "madmom/ml/hmm.pyx":530
- *             for state in range(num_states):
- *                 # reset the current viterbi variable
- *                 current_viterbi[state] = -INFINITY             # <<<<<<<<<<<<<<
- *                 # get the observation model probability density value
- *                 # the om_pointers array holds pointers to the correct
- */
-      __pyx_t_17 = __pyx_v_state;
-      *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_current_viterbi.data) + __pyx_t_17)) )) = (-NPY_INFINITY);
-
-      /* "madmom/ml/hmm.pyx":536
- *                 # (i.e. column in the om_densities array)
- *                 # Note: defining density here gives a 5% speed-up!?
- *                 density = om_densities[frame, om_pointers[state]]             # <<<<<<<<<<<<<<
- *                 # iterate over all possible previous states
- *                 # the tm_pointers array holds pointers to the states which are
- */
-      __pyx_t_17 = __pyx_v_state;
-      __pyx_t_18 = __pyx_v_frame;
-      __pyx_t_19 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_om_pointers.data) + __pyx_t_17)) )));
-      __pyx_v_density = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_om_densities.data + __pyx_t_18 * __pyx_v_om_densities.strides[0]) )) + __pyx_t_19)) )));
-
-      /* "madmom/ml/hmm.pyx":541
- *                 # stored in the tm_states array
- *                 for pointer in range(tm_pointers[state],
- *                                      tm_pointers[state + 1]):             # <<<<<<<<<<<<<<
- *                     # get the previous state
- *                     prev_state = tm_states[pointer]
- */
-      __pyx_t_20 = (__pyx_v_state + 1);
-      __pyx_t_21 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_pointers.data) + __pyx_t_20)) )));
-
-      /* "madmom/ml/hmm.pyx":540
- *                 # the tm_pointers array holds pointers to the states which are
- *                 # stored in the tm_states array
- *                 for pointer in range(tm_pointers[state],             # <<<<<<<<<<<<<<
- *                                      tm_pointers[state + 1]):
- *                     # get the previous state
- */
-      __pyx_t_17 = __pyx_v_state;
-
-      /* "madmom/ml/hmm.pyx":541
- *                 # stored in the tm_states array
- *                 for pointer in range(tm_pointers[state],
- *                                      tm_pointers[state + 1]):             # <<<<<<<<<<<<<<
- *                     # get the previous state
- *                     prev_state = tm_states[pointer]
- */
-      __pyx_t_22 = __pyx_t_21;
-      for (__pyx_t_23 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_pointers.data) + __pyx_t_17)) ))); __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
-
-        /* "madmom/ml/hmm.pyx":540
- *                 # the tm_pointers array holds pointers to the states which are
- *                 # stored in the tm_states array
- *                 for pointer in range(tm_pointers[state],             # <<<<<<<<<<<<<<
- *                                      tm_pointers[state + 1]):
- *                     # get the previous state
- */
-        __pyx_v_pointer = __pyx_t_23;
-
-        /* "madmom/ml/hmm.pyx":543
- *                                      tm_pointers[state + 1]):
- *                     # get the previous state
- *                     prev_state = tm_states[pointer]             # <<<<<<<<<<<<<<
- *                     # weight the previous state with the transition probability
- *                     # and the current observation probability density
- */
-        __pyx_t_19 = __pyx_v_pointer;
-        __pyx_v_prev_state = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_states.data) + __pyx_t_19)) )));
-
-        /* "madmom/ml/hmm.pyx":546
- *                     # weight the previous state with the transition probability
- *                     # and the current observation probability density
- *                     transition_prob = previous_viterbi[prev_state] + \             # <<<<<<<<<<<<<<
- *                                       tm_probabilities[pointer] + density
- *                     # if this transition probability is greater than the
- */
-        __pyx_t_19 = __pyx_v_prev_state;
-
-        /* "madmom/ml/hmm.pyx":547
- *                     # and the current observation probability density
- *                     transition_prob = previous_viterbi[prev_state] + \
- *                                       tm_probabilities[pointer] + density             # <<<<<<<<<<<<<<
- *                     # if this transition probability is greater than the
- *                     # current one, overwrite it and save the previous state
- */
-        __pyx_t_18 = __pyx_v_pointer;
-
-        /* "madmom/ml/hmm.pyx":546
- *                     # weight the previous state with the transition probability
- *                     # and the current observation probability density
- *                     transition_prob = previous_viterbi[prev_state] + \             # <<<<<<<<<<<<<<
- *                                       tm_probabilities[pointer] + density
- *                     # if this transition probability is greater than the
- */
-        __pyx_v_transition_prob = (((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_previous_viterbi.data) + __pyx_t_19)) ))) + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_tm_probabilities.data) + __pyx_t_18)) )))) + __pyx_v_density);
-
-        /* "madmom/ml/hmm.pyx":551
- *                     # current one, overwrite it and save the previous state
- *                     # in the back tracking pointers
- *                     if transition_prob > current_viterbi[state]:             # <<<<<<<<<<<<<<
- *                         # update the transition probability
- *                         current_viterbi[state] = transition_prob
- */
-        __pyx_t_18 = __pyx_v_state;
-        __pyx_t_24 = ((__pyx_v_transition_prob > (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_current_viterbi.data) + __pyx_t_18)) )))) != 0);
-        if (__pyx_t_24) {
-
-          /* "madmom/ml/hmm.pyx":553
- *                     if transition_prob > current_viterbi[state]:
- *                         # update the transition probability
- *                         current_viterbi[state] = transition_prob             # <<<<<<<<<<<<<<
- *                         # update the back tracking pointers
- *                         bt_pointers[frame, state] = prev_state
- */
-          __pyx_t_18 = __pyx_v_state;
-          *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_current_viterbi.data) + __pyx_t_18)) )) = __pyx_v_transition_prob;
-
-          /* "madmom/ml/hmm.pyx":555
- *                         current_viterbi[state] = transition_prob
- *                         # update the back tracking pointers
- *                         bt_pointers[frame, state] = prev_state             # <<<<<<<<<<<<<<
- * 
- *             # overwrite the old states with the current ones
- */
-          __pyx_t_18 = __pyx_v_frame;
-          __pyx_t_19 = __pyx_v_state;
-          *((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=1 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ (__pyx_v_bt_pointers.data + __pyx_t_18 * __pyx_v_bt_pointers.strides[0]) )) + __pyx_t_19)) )) = __pyx_v_prev_state;
-
-          /* "madmom/ml/hmm.pyx":551
- *                     # current one, overwrite it and save the previous state
- *                     # in the back tracking pointers
- *                     if transition_prob > current_viterbi[state]:             # <<<<<<<<<<<<<<
- *                         # update the transition probability
- *                         current_viterbi[state] = transition_prob
- */
-        }
-      }
-    }
-
-    /* "madmom/ml/hmm.pyx":558
- * 
- *             # overwrite the old states with the current ones
- *             previous_viterbi[:] = current_viterbi             # <<<<<<<<<<<<<<
- * 
- *         # fetch the final best state
- */
-    if (unlikely(__pyx_memoryview_copy_contents(__pyx_v_current_viterbi, __pyx_v_previous_viterbi, 1, 1, 0) < 0)) __PYX_ERR(0, 558, __pyx_L1_error)
-  }
-
-  /* "madmom/ml/hmm.pyx":561
- * 
- *         # fetch the final best state
- *         state = np.asarray(current_viterbi).argmax()             # <<<<<<<<<<<<<<
- *         # set the path's probability to that of the best state
- *         log_probability = current_viterbi[state]
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_current_viterbi, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_10 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
-    __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_7);
-    if (likely(__pyx_t_10)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-      __Pyx_INCREF(__pyx_t_10);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_7, function);
-    }
-  }
-  __pyx_t_6 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_10, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_argmax); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_6 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
-    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7);
-    if (likely(__pyx_t_6)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-      __Pyx_INCREF(__pyx_t_6);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_7, function);
-    }
-  }
-  __pyx_t_9 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_9); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 561, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_v_state = __pyx_t_4;
-
-  /* "madmom/ml/hmm.pyx":563
- *         state = np.asarray(current_viterbi).argmax()
- *         # set the path's probability to that of the best state
- *         log_probability = current_viterbi[state]             # <<<<<<<<<<<<<<
- * 
- *         # raise warning if the sequence has -inf probability
- */
-  __pyx_t_17 = __pyx_v_state;
-  __pyx_t_9 = PyFloat_FromDouble((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_current_viterbi.data) + __pyx_t_17)) )))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 563, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __pyx_v_log_probability = __pyx_t_9;
-  __pyx_t_9 = 0;
-
-  /* "madmom/ml/hmm.pyx":566
- * 
- *         # raise warning if the sequence has -inf probability
- *         if np.isinf(log_probability):             # <<<<<<<<<<<<<<
- *             warnings.warn('-inf log probability during Viterbi decoding '
- *                           'cannot find a valid path', RuntimeWarning)
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 566, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_isinf); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 566, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_7 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_7)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_7);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_9 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_v_log_probability) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_log_probability);
-  __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-  if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 566, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_t_24 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_24 < 0)) __PYX_ERR(0, 566, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  if (__pyx_t_24) {
-
-    /* "madmom/ml/hmm.pyx":567
- *         # raise warning if the sequence has -inf probability
- *         if np.isinf(log_probability):
- *             warnings.warn('-inf log probability during Viterbi decoding '             # <<<<<<<<<<<<<<
- *                           'cannot find a valid path', RuntimeWarning)
- *             # return empty path sequence
- */
-    __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_warnings); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 567, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_warn); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 567, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-
-    /* "madmom/ml/hmm.pyx":568
- *         if np.isinf(log_probability):
- *             warnings.warn('-inf log probability during Viterbi decoding '
- *                           'cannot find a valid path', RuntimeWarning)             # <<<<<<<<<<<<<<
- *             # return empty path sequence
- *             return np.empty(0, dtype=np.uint32), log_probability
- */
-    __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 567, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-
-    /* "madmom/ml/hmm.pyx":570
- *                           'cannot find a valid path', RuntimeWarning)
- *             # return empty path sequence
- *             return np.empty(0, dtype=np.uint32), log_probability             # <<<<<<<<<<<<<<
- * 
- *         # back tracked path, a.k.a. path sequence
- */
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_empty); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-    __pyx_t_9 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_uint32); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__7, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-    __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __Pyx_GIVEREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1);
-    __Pyx_INCREF(__pyx_v_log_probability);
-    __Pyx_GIVEREF(__pyx_v_log_probability);
-    PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_log_probability);
-    __pyx_t_1 = 0;
-    __pyx_r = __pyx_t_9;
-    __pyx_t_9 = 0;
-    goto __pyx_L0;
-
-    /* "madmom/ml/hmm.pyx":566
- * 
- *         # raise warning if the sequence has -inf probability
- *         if np.isinf(log_probability):             # <<<<<<<<<<<<<<
- *             warnings.warn('-inf log probability during Viterbi decoding '
- *                           'cannot find a valid path', RuntimeWarning)
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":573
- * 
- *         # back tracked path, a.k.a. path sequence
- *         path = np.empty(num_observations, dtype=np.uint32)             # <<<<<<<<<<<<<<
- *         # track the path backwards, start with the last frame and do not
- *         # include the pointer for frame 0, since it includes the transitions
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_empty); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_t_9 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_observations); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_GIVEREF(__pyx_t_9);
-  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9);
-  __pyx_t_9 = 0;
-  __pyx_t_9 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_uint32); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_dtype, __pyx_t_10) < 0) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 573, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_v_path = __pyx_t_10;
-  __pyx_t_10 = 0;
-
-  /* "madmom/ml/hmm.pyx":577
- *         # include the pointer for frame 0, since it includes the transitions
- *         # to the prior distribution states
- *         for frame in range(num_observations -1, -1, -1):             # <<<<<<<<<<<<<<
- *             # save the state in the path
- *             path[frame] = state
- */
-  for (__pyx_t_4 = (__pyx_v_num_observations - 1) + 1; __pyx_t_4 > -1 + 1; ) { __pyx_t_4-=1;
-    __pyx_v_frame = __pyx_t_4;
-
-    /* "madmom/ml/hmm.pyx":579
- *         for frame in range(num_observations -1, -1, -1):
- *             # save the state in the path
- *             path[frame] = state             # <<<<<<<<<<<<<<
- *             # fetch the next previous one
- *             state = bt_pointers[frame, state]
- */
-    __pyx_t_10 = __Pyx_PyInt_From_unsigned_int(__pyx_v_state); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 579, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_10);
-    if (unlikely(__Pyx_SetItemInt(__pyx_v_path, __pyx_v_frame, __pyx_t_10, unsigned int, 0, __Pyx_PyInt_From_unsigned_int, 0, 0, 0) < 0)) __PYX_ERR(0, 579, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-
-    /* "madmom/ml/hmm.pyx":581
- *             path[frame] = state
- *             # fetch the next previous one
- *             state = bt_pointers[frame, state]             # <<<<<<<<<<<<<<
- * 
- *         # return the tracked path and its probability
- */
-    __pyx_t_17 = __pyx_v_frame;
-    __pyx_t_19 = __pyx_v_state;
-    __pyx_v_state = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=1 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ (__pyx_v_bt_pointers.data + __pyx_t_17 * __pyx_v_bt_pointers.strides[0]) )) + __pyx_t_19)) )));
-  }
-
-  /* "madmom/ml/hmm.pyx":584
- * 
- *         # return the tracked path and its probability
- *         return path, log_probability             # <<<<<<<<<<<<<<
- * 
- *     @cython.cdivision(True)
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 584, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_INCREF(__pyx_v_path);
-  __Pyx_GIVEREF(__pyx_v_path);
-  PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_v_path);
-  __Pyx_INCREF(__pyx_v_log_probability);
-  __Pyx_GIVEREF(__pyx_v_log_probability);
-  PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_v_log_probability);
-  __pyx_r = __pyx_t_10;
-  __pyx_t_10 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":480
- *     @cython.boundscheck(False)
- *     @cython.wraparound(False)
- *     def viterbi(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Determine the best path with the Viterbi algorithm.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_2, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_3, 1);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
-  __Pyx_XDECREF(__pyx_t_9);
-  __Pyx_XDECREF(__pyx_t_10);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.viterbi", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_tm);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_states, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_pointers, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_probabilities, 1);
-  __Pyx_XDECREF(__pyx_v_om);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_om_pointers, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_om_densities, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_current_viterbi, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_previous_viterbi, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_bt_pointers, 1);
-  __Pyx_XDECREF(__pyx_v_log_probability);
-  __Pyx_XDECREF(__pyx_v_path);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
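The generated C above maps, fragment by fragment, onto the Viterbi decoder quoted in the `hmm.pyx` comments. As a reading aid, here is a minimal NumPy sketch of the same recursion; the attribute names (`states`, `pointers`, `log_probabilities`, `om.pointers`, `log_densities`) come straight from those fragments, while `np.float64` stands in for the `np.float` alias used at pyx line 512, which recent NumPy releases have removed:

```python
import warnings

import numpy as np


def viterbi(hmm, observations):
    """Best state path and its log probability (pure-Python sketch)."""
    tm, om = hmm.transition_model, hmm.observation_model
    # CSR-like layout: the states that can transition into `state` live at
    # tm.states[tm.pointers[state]:tm.pointers[state + 1]], with the matching
    # log transition probabilities at the same slice of tm.log_probabilities.
    om_densities = om.log_densities(observations)
    num_states, num_observations = tm.num_states, len(observations)
    current = np.empty(num_states, dtype=np.float64)
    previous = np.log(hmm.initial_distribution)
    bt_pointers = np.empty((num_observations, num_states), dtype=np.uint32)
    for frame in range(num_observations):
        for state in range(num_states):
            current[state] = -np.inf
            density = om_densities[frame, om.pointers[state]]
            for ptr in range(tm.pointers[state], tm.pointers[state + 1]):
                prev_state = tm.states[ptr]
                prob = previous[prev_state] + tm.log_probabilities[ptr] + density
                if prob > current[state]:
                    current[state] = prob
                    bt_pointers[frame, state] = prev_state
        previous[:] = current
    state = current.argmax()
    log_probability = current[state]
    if np.isinf(log_probability):
        warnings.warn('-inf log probability during Viterbi decoding '
                      'cannot find a valid path', RuntimeWarning)
        return np.empty(0, dtype=np.uint32), log_probability
    # track the path backwards from the best final state
    path = np.empty(num_observations, dtype=np.uint32)
    for frame in range(num_observations - 1, -1, -1):
        path[frame] = state
        state = bt_pointers[frame, state]
    return path, log_probability
```

The `pointers`/`states` pair plays the same role as the `indptr`/`indices` arrays of a CSR sparse matrix: the inner loop only ever visits transitions that actually exist, which is what makes the typed, bounds-check-free C version above fast.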
-/* "madmom/ml/hmm.pyx":590
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward(self, observations, reset=True):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_11forward(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_10forward[] = "HiddenMarkovModel.forward(self, observations, reset=True)\n\n        Compute the forward variables at each time step. Instead of computing\n        in the log domain, we normalise at each step, which is faster for the\n        forward algorithm.\n\n        Parameters\n        ----------\n        observations : numpy array, shape (num_frames, num_densities)\n            Observations to compute the forward variables for.\n        reset : bool, optional\n            Reset the HMM to its initial state before computing the forward\n            variables.\n\n        Returns\n        -------\n        numpy array, shape (num_observations, num_states)\n            Forward variables.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_11forward = {"forward", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_11forward, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_10forward};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_11forward(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  PyObject *__pyx_v_reset = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("forward (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,&__pyx_n_s_reset,0};
-    PyObject* values[3] = {0,0,0};
-    values[2] = ((PyObject *)((PyObject *)Py_True));
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("forward", 0, 2, 3, 1); __PYX_ERR(0, 590, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_reset);
-          if (value) { values[2] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "forward") < 0)) __PYX_ERR(0, 590, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-    __pyx_v_reset = values[2];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("forward", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 590, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.forward", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_10forward(__pyx_self, __pyx_v_self, __pyx_v_observations, __pyx_v_reset);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
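The `forward` body that follows is cut off in this hunk, but its local declarations (`fwd_prev`, `fwd`, `prob_sum`, `norm_factor`) together with the docstring pin down the shape of the recursion: the same sparse layout as `viterbi`, raw probabilities (`tm.probabilities`, `om.densities`) instead of log ones, and normalisation at every frame. A sketch under that reading; the exact placement of the density factor and the write-back to `_prev` are assumptions, since the loop body is not part of this excerpt:

```python
import numpy as np


def forward(hmm, observations, reset=True):
    """Normalised forward variables, shape (num_observations, num_states)."""
    tm, om = hmm.transition_model, hmm.observation_model
    om_densities = om.densities(observations)   # raw densities, not log ones
    num_states, num_observations = tm.num_states, len(om_densities)
    if reset:
        hmm.reset()
    fwd_prev = hmm._prev            # assumed: the distribution set by reset()
    fwd = np.empty((num_observations, num_states), dtype=np.float64)
    for frame in range(num_observations):
        prob_sum = 0.
        for state in range(num_states):
            fwd[frame, state] = 0.
            for ptr in range(tm.pointers[state], tm.pointers[state + 1]):
                fwd[frame, state] += fwd_prev[tm.states[ptr]] * tm.probabilities[ptr]
            fwd[frame, state] *= om_densities[frame, om.pointers[state]]
            prob_sum += fwd[frame, state]
        # normalise so each frame's variables sum to 1 (avoids underflow
        # without moving to the log domain, as the docstring describes)
        fwd[frame] *= 1. / prob_sum
        fwd_prev = fwd[frame]
    hmm._prev = fwd_prev.copy()     # assumed: keep state for streaming use
    return fwd
```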
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_10forward(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations, PyObject *__pyx_v_reset) {
-  PyObject *__pyx_v_tm = NULL;
-  __Pyx_memviewslice __pyx_v_tm_states = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_tm_pointers = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_tm_probabilities = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_v_num_states;
-  PyObject *__pyx_v_om = NULL;
-  __Pyx_memviewslice __pyx_v_om_pointers = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_om_densities = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_v_num_observations;
-  __Pyx_memviewslice __pyx_v_fwd_prev = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_v_fwd = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_v_prev_pointer;
-  unsigned int __pyx_v_frame;
-  unsigned int __pyx_v_state;
-  double __pyx_v_prob_sum;
-  double __pyx_v_norm_factor;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  __Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_t_3 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  size_t __pyx_t_8;
-  int __pyx_t_9;
-  PyObject *__pyx_t_10 = NULL;
-  PyObject *__pyx_t_11 = NULL;
-  unsigned int __pyx_t_12;
-  unsigned int __pyx_t_13;
-  unsigned int __pyx_t_14;
-  unsigned int __pyx_t_15;
-  unsigned int __pyx_t_16;
-  Py_ssize_t __pyx_t_17;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_18;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_19;
-  unsigned int __pyx_t_20;
-  size_t __pyx_t_21;
-  size_t __pyx_t_22;
-  size_t __pyx_t_23;
-  size_t __pyx_t_24;
-  size_t __pyx_t_25;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("forward", 0);
-
-  /* "madmom/ml/hmm.pyx":611
- *         """
- *         # transition model stuff
- *         tm = self.transition_model             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_transition_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 611, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_tm = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":612
- *         # transition model stuff
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 612, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_states = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":613
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers             # <<<<<<<<<<<<<<
- *         cdef double [::1] tm_probabilities = tm.probabilities
- *         cdef unsigned int num_states = tm.num_states
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 613, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 613, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_pointers = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":614
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_states = tm.num_states
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_probabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 614, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 614, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_tm_probabilities = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":615
- *         cdef uint32_t [::1] tm_pointers = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities
- *         cdef unsigned int num_states = tm.num_states             # <<<<<<<<<<<<<<
- * 
- *         # observation model stuff
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_tm, __pyx_n_s_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 615, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 615, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_num_states = __pyx_t_4;
-
-  /* "madmom/ml/hmm.pyx":618
- * 
- *         # observation model stuff
- *         om = self.observation_model             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities = om.densities(observations)
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_observation_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 618, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_om = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":619
- *         # observation model stuff
- *         om = self.observation_model
- *         cdef uint32_t [::1] om_pointers = om.pointers             # <<<<<<<<<<<<<<
- *         cdef double [:, ::1] om_densities = om.densities(observations)
- *         cdef unsigned int num_observations = len(om_densities)
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_om, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 619, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 619, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_om_pointers = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":620
- *         om = self.observation_model
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities = om.densities(observations)             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_observations = len(om_densities)
- * 
- */
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_om, __pyx_n_s_densities); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 620, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_t_6 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
-    if (likely(__pyx_t_6)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-      __Pyx_INCREF(__pyx_t_6);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_5, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_observations) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_observations);
-  __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 620, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 620, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_om_densities = __pyx_t_7;
-  __pyx_t_7.memview = NULL;
-  __pyx_t_7.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":621
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities = om.densities(observations)
- *         cdef unsigned int num_observations = len(om_densities)             # <<<<<<<<<<<<<<
- * 
- *         # reset HMM
- */
-  __pyx_t_8 = __Pyx_MemoryView_Len(__pyx_v_om_densities); 
-  __pyx_v_num_observations = __pyx_t_8;
-
-  /* "madmom/ml/hmm.pyx":624
- * 
- *         # reset HMM
- *         if reset:             # <<<<<<<<<<<<<<
- *             self.reset()
- * 
- */
-  __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_v_reset); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 624, __pyx_L1_error)
-  if (__pyx_t_9) {
-
-    /* "madmom/ml/hmm.pyx":625
- *         # reset HMM
- *         if reset:
- *             self.reset()             # <<<<<<<<<<<<<<
- * 
- *         # forward variables
- */
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_reset); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 625, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = NULL;
-    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-      __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
-      if (likely(__pyx_t_6)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-        __Pyx_INCREF(__pyx_t_6);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_5, function);
-      }
-    }
-    __pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallNoArg(__pyx_t_5);
-    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 625, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-    /* "madmom/ml/hmm.pyx":624
- * 
- *         # reset HMM
- *         if reset:             # <<<<<<<<<<<<<<
- *             self.reset()
- * 
- */
-  }
-
-  /* "madmom/ml/hmm.pyx":628
- * 
- *         # forward variables
- *         cdef double[::1] fwd_prev = self._prev             # <<<<<<<<<<<<<<
- *         cdef double[:, ::1] fwd = np.zeros((num_observations, num_states),
- *                                            dtype=np.float)
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self, __pyx_n_s_prev); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 628, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 628, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_v_fwd_prev = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":629
- *         # forward variables
- *         cdef double[::1] fwd_prev = self._prev
- *         cdef double[:, ::1] fwd = np.zeros((num_observations, num_states),             # <<<<<<<<<<<<<<
- *                                            dtype=np.float)
- * 
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_observations); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_6 = __Pyx_PyInt_From_unsigned_int(__pyx_v_num_states); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_6);
-  PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
-  __pyx_t_1 = 0;
-  __pyx_t_6 = 0;
-  __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_GIVEREF(__pyx_t_10);
-  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_10);
-  __pyx_t_10 = 0;
-
-  /* "madmom/ml/hmm.pyx":630
- *         cdef double[::1] fwd_prev = self._prev
- *         cdef double[:, ::1] fwd = np.zeros((num_observations, num_states),
- *                                            dtype=np.float)             # <<<<<<<<<<<<<<
- * 
- *         # define counters etc.
- */
-  __pyx_t_10 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 630, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 630, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 630, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_11);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (PyDict_SetItem(__pyx_t_10, __pyx_n_s_dtype, __pyx_t_11) < 0) __PYX_ERR(0, 630, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
-
-  /* "madmom/ml/hmm.pyx":629
- *         # forward variables
- *         cdef double[::1] fwd_prev = self._prev
- *         cdef double[:, ::1] fwd = np.zeros((num_observations, num_states),             # <<<<<<<<<<<<<<
- *                                            dtype=np.float)
- * 
- */
-  __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_11);
-  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_11, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 629, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
-  __pyx_v_fwd = __pyx_t_7;
-  __pyx_t_7.memview = NULL;
-  __pyx_t_7.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":637
- * 
- *         # iterate over all observations
- *         for frame in range(num_observations):             # <<<<<<<<<<<<<<
- *             # keep track of the normalisation sum
- *             prob_sum = 0
- */
-  __pyx_t_4 = __pyx_v_num_observations;
-  __pyx_t_12 = __pyx_t_4;
-  for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
-    __pyx_v_frame = __pyx_t_13;
-
-    /* "madmom/ml/hmm.pyx":639
- *         for frame in range(num_observations):
- *             # keep track of the normalisation sum
- *             prob_sum = 0             # <<<<<<<<<<<<<<
- *             # iterate over all states
- *             for state in range(num_states):
- */
-    __pyx_v_prob_sum = 0.0;
-
-    /* "madmom/ml/hmm.pyx":641
- *             prob_sum = 0
- *             # iterate over all states
- *             for state in range(num_states):             # <<<<<<<<<<<<<<
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_pointers[state],
- */
-    __pyx_t_14 = __pyx_v_num_states;
-    __pyx_t_15 = __pyx_t_14;
-    for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
-      __pyx_v_state = __pyx_t_16;
-
-      /* "madmom/ml/hmm.pyx":644
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_pointers[state],
- *                                           tm_pointers[state + 1]):             # <<<<<<<<<<<<<<
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *
- *                                           tm_probabilities[prev_pointer])
- */
-      __pyx_t_17 = (__pyx_v_state + 1);
-      __pyx_t_18 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_pointers.data) + __pyx_t_17)) )));
-
-      /* "madmom/ml/hmm.pyx":643
- *             for state in range(num_states):
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_pointers[state],             # <<<<<<<<<<<<<<
- *                                           tm_pointers[state + 1]):
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *
- */
-      __pyx_t_8 = __pyx_v_state;
-
-      /* "madmom/ml/hmm.pyx":644
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_pointers[state],
- *                                           tm_pointers[state + 1]):             # <<<<<<<<<<<<<<
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *
- *                                           tm_probabilities[prev_pointer])
- */
-      __pyx_t_19 = __pyx_t_18;
-      for (__pyx_t_20 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_pointers.data) + __pyx_t_8)) ))); __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
-
-        /* "madmom/ml/hmm.pyx":643
- *             for state in range(num_states):
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_pointers[state],             # <<<<<<<<<<<<<<
- *                                           tm_pointers[state + 1]):
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *
- */
-        __pyx_v_prev_pointer = __pyx_t_20;
-
-        /* "madmom/ml/hmm.pyx":645
- *                 for prev_pointer in range(tm_pointers[state],
- *                                           tm_pointers[state + 1]):
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *             # <<<<<<<<<<<<<<
- *                                           tm_probabilities[prev_pointer])
- *                 # multiply with the observation probability
- */
-        __pyx_t_21 = __pyx_v_prev_pointer;
-        __pyx_t_22 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_tm_states.data) + __pyx_t_21)) )));
-
-        /* "madmom/ml/hmm.pyx":646
- *                                           tm_pointers[state + 1]):
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *
- *                                           tm_probabilities[prev_pointer])             # <<<<<<<<<<<<<<
- *                 # multiply with the observation probability
- *                 fwd[frame, state] *= om_densities[frame, om_pointers[state]]
- */
-        __pyx_t_23 = __pyx_v_prev_pointer;
-
-        /* "madmom/ml/hmm.pyx":645
- *                 for prev_pointer in range(tm_pointers[state],
- *                                           tm_pointers[state + 1]):
- *                     fwd[frame, state] += (fwd_prev[tm_states[prev_pointer]] *             # <<<<<<<<<<<<<<
- *                                           tm_probabilities[prev_pointer])
- *                 # multiply with the observation probability
- */
-        __pyx_t_24 = __pyx_v_frame;
-        __pyx_t_25 = __pyx_v_state;
-        *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_fwd.data + __pyx_t_24 * __pyx_v_fwd.strides[0]) )) + __pyx_t_25)) )) += ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_fwd_prev.data) + __pyx_t_22)) ))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_tm_probabilities.data) + __pyx_t_23)) ))));
-      }
-
-      /* "madmom/ml/hmm.pyx":648
- *                                           tm_probabilities[prev_pointer])
- *                 # multiply with the observation probability
- *                 fwd[frame, state] *= om_densities[frame, om_pointers[state]]             # <<<<<<<<<<<<<<
- *                 prob_sum += fwd[frame, state]
- *             # normalise
- */
-      __pyx_t_8 = __pyx_v_state;
-      __pyx_t_23 = __pyx_v_frame;
-      __pyx_t_21 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_v_om_pointers.data) + __pyx_t_8)) )));
-      __pyx_t_22 = __pyx_v_frame;
-      __pyx_t_25 = __pyx_v_state;
-      *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_fwd.data + __pyx_t_22 * __pyx_v_fwd.strides[0]) )) + __pyx_t_25)) )) *= (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_om_densities.data + __pyx_t_23 * __pyx_v_om_densities.strides[0]) )) + __pyx_t_21)) )));
-
-      /* "madmom/ml/hmm.pyx":649
- *                 # multiply with the observation probability
- *                 fwd[frame, state] *= om_densities[frame, om_pointers[state]]
- *                 prob_sum += fwd[frame, state]             # <<<<<<<<<<<<<<
- *             # normalise
- *             norm_factor = 1. / prob_sum
- */
-      __pyx_t_8 = __pyx_v_frame;
-      __pyx_t_21 = __pyx_v_state;
-      __pyx_v_prob_sum = (__pyx_v_prob_sum + (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_fwd.data + __pyx_t_8 * __pyx_v_fwd.strides[0]) )) + __pyx_t_21)) ))));
-    }
-
-    /* "madmom/ml/hmm.pyx":651
- *                 prob_sum += fwd[frame, state]
- *             # normalise
- *             norm_factor = 1. / prob_sum             # <<<<<<<<<<<<<<
- *             for state in range(num_states):
- *                 fwd[frame, state] *= norm_factor
- */
-    __pyx_v_norm_factor = (1. / __pyx_v_prob_sum);
-
-    /* "madmom/ml/hmm.pyx":652
- *             # normalise
- *             norm_factor = 1. / prob_sum
- *             for state in range(num_states):             # <<<<<<<<<<<<<<
- *                 fwd[frame, state] *= norm_factor
- *                 # also save it as the previous variables for the next frame
- */
-    __pyx_t_14 = __pyx_v_num_states;
-    __pyx_t_15 = __pyx_t_14;
-    for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
-      __pyx_v_state = __pyx_t_16;
-
-      /* "madmom/ml/hmm.pyx":653
- *             norm_factor = 1. / prob_sum
- *             for state in range(num_states):
- *                 fwd[frame, state] *= norm_factor             # <<<<<<<<<<<<<<
- *                 # also save it as the previous variables for the next frame
- *                 fwd_prev[state] = fwd[frame, state]
- */
-      __pyx_t_21 = __pyx_v_frame;
-      __pyx_t_8 = __pyx_v_state;
-      *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_fwd.data + __pyx_t_21 * __pyx_v_fwd.strides[0]) )) + __pyx_t_8)) )) *= __pyx_v_norm_factor;
-
-      /* "madmom/ml/hmm.pyx":655
- *                 fwd[frame, state] *= norm_factor
- *                 # also save it as the previous variables for the next frame
- *                 fwd_prev[state] = fwd[frame, state]             # <<<<<<<<<<<<<<
- * 
- *         # return the forward variables
- */
-      __pyx_t_8 = __pyx_v_frame;
-      __pyx_t_21 = __pyx_v_state;
-      __pyx_t_23 = __pyx_v_state;
-      *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_fwd_prev.data) + __pyx_t_23)) )) = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_fwd.data + __pyx_t_8 * __pyx_v_fwd.strides[0]) )) + __pyx_t_21)) )));
-    }
-  }
-
-  /* "madmom/ml/hmm.pyx":658
- * 
- *         # return the forward variables
- *         return np.asarray(fwd)             # <<<<<<<<<<<<<<
- * 
- *     @cython.cdivision(True)
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_t_10 = __pyx_memoryview_fromslice(__pyx_v_fwd, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_10);
-  __pyx_t_5 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
-    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
-    if (likely(__pyx_t_5)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-      __Pyx_INCREF(__pyx_t_5);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_6, function);
-    }
-  }
-  __pyx_t_11 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_t_10) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_10);
-  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_11);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __pyx_r = __pyx_t_11;
-  __pyx_t_11 = 0;
-  goto __pyx_L0;
-
-  /* "madmom/ml/hmm.pyx":590
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward(self, observations, reset=True):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_2, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_3, 1);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
-  __Pyx_XDECREF(__pyx_t_10);
-  __Pyx_XDECREF(__pyx_t_11);
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.forward", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_tm);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_states, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_pointers, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_tm_probabilities, 1);
-  __Pyx_XDECREF(__pyx_v_om);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_om_pointers, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_om_densities, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_fwd_prev, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_v_fwd, 1);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
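
The generated C above implements the normalised forward pass of HiddenMarkovModel.forward, as mirrored in the hmm.pyx fragments in the comments: the transition model is stored CSR-style, so for state s, tm_states[tm_pointers[s]:tm_pointers[s + 1]] lists its predecessor states and tm_probabilities the matching transition probabilities, and instead of working in the log domain each frame is normalised. A minimal plain-NumPy sketch of the same computation follows; the toy two-state numbers are purely illustrative, not taken from madmom:

    import numpy as np

    def forward(tm_states, tm_pointers, tm_probabilities,
                om_densities, om_pointers, initial):
        num_observations, _ = om_densities.shape
        num_states = len(initial)
        fwd = np.zeros((num_observations, num_states))
        fwd_prev = initial.copy()
        for frame in range(num_observations):
            for state in range(num_states):
                # sum over all possible predecessor states (CSR layout)
                for ptr in range(tm_pointers[state], tm_pointers[state + 1]):
                    fwd[frame, state] += (fwd_prev[tm_states[ptr]] *
                                          tm_probabilities[ptr])
                # weight by this state's observation density
                fwd[frame, state] *= om_densities[frame, om_pointers[state]]
            # normalise at each step instead of computing in the log domain
            fwd[frame] /= fwd[frame].sum()
            fwd_prev = fwd[frame]
        return fwd

    # toy two-state example (illustrative numbers)
    tm_states = np.array([0, 1, 0, 1], dtype=np.uint32)   # predecessors
    tm_pointers = np.array([0, 2, 4], dtype=np.uint32)    # CSR offsets
    tm_probabilities = np.array([0.7, 0.3, 0.3, 0.7])
    om_pointers = np.array([0, 1], dtype=np.uint32)       # state -> density column
    om_densities = np.array([[0.9, 0.1], [0.2, 0.8], [0.1, 0.9]])
    print(forward(tm_states, tm_pointers, tm_probabilities,
                  om_densities, om_pointers, np.array([0.5, 0.5])))
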
-static PyObject *__pyx_gb_6madmom_2ml_3hmm_17HiddenMarkovModel_14generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
-
-/* "madmom/ml/hmm.pyx":664
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward_generator(self, observations, block_size=None):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_13forward_generator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_12forward_generator[] = "HiddenMarkovModel.forward_generator(self, observations, block_size=None)\n\n        Compute the forward variables at each time step. Instead of computing\n        in the log domain, we normalise at each step, which is faster for\n        the forward algorithm. This function is a generator that yields the\n        forward variables for each time step individually to save memory.\n        The observation densities are computed block-wise to save Python calls\n        in the inner loops.\n\n        Parameters\n        ----------\n        observations : numpy array\n            Observations to compute the forward variables for.\n        block_size : int, optional\n            Block size for the block-wise computation of observation densities.\n            If 'None', all observation densities will be computed at once.\n\n        Yields\n        ------\n        numpy array, shape (num_states,)\n            Forward variables.\n\n        ";
-static PyMethodDef __pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_13forward_generator = {"forward_generator", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_13forward_generator, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6madmom_2ml_3hmm_17HiddenMarkovModel_12forward_generator};
-static PyObject *__pyx_pw_6madmom_2ml_3hmm_17HiddenMarkovModel_13forward_generator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_self = 0;
-  PyObject *__pyx_v_observations = 0;
-  PyObject *__pyx_v_block_size = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("forward_generator (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_observations,&__pyx_n_s_block_size,0};
-    PyObject* values[3] = {0,0,0};
-    values[2] = ((PyObject *)((PyObject *)Py_None));
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_observations)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("forward_generator", 0, 2, 3, 1); __PYX_ERR(0, 664, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_block_size);
-          if (value) { values[2] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "forward_generator") < 0)) __PYX_ERR(0, 664, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_self = values[0];
-    __pyx_v_observations = values[1];
-    __pyx_v_block_size = values[2];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("forward_generator", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 664, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.forward_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_12forward_generator(__pyx_self, __pyx_v_self, __pyx_v_observations, __pyx_v_block_size);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_6madmom_2ml_3hmm_17HiddenMarkovModel_12forward_generator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_observations, PyObject *__pyx_v_block_size) {
-  struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *__pyx_cur_scope;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("forward_generator", 0);
-  __pyx_cur_scope = (struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)__pyx_tp_new_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator(__pyx_ptype_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator, __pyx_empty_tuple, NULL);
-  if (unlikely(!__pyx_cur_scope)) {
-    __pyx_cur_scope = ((struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)Py_None);
-    __Pyx_INCREF(Py_None);
-    __PYX_ERR(0, 664, __pyx_L1_error)
-  } else {
-    __Pyx_GOTREF(__pyx_cur_scope);
-  }
-  __pyx_cur_scope->__pyx_v_self = __pyx_v_self;
-  __Pyx_INCREF(__pyx_cur_scope->__pyx_v_self);
-  __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_self);
-  __pyx_cur_scope->__pyx_v_observations = __pyx_v_observations;
-  __Pyx_INCREF(__pyx_cur_scope->__pyx_v_observations);
-  __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_observations);
-  __pyx_cur_scope->__pyx_v_block_size = __pyx_v_block_size;
-  __Pyx_INCREF(__pyx_cur_scope->__pyx_v_block_size);
-  __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_block_size);
-  {
-    __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_6madmom_2ml_3hmm_17HiddenMarkovModel_14generator, __pyx_codeobj__8, (PyObject *) __pyx_cur_scope, __pyx_n_s_forward_generator, __pyx_n_s_HiddenMarkovModel_forward_genera, __pyx_n_s_madmom_ml_hmm); if (unlikely(!gen)) __PYX_ERR(0, 664, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_cur_scope);
-    __Pyx_RefNannyFinishContext();
-    return (PyObject *) gen;
-  }
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_AddTraceback("madmom.ml.hmm.HiddenMarkovModel.forward_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_gb_6madmom_2ml_3hmm_17HiddenMarkovModel_14generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
-{
-  struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *__pyx_cur_scope = ((struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)__pyx_generator->closure);
-  PyObject *__pyx_r = NULL;
-  PyObject *__pyx_t_1 = NULL;
-  __Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  __Pyx_memviewslice __pyx_t_3 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  PyObject *__pyx_t_9 = NULL;
-  int __pyx_t_10;
-  unsigned int __pyx_t_11;
-  unsigned int __pyx_t_12;
-  __Pyx_memviewslice __pyx_t_13 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  unsigned int __pyx_t_14;
-  unsigned int __pyx_t_15;
-  unsigned int __pyx_t_16;
-  Py_ssize_t __pyx_t_17;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_18;
-  size_t __pyx_t_19;
-  __pyx_t_6madmom_2ml_3hmm_uint32_t __pyx_t_20;
-  unsigned int __pyx_t_21;
-  size_t __pyx_t_22;
-  size_t __pyx_t_23;
-  size_t __pyx_t_24;
-  size_t __pyx_t_25;
-  __Pyx_memviewslice __pyx_t_26 = { 0, 0, { 0 }, { 0 }, { 0 } };
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("forward_generator", 0);
-  switch (__pyx_generator->resume_label) {
-    case 0: goto __pyx_L3_first_run;
-    case 1: goto __pyx_L13_resume_from_yield;
-    default: /* CPython raises the right error here */
-    __Pyx_RefNannyFinishContext();
-    return NULL;
-  }
-  __pyx_L3_first_run:;
-  if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 664, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":688
- *         """
- *         # transition model stuff
- *         tm = self.transition_model             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_ptrs = tm.pointers
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_transition_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 688, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __pyx_cur_scope->__pyx_v_tm = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":689
- *         # transition model stuff
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] tm_ptrs = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_tm, __pyx_n_s_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 689, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 689, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_cur_scope->__pyx_v_tm_states = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":690
- *         tm = self.transition_model
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_ptrs = tm.pointers             # <<<<<<<<<<<<<<
- *         cdef double [::1] tm_probabilities = tm.probabilities
- *         cdef unsigned int num_states = tm.num_states
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_tm, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 690, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 690, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_cur_scope->__pyx_v_tm_ptrs = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":691
- *         cdef uint32_t [::1] tm_states = tm.states
- *         cdef uint32_t [::1] tm_ptrs = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_states = tm.num_states
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_tm, __pyx_n_s_probabilities); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 691, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 691, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_cur_scope->__pyx_v_tm_probabilities = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":692
- *         cdef uint32_t [::1] tm_ptrs = tm.pointers
- *         cdef double [::1] tm_probabilities = tm.probabilities
- *         cdef unsigned int num_states = tm.num_states             # <<<<<<<<<<<<<<
- * 
- *         # observation model stuff
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_tm, __pyx_n_s_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 692, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 692, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_cur_scope->__pyx_v_num_states = __pyx_t_4;
-
-  /* "madmom/ml/hmm.pyx":695
- * 
- *         # observation model stuff
- *         om = self.observation_model             # <<<<<<<<<<<<<<
- *         cdef unsigned int num_observations = len(observations)
- *         cdef uint32_t [::1] om_pointers = om.pointers
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_observation_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 695, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __pyx_cur_scope->__pyx_v_om = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":696
- *         # observation model stuff
- *         om = self.observation_model
- *         cdef unsigned int num_observations = len(observations)             # <<<<<<<<<<<<<<
- *         cdef uint32_t [::1] om_pointers = om.pointers
- *         cdef double [:, ::1] om_densities
- */
-  __pyx_t_5 = PyObject_Length(__pyx_cur_scope->__pyx_v_observations); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 696, __pyx_L1_error)
-  __pyx_cur_scope->__pyx_v_num_observations = __pyx_t_5;
-
-  /* "madmom/ml/hmm.pyx":697
- *         om = self.observation_model
- *         cdef unsigned int num_observations = len(observations)
- *         cdef uint32_t [::1] om_pointers = om.pointers             # <<<<<<<<<<<<<<
- *         cdef double [:, ::1] om_densities
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_om, __pyx_n_s_pointers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 697, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_2.memview)) __PYX_ERR(0, 697, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_cur_scope->__pyx_v_om_pointers = __pyx_t_2;
-  __pyx_t_2.memview = NULL;
-  __pyx_t_2.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":701
- * 
- *         # forward variables
- *         cdef double[::1] fwd_cur = np.zeros(num_states, dtype=np.float)             # <<<<<<<<<<<<<<
- *         cdef double[::1] fwd_prev = self.initial_distribution.copy()
- * 
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_cur_scope->__pyx_v_num_states); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_8);
-  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_float); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-  if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_9) < 0) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_9, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 701, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_cur_scope->__pyx_v_fwd_cur = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":702
- *         # forward variables
- *         cdef double[::1] fwd_cur = np.zeros(num_states, dtype=np.float)
- *         cdef double[::1] fwd_prev = self.initial_distribution.copy()             # <<<<<<<<<<<<<<
- * 
- *         # define counters etc.
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 702, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_copy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 702, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_7);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
-    __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_7);
-    if (likely(__pyx_t_1)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-      __Pyx_INCREF(__pyx_t_1);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_7, function);
-    }
-  }
-  __pyx_t_9 = (__pyx_t_1) ? __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1) : __Pyx_PyObject_CallNoArg(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-  if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 702, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_9);
-  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-  __pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_t_9, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 702, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-  __pyx_cur_scope->__pyx_v_fwd_prev = __pyx_t_3;
-  __pyx_t_3.memview = NULL;
-  __pyx_t_3.data = NULL;
-
-  /* "madmom/ml/hmm.pyx":711
- *         # keep track which observations om_densities currently contains
- *         # obs_start is the first observation index, obs_end the last one
- *         obs_start = 0             # <<<<<<<<<<<<<<
- *         obs_end = 0
- * 
- */
-  __pyx_cur_scope->__pyx_v_obs_start = 0;
-
-  /* "madmom/ml/hmm.pyx":712
- *         # obs_start is the first observation index, obs_end the last one
- *         obs_start = 0
- *         obs_end = 0             # <<<<<<<<<<<<<<
- * 
- *         # compute everything at once if block_size was set to None
- */
-  __pyx_cur_scope->__pyx_v_obs_end = 0;
-
-  /* "madmom/ml/hmm.pyx":715
- * 
- *         # compute everything at once if block_size was set to None
- *         block_sz = num_observations if block_size is None else block_size             # <<<<<<<<<<<<<<
- * 
- *         # iterate over all observations
- */
-  __pyx_t_10 = (__pyx_cur_scope->__pyx_v_block_size == Py_None);
-  if ((__pyx_t_10 != 0)) {
-    __pyx_t_4 = __pyx_cur_scope->__pyx_v_num_observations;
-  } else {
-    __pyx_t_11 = __Pyx_PyInt_As_unsigned_int(__pyx_cur_scope->__pyx_v_block_size); if (unlikely((__pyx_t_11 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 715, __pyx_L1_error)
-    __pyx_t_4 = __pyx_t_11;
-  }
-  __pyx_cur_scope->__pyx_v_block_sz = __pyx_t_4;
-
-  /* "madmom/ml/hmm.pyx":718
- * 
- *         # iterate over all observations
- *         for frame in range(num_observations):             # <<<<<<<<<<<<<<
- *             # keep track of the normalisation sum
- *             prob_sum = 0
- */
-  __pyx_t_4 = __pyx_cur_scope->__pyx_v_num_observations;
-  __pyx_t_11 = __pyx_t_4;
-  for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
-    __pyx_cur_scope->__pyx_v_frame = __pyx_t_12;
-
-    /* "madmom/ml/hmm.pyx":720
- *         for frame in range(num_observations):
- *             # keep track of the normalisation sum
- *             prob_sum = 0             # <<<<<<<<<<<<<<
- * 
- *             # initialise forward variables
- */
-    __pyx_cur_scope->__pyx_v_prob_sum = 0.0;
-
-    /* "madmom/ml/hmm.pyx":723
- * 
- *             # initialise forward variables
- *             fwd_cur[:] = 0.0             # <<<<<<<<<<<<<<
- * 
- *             # check if we have to compute another block of observation densities
- */
-    {
-        double __pyx_temp_scalar = 0.0;
-        {
-            Py_ssize_t __pyx_temp_extent = __pyx_cur_scope->__pyx_v_fwd_cur.shape[0];
-            Py_ssize_t __pyx_temp_idx;
-            double *__pyx_temp_pointer = (double *) __pyx_cur_scope->__pyx_v_fwd_cur.data;
-            for (__pyx_temp_idx = 0; __pyx_temp_idx < __pyx_temp_extent; __pyx_temp_idx++) {
-              *((double *) __pyx_temp_pointer) = __pyx_temp_scalar;
-              __pyx_temp_pointer += 1;
-            }
-        }
-    }
-
-    /* "madmom/ml/hmm.pyx":726
- * 
- *             # check if we have to compute another block of observation densities
- *             if frame >= obs_end:             # <<<<<<<<<<<<<<
- *                 obs_start = frame
- *                 obs_end = obs_start + block_sz
- */
-    __pyx_t_10 = ((__pyx_cur_scope->__pyx_v_frame >= __pyx_cur_scope->__pyx_v_obs_end) != 0);
-    if (__pyx_t_10) {
-
-      /* "madmom/ml/hmm.pyx":727
- *             # check if we have to compute another block of observation densities
- *             if frame >= obs_end:
- *                 obs_start = frame             # <<<<<<<<<<<<<<
- *                 obs_end = obs_start + block_sz
- *                 om_densities = om.densities(observations[obs_start:obs_end])
- */
-      __pyx_cur_scope->__pyx_v_obs_start = __pyx_cur_scope->__pyx_v_frame;
-
-      /* "madmom/ml/hmm.pyx":728
- *             if frame >= obs_end:
- *                 obs_start = frame
- *                 obs_end = obs_start + block_sz             # <<<<<<<<<<<<<<
- *                 om_densities = om.densities(observations[obs_start:obs_end])
- * 
- */
-      __pyx_cur_scope->__pyx_v_obs_end = (__pyx_cur_scope->__pyx_v_obs_start + __pyx_cur_scope->__pyx_v_block_sz);
-
-      /* "madmom/ml/hmm.pyx":729
- *                 obs_start = frame
- *                 obs_end = obs_start + block_sz
- *                 om_densities = om.densities(observations[obs_start:obs_end])             # <<<<<<<<<<<<<<
- * 
- *             # iterate over all states
- */
-      __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_om, __pyx_n_s_densities); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 729, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_7);
-      __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_cur_scope->__pyx_v_observations, __pyx_cur_scope->__pyx_v_obs_start, __pyx_cur_scope->__pyx_v_obs_end, NULL, NULL, NULL, 1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 729, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_1);
-      __pyx_t_6 = NULL;
-      if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
-        __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7);
-        if (likely(__pyx_t_6)) {
-          PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-          __Pyx_INCREF(__pyx_t_6);
-          __Pyx_INCREF(function);
-          __Pyx_DECREF_SET(__pyx_t_7, function);
-        }
-      }
-      __pyx_t_9 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_6, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1);
-      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-      if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 729, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-      __pyx_t_13 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_9, PyBUF_WRITABLE); if (unlikely(!__pyx_t_13.memview)) __PYX_ERR(0, 729, __pyx_L1_error)
-      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      __PYX_XDEC_MEMVIEW(&__pyx_cur_scope->__pyx_v_om_densities, 1);
-      __pyx_cur_scope->__pyx_v_om_densities = __pyx_t_13;
-      __pyx_t_13.memview = NULL;
-      __pyx_t_13.data = NULL;
-
-      /* "madmom/ml/hmm.pyx":726
- * 
- *             # check if we have to compute another block of observation densities
- *             if frame >= obs_end:             # <<<<<<<<<<<<<<
- *                 obs_start = frame
- *                 obs_end = obs_start + block_sz
- */
-    }
-
-    /* "madmom/ml/hmm.pyx":732
- * 
- *             # iterate over all states
- *             for state in range(num_states):             # <<<<<<<<<<<<<<
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_ptrs[state], tm_ptrs[state + 1]):
- */
-    __pyx_t_14 = __pyx_cur_scope->__pyx_v_num_states;
-    __pyx_t_15 = __pyx_t_14;
-    for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
-      __pyx_cur_scope->__pyx_v_state = __pyx_t_16;
-
-      /* "madmom/ml/hmm.pyx":734
- *             for state in range(num_states):
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_ptrs[state], tm_ptrs[state + 1]):             # <<<<<<<<<<<<<<
- *                     fwd_cur[state] += fwd_prev[tm_states[prev_pointer]] * \
- *                                       tm_probabilities[prev_pointer]
- */
-      __pyx_t_17 = (__pyx_cur_scope->__pyx_v_state + 1);
-      __pyx_t_18 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_cur_scope->__pyx_v_tm_ptrs.data) + __pyx_t_17)) )));
-      __pyx_t_19 = __pyx_cur_scope->__pyx_v_state;
-      __pyx_t_20 = __pyx_t_18;
-      for (__pyx_t_21 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_cur_scope->__pyx_v_tm_ptrs.data) + __pyx_t_19)) ))); __pyx_t_21 < __pyx_t_20; __pyx_t_21+=1) {
-        __pyx_cur_scope->__pyx_v_prev_pointer = __pyx_t_21;
-
-        /* "madmom/ml/hmm.pyx":735
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_ptrs[state], tm_ptrs[state + 1]):
- *                     fwd_cur[state] += fwd_prev[tm_states[prev_pointer]] * \             # <<<<<<<<<<<<<<
- *                                       tm_probabilities[prev_pointer]
- *                 # multiply with the observation probability
- */
-        __pyx_t_22 = __pyx_cur_scope->__pyx_v_prev_pointer;
-        __pyx_t_23 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_cur_scope->__pyx_v_tm_states.data) + __pyx_t_22)) )));
-
-        /* "madmom/ml/hmm.pyx":736
- *                 for prev_pointer in range(tm_ptrs[state], tm_ptrs[state + 1]):
- *                     fwd_cur[state] += fwd_prev[tm_states[prev_pointer]] * \
- *                                       tm_probabilities[prev_pointer]             # <<<<<<<<<<<<<<
- *                 # multiply with the observation probability
- *                 fwd_cur[state] *= om_densities[frame - obs_start,
- */
-        __pyx_t_24 = __pyx_cur_scope->__pyx_v_prev_pointer;
-
-        /* "madmom/ml/hmm.pyx":735
- *                 # sum over all possible predecessors
- *                 for prev_pointer in range(tm_ptrs[state], tm_ptrs[state + 1]):
- *                     fwd_cur[state] += fwd_prev[tm_states[prev_pointer]] * \             # <<<<<<<<<<<<<<
- *                                       tm_probabilities[prev_pointer]
- *                 # multiply with the observation probability
- */
-        __pyx_t_25 = __pyx_cur_scope->__pyx_v_state;
-        *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_fwd_cur.data) + __pyx_t_25)) )) += ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_fwd_prev.data) + __pyx_t_23)) ))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_tm_probabilities.data) + __pyx_t_24)) ))));
-      }
-
-      /* "madmom/ml/hmm.pyx":739
- *                 # multiply with the observation probability
- *                 fwd_cur[state] *= om_densities[frame - obs_start,
- *                                                om_pointers[state]]             # <<<<<<<<<<<<<<
- *                 prob_sum += fwd_cur[state]
- *             # normalise
- */
-      __pyx_t_19 = __pyx_cur_scope->__pyx_v_state;
-
-      /* "madmom/ml/hmm.pyx":738
- *                                       tm_probabilities[prev_pointer]
- *                 # multiply with the observation probability
- *                 fwd_cur[state] *= om_densities[frame - obs_start,             # <<<<<<<<<<<<<<
- *                                                om_pointers[state]]
- *                 prob_sum += fwd_cur[state]
- */
-      __pyx_t_24 = (__pyx_cur_scope->__pyx_v_frame - __pyx_cur_scope->__pyx_v_obs_start);
-      __pyx_t_22 = (*((__pyx_t_6madmom_2ml_3hmm_uint32_t *) ( /* dim=0 */ ((char *) (((__pyx_t_6madmom_2ml_3hmm_uint32_t *) __pyx_cur_scope->__pyx_v_om_pointers.data) + __pyx_t_19)) )));
-      __pyx_t_23 = __pyx_cur_scope->__pyx_v_state;
-      *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_fwd_cur.data) + __pyx_t_23)) )) *= (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_cur_scope->__pyx_v_om_densities.data + __pyx_t_24 * __pyx_cur_scope->__pyx_v_om_densities.strides[0]) )) + __pyx_t_22)) )));
-
-      /* "madmom/ml/hmm.pyx":740
- *                 fwd_cur[state] *= om_densities[frame - obs_start,
- *                                                om_pointers[state]]
- *                 prob_sum += fwd_cur[state]             # <<<<<<<<<<<<<<
- *             # normalise
- *             norm_factor = 1. / prob_sum
- */
-      __pyx_t_19 = __pyx_cur_scope->__pyx_v_state;
-      __pyx_cur_scope->__pyx_v_prob_sum = (__pyx_cur_scope->__pyx_v_prob_sum + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_fwd_cur.data) + __pyx_t_19)) ))));
-    }
-
-    /* "madmom/ml/hmm.pyx":742
- *                 prob_sum += fwd_cur[state]
- *             # normalise
- *             norm_factor = 1. / prob_sum             # <<<<<<<<<<<<<<
- *             for state in range(num_states):
- *                 fwd_cur[state] *= norm_factor
- */
-    __pyx_cur_scope->__pyx_v_norm_factor = (1. / __pyx_cur_scope->__pyx_v_prob_sum);
-
-    /* "madmom/ml/hmm.pyx":743
- *             # normalise
- *             norm_factor = 1. / prob_sum
- *             for state in range(num_states):             # <<<<<<<<<<<<<<
- *                 fwd_cur[state] *= norm_factor
- * 
- */
-    __pyx_t_14 = __pyx_cur_scope->__pyx_v_num_states;
-    __pyx_t_15 = __pyx_t_14;
-    for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
-      __pyx_cur_scope->__pyx_v_state = __pyx_t_16;
-
-      /* "madmom/ml/hmm.pyx":744
- *             norm_factor = 1. / prob_sum
- *             for state in range(num_states):
- *                 fwd_cur[state] *= norm_factor             # <<<<<<<<<<<<<<
- * 
- *             # yield the current forward variables
- */
-      __pyx_t_19 = __pyx_cur_scope->__pyx_v_state;
-      *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_cur_scope->__pyx_v_fwd_cur.data) + __pyx_t_19)) )) *= __pyx_cur_scope->__pyx_v_norm_factor;
-    }
-
-    /* "madmom/ml/hmm.pyx":747
- * 
- *             # yield the current forward variables
- *             yield np.asarray(fwd_cur).copy()             # <<<<<<<<<<<<<<
- * 
- *             fwd_cur, fwd_prev = fwd_prev, fwd_cur
- */
-    __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_cur_scope->__pyx_v_fwd_cur, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_8 = NULL;
-    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
-      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6);
-      if (likely(__pyx_t_8)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-        __Pyx_INCREF(__pyx_t_8);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_6, function);
-      }
-    }
-    __pyx_t_7 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_8, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1);
-    __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_copy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __pyx_t_7 = NULL;
-    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
-      __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
-      if (likely(__pyx_t_7)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-        __Pyx_INCREF(__pyx_t_7);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_6, function);
-      }
-    }
-    __pyx_t_9 = (__pyx_t_7) ? __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7) : __Pyx_PyObject_CallNoArg(__pyx_t_6);
-    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-    if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 747, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_9);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __pyx_r = __pyx_t_9;
-    __pyx_t_9 = 0;
-    __pyx_cur_scope->__pyx_t_0 = __pyx_t_4;
-    __pyx_cur_scope->__pyx_t_1 = __pyx_t_11;
-    __pyx_cur_scope->__pyx_t_2 = __pyx_t_12;
-    __Pyx_XGIVEREF(__pyx_r);
-    __Pyx_RefNannyFinishContext();
-    __Pyx_Coroutine_ResetAndClearException(__pyx_generator);
-    /* return from generator, yielding value */
-    __pyx_generator->resume_label = 1;
-    return __pyx_r;
-    __pyx_L13_resume_from_yield:;
-    __pyx_t_4 = __pyx_cur_scope->__pyx_t_0;
-    __pyx_t_11 = __pyx_cur_scope->__pyx_t_1;
-    __pyx_t_12 = __pyx_cur_scope->__pyx_t_2;
-    if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 747, __pyx_L1_error)
-
-    /* "madmom/ml/hmm.pyx":749
- *             yield np.asarray(fwd_cur).copy()
- * 
- *             fwd_cur, fwd_prev = fwd_prev, fwd_cur             # <<<<<<<<<<<<<<
- * 
- * # alias
- */
-    __pyx_t_3 = __pyx_cur_scope->__pyx_v_fwd_prev;
-    __PYX_INC_MEMVIEW(&__pyx_t_3, 1);
-    __pyx_t_26 = __pyx_cur_scope->__pyx_v_fwd_cur;
-    __PYX_INC_MEMVIEW(&__pyx_t_26, 1);
-    __PYX_XDEC_MEMVIEW(&__pyx_cur_scope->__pyx_v_fwd_cur, 1);
-    __pyx_cur_scope->__pyx_v_fwd_cur = __pyx_t_3;
-    __pyx_t_3.memview = NULL;
-    __pyx_t_3.data = NULL;
-    __PYX_XDEC_MEMVIEW(&__pyx_cur_scope->__pyx_v_fwd_prev, 1);
-    __pyx_cur_scope->__pyx_v_fwd_prev = __pyx_t_26;
-    __pyx_t_26.memview = NULL;
-    __pyx_t_26.data = NULL;
-  }
-  CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
-
-  /* "madmom/ml/hmm.pyx":664
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward_generator(self, observations, block_size=None):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-
-  /* function exit code */
-  PyErr_SetNone(PyExc_StopIteration);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_2, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_3, 1);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_XDECREF(__pyx_t_9);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_13, 1);
-  __PYX_XDEC_MEMVIEW(&__pyx_t_26, 1);
-  __Pyx_AddTraceback("forward_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_r); __pyx_r = 0;
-  #if !CYTHON_USE_EXC_INFO_STACK
-  __Pyx_Coroutine_ResetAndClearException(__pyx_generator);
-  #endif
-  __pyx_generator->resume_label = -1;
-  __Pyx_Coroutine_clear((PyObject*)__pyx_generator);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
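-/* The block above is Cython's generated state machine for a Python generator:
- * locals are hoisted into __pyx_cur_scope, resume_label records where execution
- * left off, and __pyx_L13_resume_from_yield is the re-entry point after the
- * yield.  A non-authoritative Cython sketch of the pattern being compiled,
- * using the names quoted from hmm.pyx above:
- *
- *     def forward_generator(self, observations, block_size=None):
- *         ...
- *         yield np.asarray(fwd_cur).copy()
- *         fwd_cur, fwd_prev = fwd_prev, fwd_cur
- */
-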
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735
- * ctypedef npy_cdouble     complex_t
- * 
- * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(1, <void*>a)
- * 
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":736
- * 
- * cdef inline object PyArray_MultiIterNew1(a):
- *     return PyArray_MultiIterNew(1, <void*>a)             # <<<<<<<<<<<<<<
- * 
- * cdef inline object PyArray_MultiIterNew2(a, b):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 736, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735
- * ctypedef npy_cdouble     complex_t
- * 
- * cdef inline object PyArray_MultiIterNew1(a):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(1, <void*>a)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738
- *     return PyArray_MultiIterNew(1, <void*>a)
- * 
- * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
- * 
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":739
- * 
- * cdef inline object PyArray_MultiIterNew2(a, b):
- *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)             # <<<<<<<<<<<<<<
- * 
- * cdef inline object PyArray_MultiIterNew3(a, b, c):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 739, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738
- *     return PyArray_MultiIterNew(1, <void*>a)
- * 
- * cdef inline object PyArray_MultiIterNew2(a, b):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741
- *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
- * 
- * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
- * 
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":742
- * 
- * cdef inline object PyArray_MultiIterNew3(a, b, c):
- *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)             # <<<<<<<<<<<<<<
- * 
- * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 742, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741
- *     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
- * 
- * cdef inline object PyArray_MultiIterNew3(a, b, c):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744
- *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
- * 
- * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
- * 
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":745
- * 
- * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
- *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)             # <<<<<<<<<<<<<<
- * 
- * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 745, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744
- *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
- * 
- * cdef inline object PyArray_MultiIterNew4(a, b, c, d):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747
- *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
- * 
- * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
- * 
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":748
- * 
- * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
- *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)             # <<<<<<<<<<<<<<
- * 
- * cdef inline tuple PyDataType_SHAPE(dtype d):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 748, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747
- *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
- * 
- * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):             # <<<<<<<<<<<<<<
- *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750
- *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
- * 
- * cdef inline tuple PyDataType_SHAPE(dtype d):             # <<<<<<<<<<<<<<
- *     if PyDataType_HASSUBARRAY(d):
- *         return <tuple>d.subarray.shape
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751
- * 
- * cdef inline tuple PyDataType_SHAPE(dtype d):
- *     if PyDataType_HASSUBARRAY(d):             # <<<<<<<<<<<<<<
- *         return <tuple>d.subarray.shape
- *     else:
- */
-  __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
-  if (__pyx_t_1) {
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":752
- * cdef inline tuple PyDataType_SHAPE(dtype d):
- *     if PyDataType_HASSUBARRAY(d):
- *         return <tuple>d.subarray.shape             # <<<<<<<<<<<<<<
- *     else:
- *         return ()
- */
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
-    __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
-    goto __pyx_L0;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751
- * 
- * cdef inline tuple PyDataType_SHAPE(dtype d):
- *     if PyDataType_HASSUBARRAY(d):             # <<<<<<<<<<<<<<
- *         return <tuple>d.subarray.shape
- *     else:
- */
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":754
- *         return <tuple>d.subarray.shape
- *     else:
- *         return ()             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  /*else*/ {
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_INCREF(__pyx_empty_tuple);
-    __pyx_r = __pyx_empty_tuple;
-    goto __pyx_L0;
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750
- *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
- * 
- * cdef inline tuple PyDataType_SHAPE(dtype d):             # <<<<<<<<<<<<<<
- *     if PyDataType_HASSUBARRAY(d):
- *         return <tuple>d.subarray.shape
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
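-/* The PyArray_MultiIterNew1..5 helpers above are inline wrappers from numpy's
- * __init__.pxd around the C-API broadcasting iterator, and PyDataType_SHAPE
- * returns a dtype's subarray shape (an empty tuple when there is none).  A
- * hedged sketch of how such cimported helpers are typically used, where
- * `a`, `b`, and `descr` stand in for caller-supplied objects:
- *
- *     cimport numpy as cnp
- *     it = cnp.PyArray_MultiIterNew2(a, b)     # broadcast a and b together
- *     sub = cnp.PyDataType_SHAPE(descr)        # () unless descr has a subarray
- */
-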
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":931
- *     int _import_umath() except -1
- * 
- * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
- *     Py_INCREF(base) # important to do this before stealing the reference below!
- *     PyArray_SetBaseObject(arr, base)
- */
-
-static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("set_array_base", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":932
- * 
- * cdef inline void set_array_base(ndarray arr, object base):
- *     Py_INCREF(base) # important to do this before stealing the reference below!             # <<<<<<<<<<<<<<
- *     PyArray_SetBaseObject(arr, base)
- * 
- */
-  Py_INCREF(__pyx_v_base);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":933
- * cdef inline void set_array_base(ndarray arr, object base):
- *     Py_INCREF(base) # important to do this before stealing the reference below!
- *     PyArray_SetBaseObject(arr, base)             # <<<<<<<<<<<<<<
- * 
- * cdef inline object get_array_base(ndarray arr):
- */
-  (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":931
- *     int _import_umath() except -1
- * 
- * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
- *     Py_INCREF(base) # important to do this before stealing the reference below!
- *     PyArray_SetBaseObject(arr, base)
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935
- *     PyArray_SetBaseObject(arr, base)
- * 
- * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
- *     base = PyArray_BASE(arr)
- *     if base is NULL:
- */
-
-static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
-  PyObject *__pyx_v_base;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  __Pyx_RefNannySetupContext("get_array_base", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":936
- * 
- * cdef inline object get_array_base(ndarray arr):
- *     base = PyArray_BASE(arr)             # <<<<<<<<<<<<<<
- *     if base is NULL:
- *         return None
- */
-  __pyx_v_base = PyArray_BASE(__pyx_v_arr);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":937
- * cdef inline object get_array_base(ndarray arr):
- *     base = PyArray_BASE(arr)
- *     if base is NULL:             # <<<<<<<<<<<<<<
- *         return None
- *     return <object>base
- */
-  __pyx_t_1 = ((__pyx_v_base == NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":938
- *     base = PyArray_BASE(arr)
- *     if base is NULL:
- *         return None             # <<<<<<<<<<<<<<
- *     return <object>base
- * 
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-    goto __pyx_L0;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":937
- * cdef inline object get_array_base(ndarray arr):
- *     base = PyArray_BASE(arr)
- *     if base is NULL:             # <<<<<<<<<<<<<<
- *         return None
- *     return <object>base
- */
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":939
- *     if base is NULL:
- *         return None
- *     return <object>base             # <<<<<<<<<<<<<<
- * 
- * # Versions of the import_* functions which are more suitable for
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(((PyObject *)__pyx_v_base));
-  __pyx_r = ((PyObject *)__pyx_v_base);
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935
- *     PyArray_SetBaseObject(arr, base)
- * 
- * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
- *     base = PyArray_BASE(arr)
- *     if base is NULL:
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
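-/* set_array_base/get_array_base above implement numpy's ownership idiom: the
- * base object keeps an array's memory alive, Py_INCREF runs first because
- * PyArray_SetBaseObject steals a reference, and get_array_base returns None
- * for arrays that own their data.  A sketch, assuming a cimported numpy and
- * caller-supplied `arr`/`owner`:
- *
- *     cimport numpy as cnp
- *     cnp.set_array_base(arr, owner)           # arr now keeps owner alive
- *     assert cnp.get_array_base(arr) is owner
- */
-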
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":943
- * # Versions of the import_* functions which are more suitable for
- * # Cython code.
- * cdef inline int import_array() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         __pyx_import_array()
- */
-
-static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("import_array", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944
- * # Cython code.
- * cdef inline int import_array() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         __pyx_import_array()
- *     except Exception:
- */
-  {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
-    __Pyx_XGOTREF(__pyx_t_1);
-    __Pyx_XGOTREF(__pyx_t_2);
-    __Pyx_XGOTREF(__pyx_t_3);
-    /*try:*/ {
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":945
- * cdef inline int import_array() except -1:
- *     try:
- *         __pyx_import_array()             # <<<<<<<<<<<<<<
- *     except Exception:
- *         raise ImportError("numpy.core.multiarray failed to import")
- */
-      __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L3_error)
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944
- * # Cython code.
- * cdef inline int import_array() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         __pyx_import_array()
- *     except Exception:
- */
-    }
-    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-    goto __pyx_L8_try_end;
-    __pyx_L3_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":946
- *     try:
- *         __pyx_import_array()
- *     except Exception:             # <<<<<<<<<<<<<<
- *         raise ImportError("numpy.core.multiarray failed to import")
- * 
- */
-    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
-    if (__pyx_t_4) {
-      __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
-      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 946, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_GOTREF(__pyx_t_7);
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947
- *         __pyx_import_array()
- *     except Exception:
- *         raise ImportError("numpy.core.multiarray failed to import")             # <<<<<<<<<<<<<<
- * 
- * cdef inline int import_umath() except -1:
- */
-      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 947, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-      __PYX_ERR(1, 947, __pyx_L5_except_error)
-    }
-    goto __pyx_L5_except_error;
-    __pyx_L5_except_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944
- * # Cython code.
- * cdef inline int import_array() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         __pyx_import_array()
- *     except Exception:
- */
-    __Pyx_XGIVEREF(__pyx_t_1);
-    __Pyx_XGIVEREF(__pyx_t_2);
-    __Pyx_XGIVEREF(__pyx_t_3);
-    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
-    goto __pyx_L1_error;
-    __pyx_L8_try_end:;
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":943
- * # Versions of the import_* functions which are more suitable for
- * # Cython code.
- * cdef inline int import_array() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         __pyx_import_array()
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":949
- *         raise ImportError("numpy.core.multiarray failed to import")
- * 
- * cdef inline int import_umath() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         _import_umath()
- */
-
-static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("import_umath", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950
- * 
- * cdef inline int import_umath() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-  {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
-    __Pyx_XGOTREF(__pyx_t_1);
-    __Pyx_XGOTREF(__pyx_t_2);
-    __Pyx_XGOTREF(__pyx_t_3);
-    /*try:*/ {
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":951
- * cdef inline int import_umath() except -1:
- *     try:
- *         _import_umath()             # <<<<<<<<<<<<<<
- *     except Exception:
- *         raise ImportError("numpy.core.umath failed to import")
- */
-      __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 951, __pyx_L3_error)
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950
- * 
- * cdef inline int import_umath() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-    }
-    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-    goto __pyx_L8_try_end;
-    __pyx_L3_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":952
- *     try:
- *         _import_umath()
- *     except Exception:             # <<<<<<<<<<<<<<
- *         raise ImportError("numpy.core.umath failed to import")
- * 
- */
-    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
-    if (__pyx_t_4) {
-      __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
-      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 952, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_GOTREF(__pyx_t_7);
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953
- *         _import_umath()
- *     except Exception:
- *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
- * 
- * cdef inline int import_ufunc() except -1:
- */
-      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 953, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-      __PYX_ERR(1, 953, __pyx_L5_except_error)
-    }
-    goto __pyx_L5_except_error;
-    __pyx_L5_except_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950
- * 
- * cdef inline int import_umath() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-    __Pyx_XGIVEREF(__pyx_t_1);
-    __Pyx_XGIVEREF(__pyx_t_2);
-    __Pyx_XGIVEREF(__pyx_t_3);
-    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
-    goto __pyx_L1_error;
-    __pyx_L8_try_end:;
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":949
- *         raise ImportError("numpy.core.multiarray failed to import")
- * 
- * cdef inline int import_umath() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         _import_umath()
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":955
- *         raise ImportError("numpy.core.umath failed to import")
- * 
- * cdef inline int import_ufunc() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         _import_umath()
- */
-
-static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("import_ufunc", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956
- * 
- * cdef inline int import_ufunc() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-  {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
-    __Pyx_XGOTREF(__pyx_t_1);
-    __Pyx_XGOTREF(__pyx_t_2);
-    __Pyx_XGOTREF(__pyx_t_3);
-    /*try:*/ {
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":957
- * cdef inline int import_ufunc() except -1:
- *     try:
- *         _import_umath()             # <<<<<<<<<<<<<<
- *     except Exception:
- *         raise ImportError("numpy.core.umath failed to import")
- */
-      __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L3_error)
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956
- * 
- * cdef inline int import_ufunc() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-    }
-    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-    goto __pyx_L8_try_end;
-    __pyx_L3_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":958
- *     try:
- *         _import_umath()
- *     except Exception:             # <<<<<<<<<<<<<<
- *         raise ImportError("numpy.core.umath failed to import")
- * 
- */
-    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
-    if (__pyx_t_4) {
-      __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
-      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 958, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_GOTREF(__pyx_t_7);
-
-      /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":959
- *         _import_umath()
- *     except Exception:
- *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
- * 
- * cdef extern from *:
- */
-      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 959, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_8);
-      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-      __PYX_ERR(1, 959, __pyx_L5_except_error)
-    }
-    goto __pyx_L5_except_error;
-    __pyx_L5_except_error:;
-
-    /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956
- * 
- * cdef inline int import_ufunc() except -1:
- *     try:             # <<<<<<<<<<<<<<
- *         _import_umath()
- *     except Exception:
- */
-    __Pyx_XGIVEREF(__pyx_t_1);
-    __Pyx_XGIVEREF(__pyx_t_2);
-    __Pyx_XGIVEREF(__pyx_t_3);
-    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
-    goto __pyx_L1_error;
-    __pyx_L8_try_end:;
-  }
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":955
- *         raise ImportError("numpy.core.umath failed to import")
- * 
- * cdef inline int import_ufunc() except -1:             # <<<<<<<<<<<<<<
- *     try:
- *         _import_umath()
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
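-/* import_array/import_umath/import_ufunc above initialise numpy's C-API
- * function tables, raising ImportError on failure as the generated except
- * blocks show.  Modules that cimport numpy must run this once before touching
- * the C-API; the hand-written equivalent would be roughly:
- *
- *     cimport numpy as cnp
- *     cnp.import_array()    # call at module init, before any C-API access
- */
-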
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":969
- * 
- * 
- * cdef inline bint is_timedelta64_object(object obj):             # <<<<<<<<<<<<<<
- *     """
- *     Cython equivalent of `isinstance(obj, np.timedelta64)`
- */
-
-static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("is_timedelta64_object", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":981
- *     bool
- *     """
- *     return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type));
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":969
- * 
- * 
- * cdef inline bint is_timedelta64_object(object obj):             # <<<<<<<<<<<<<<
- *     """
- *     Cython equivalent of `isinstance(obj, np.timedelta64)`
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":984
- * 
- * 
- * cdef inline bint is_datetime64_object(object obj):             # <<<<<<<<<<<<<<
- *     """
- *     Cython equivalent of `isinstance(obj, np.datetime64)`
- */
-
-static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("is_datetime64_object", 0);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":996
- *     bool
- *     """
- *     return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type));
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":984
- * 
- * 
- * cdef inline bint is_datetime64_object(object obj):             # <<<<<<<<<<<<<<
- *     """
- *     Cython equivalent of `isinstance(obj, np.datetime64)`
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":999
- * 
- * 
- * cdef inline npy_datetime get_datetime64_value(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the int64 value underlying scalar numpy datetime64 object
- */
-
-static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) {
-  npy_datetime __pyx_r;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1006
- *     also needed.  That can be found using `get_datetime64_unit`.
- *     """
- *     return (<PyDatetimeScalarObject*>obj).obval             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":999
- * 
- * 
- * cdef inline npy_datetime get_datetime64_value(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the int64 value underlying scalar numpy datetime64 object
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1009
- * 
- * 
- * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the int64 value underlying scalar numpy timedelta64 object
- */
-
-static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) {
-  npy_timedelta __pyx_r;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1013
- *     returns the int64 value underlying scalar numpy timedelta64 object
- *     """
- *     return (<PyTimedeltaScalarObject*>obj).obval             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval;
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1009
- * 
- * 
- * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the int64 value underlying scalar numpy timedelta64 object
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1016
- * 
- * 
- * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the unit part of the dtype for a numpy datetime64 object.
- */
-
-static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) {
-  NPY_DATETIMEUNIT __pyx_r;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1020
- *     returns the unit part of the dtype for a numpy datetime64 object.
- *     """
- *     return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base             # <<<<<<<<<<<<<<
- */
-  __pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base);
-  goto __pyx_L0;
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1016
- * 
- * 
- * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     returns the unit part of the dtype for a numpy datetime64 object.
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
-
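-/* The helpers above unpack numpy datetime64/timedelta64 scalars: is_*_object
- * are type checks, while the nogil get_*_value helpers read the raw int64
- * payload and get_datetime64_unit recovers the unit that payload is expressed
- * in.  A sketch, assuming a cimported numpy and some object `obj`:
- *
- *     if cnp.is_datetime64_object(obj):
- *         ticks = cnp.get_datetime64_value(obj)   # raw int64
- *         unit = cnp.get_datetime64_unit(obj)     # e.g. NPY_FR_ns
- */
-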
-/* "View.MemoryView":122
- *         cdef bint dtype_is_object
- * 
- *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
- *                   mode="c", bint allocate_buffer=True):
- * 
- */
-
-/* Python wrapper */
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_shape = 0;
-  Py_ssize_t __pyx_v_itemsize;
-  PyObject *__pyx_v_format = 0;
-  PyObject *__pyx_v_mode = 0;
-  int __pyx_v_allocate_buffer;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
-    PyObject* values[5] = {0,0,0,0,0};
-    values[3] = ((PyObject *)__pyx_n_s_c);
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
-        CYTHON_FALLTHROUGH;
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  3:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
-          if (value) { values[3] = value; kw_args--; }
-        }
-        CYTHON_FALLTHROUGH;
-        case  4:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
-          if (value) { values[4] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
-        CYTHON_FALLTHROUGH;
-        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
-        CYTHON_FALLTHROUGH;
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_shape = ((PyObject*)values[0]);
-    __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error)
-    __pyx_v_format = values[2];
-    __pyx_v_mode = values[3];
-    if (values[4]) {
-      __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error)
-    } else {
-
-      /* "View.MemoryView":123
- * 
- *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
- *                   mode="c", bint allocate_buffer=True):             # <<<<<<<<<<<<<<
- * 
- *         cdef int idx
- */
-      __pyx_v_allocate_buffer = ((int)1);
-    }
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return -1;
-  __pyx_L4_argument_unpacking_done:;
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error)
-  if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
-    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error)
-  }
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
-
-  /* "View.MemoryView":122
- *         cdef bint dtype_is_object
- * 
- *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
- *                   mode="c", bint allocate_buffer=True):
- * 
- */
-
-  /* function exit code */
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
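-/* The wrapper above unpacks the cython.view.array constructor arguments shown
- * in the quoted signature: a shape tuple, an itemsize, a non-None struct-format
- * string, mode 'c' or 'fortran', and allocate_buffer.  At the Cython level it
- * backs, roughly:
- *
- *     from cython.view cimport array as cvarray
- *     a = cvarray(shape=(8, 8), itemsize=sizeof(double), format="d", mode="c")
- */
-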
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
-  int __pyx_v_idx;
-  Py_ssize_t __pyx_v_i;
-  Py_ssize_t __pyx_v_dim;
-  PyObject **__pyx_v_p;
-  char __pyx_v_order;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  char *__pyx_t_7;
-  int __pyx_t_8;
-  Py_ssize_t __pyx_t_9;
-  PyObject *__pyx_t_10 = NULL;
-  Py_ssize_t __pyx_t_11;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__cinit__", 0);
-  __Pyx_INCREF(__pyx_v_format);
-
-  /* "View.MemoryView":129
- *         cdef PyObject **p
- * 
- *         self.ndim = <int> len(shape)             # <<<<<<<<<<<<<<
- *         self.itemsize = itemsize
- * 
- */
-  if (unlikely(__pyx_v_shape == Py_None)) {
-    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
-    __PYX_ERR(2, 129, __pyx_L1_error)
-  }
-  __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error)
-  __pyx_v_self->ndim = ((int)__pyx_t_1);
-
-  /* "View.MemoryView":130
- * 
- *         self.ndim = <int> len(shape)
- *         self.itemsize = itemsize             # <<<<<<<<<<<<<<
- * 
- *         if not self.ndim:
- */
-  __pyx_v_self->itemsize = __pyx_v_itemsize;
-
-  /* "View.MemoryView":132
- *         self.itemsize = itemsize
- * 
- *         if not self.ndim:             # <<<<<<<<<<<<<<
- *             raise ValueError("Empty shape tuple for cython.array")
- * 
- */
-  __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
-  if (unlikely(__pyx_t_2)) {
-
-    /* "View.MemoryView":133
- * 
- *         if not self.ndim:
- *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
- * 
- *         if itemsize <= 0:
- */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 133, __pyx_L1_error)
-
-    /* "View.MemoryView":132
- *         self.itemsize = itemsize
- * 
- *         if not self.ndim:             # <<<<<<<<<<<<<<
- *             raise ValueError("Empty shape tuple for cython.array")
- * 
- */
-  }
-
-  /* "View.MemoryView":135
- *             raise ValueError("Empty shape tuple for cython.array")
- * 
- *         if itemsize <= 0:             # <<<<<<<<<<<<<<
- *             raise ValueError("itemsize <= 0 for cython.array")
- * 
- */
-  __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
-  if (unlikely(__pyx_t_2)) {
-
-    /* "View.MemoryView":136
- * 
- *         if itemsize <= 0:
- *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
- * 
- *         if not isinstance(format, bytes):
- */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 136, __pyx_L1_error)
-
-    /* "View.MemoryView":135
- *             raise ValueError("Empty shape tuple for cython.array")
- * 
- *         if itemsize <= 0:             # <<<<<<<<<<<<<<
- *             raise ValueError("itemsize <= 0 for cython.array")
- * 
- */
-  }
-
-  /* "View.MemoryView":138
- *             raise ValueError("itemsize <= 0 for cython.array")
- * 
- *         if not isinstance(format, bytes):             # <<<<<<<<<<<<<<
- *             format = format.encode('ASCII')
- *         self._format = format  # keep a reference to the byte string
- */
-  __pyx_t_2 = PyBytes_Check(__pyx_v_format); 
-  __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
-  if (__pyx_t_4) {
-
-    /* "View.MemoryView":139
- * 
- *         if not isinstance(format, bytes):
- *             format = format.encode('ASCII')             # <<<<<<<<<<<<<<
- *         self._format = format  # keep a reference to the byte string
- *         self.format = self._format
- */
-    __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_6 = NULL;
-    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-      __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
-      if (likely(__pyx_t_6)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-        __Pyx_INCREF(__pyx_t_6);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_5, function);
-      }
-    }
-    __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
-    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
-    __pyx_t_3 = 0;
-
-    /* "View.MemoryView":138
- *             raise ValueError("itemsize <= 0 for cython.array")
- * 
- *         if not isinstance(format, bytes):             # <<<<<<<<<<<<<<
- *             format = format.encode('ASCII')
- *         self._format = format  # keep a reference to the byte string
- */
-  }
-
-  /* "View.MemoryView":140
- *         if not isinstance(format, bytes):
- *             format = format.encode('ASCII')
- *         self._format = format  # keep a reference to the byte string             # <<<<<<<<<<<<<<
- *         self.format = self._format
- * 
- */
-  if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error)
-  __pyx_t_3 = __pyx_v_format;
-  __Pyx_INCREF(__pyx_t_3);
-  __Pyx_GIVEREF(__pyx_t_3);
-  __Pyx_GOTREF(__pyx_v_self->_format);
-  __Pyx_DECREF(__pyx_v_self->_format);
-  __pyx_v_self->_format = ((PyObject*)__pyx_t_3);
-  __pyx_t_3 = 0;
-
-  /* "View.MemoryView":141
- *             format = format.encode('ASCII')
- *         self._format = format  # keep a reference to the byte string
- *         self.format = self._format             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  if (unlikely(__pyx_v_self->_format == Py_None)) {
-    PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
-    __PYX_ERR(2, 141, __pyx_L1_error)
-  }
-  __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error)
-  __pyx_v_self->format = __pyx_t_7;
-
-  /* "View.MemoryView":144
- * 
- * 
- *         self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)             # <<<<<<<<<<<<<<
- *         self._strides = self._shape + self.ndim
- * 
- */
-  __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
-
-  /* "View.MemoryView":145
- * 
- *         self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
- *         self._strides = self._shape + self.ndim             # <<<<<<<<<<<<<<
- * 
- *         if not self._shape:
- */
-  __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
-
-  /* "View.MemoryView":147
- *         self._strides = self._shape + self.ndim
- * 
- *         if not self._shape:             # <<<<<<<<<<<<<<
- *             raise MemoryError("unable to allocate shape and strides.")
- * 
- */
-  __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
-  if (unlikely(__pyx_t_4)) {
-
-    /* "View.MemoryView":148
- * 
- *         if not self._shape:
- *             raise MemoryError("unable to allocate shape and strides.")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 148, __pyx_L1_error)
-
-    /* "View.MemoryView":147
- *         self._strides = self._shape + self.ndim
- * 
- *         if not self._shape:             # <<<<<<<<<<<<<<
- *             raise MemoryError("unable to allocate shape and strides.")
- * 
- */
-  }
-
-  /* "View.MemoryView":151
- * 
- * 
- *         for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
- *             if dim <= 0:
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
- */
-  __pyx_t_8 = 0;
-  __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
-  for (;;) {
-    if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
-    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error)
-    #else
-    __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    #endif
-    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __pyx_v_dim = __pyx_t_9;
-    __pyx_v_idx = __pyx_t_8;
-    __pyx_t_8 = (__pyx_t_8 + 1);
-
-    /* "View.MemoryView":152
- * 
- *         for idx, dim in enumerate(shape):
- *             if dim <= 0:             # <<<<<<<<<<<<<<
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
- *             self._shape[idx] = dim
- */
-    __pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
-    if (unlikely(__pyx_t_4)) {
-
-      /* "View.MemoryView":153
- *         for idx, dim in enumerate(shape):
- *             if dim <= 0:
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))             # <<<<<<<<<<<<<<
- *             self._shape[idx] = dim
- * 
- */
-      __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_10);
-      __Pyx_GIVEREF(__pyx_t_5);
-      PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
-      __Pyx_GIVEREF(__pyx_t_6);
-      PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
-      __pyx_t_5 = 0;
-      __pyx_t_6 = 0;
-      __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-      __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_10);
-      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-      __PYX_ERR(2, 153, __pyx_L1_error)
-
-      /* "View.MemoryView":152
- * 
- *         for idx, dim in enumerate(shape):
- *             if dim <= 0:             # <<<<<<<<<<<<<<
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
- *             self._shape[idx] = dim
- */
-    }
-
-    /* "View.MemoryView":154
- *             if dim <= 0:
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
- *             self._shape[idx] = dim             # <<<<<<<<<<<<<<
- * 
- *         cdef char order
- */
-    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
-
-    /* "View.MemoryView":151
- * 
- * 
- *         for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
- *             if dim <= 0:
- *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
- */
-  }
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "View.MemoryView":157
- * 
- *         cdef char order
- *         if mode == 'fortran':             # <<<<<<<<<<<<<<
- *             order = b'F'
- *             self.mode = u'fortran'
- */
-  __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error)
-  if (__pyx_t_4) {
-
-    /* "View.MemoryView":158
- *         cdef char order
- *         if mode == 'fortran':
- *             order = b'F'             # <<<<<<<<<<<<<<
- *             self.mode = u'fortran'
- *         elif mode == 'c':
- */
-    __pyx_v_order = 'F';
-
-    /* "View.MemoryView":159
- *         if mode == 'fortran':
- *             order = b'F'
- *             self.mode = u'fortran'             # <<<<<<<<<<<<<<
- *         elif mode == 'c':
- *             order = b'C'
- */
-    __Pyx_INCREF(__pyx_n_u_fortran);
-    __Pyx_GIVEREF(__pyx_n_u_fortran);
-    __Pyx_GOTREF(__pyx_v_self->mode);
-    __Pyx_DECREF(__pyx_v_self->mode);
-    __pyx_v_self->mode = __pyx_n_u_fortran;
-
-    /* "View.MemoryView":157
- * 
- *         cdef char order
- *         if mode == 'fortran':             # <<<<<<<<<<<<<<
- *             order = b'F'
- *             self.mode = u'fortran'
- */
-    goto __pyx_L10;
-  }
-
-  /* "View.MemoryView":160
- *             order = b'F'
- *             self.mode = u'fortran'
- *         elif mode == 'c':             # <<<<<<<<<<<<<<
- *             order = b'C'
- *             self.mode = u'c'
- */
-  __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error)
-  if (likely(__pyx_t_4)) {
-
-    /* "View.MemoryView":161
- *             self.mode = u'fortran'
- *         elif mode == 'c':
- *             order = b'C'             # <<<<<<<<<<<<<<
- *             self.mode = u'c'
- *         else:
- */
-    __pyx_v_order = 'C';
-
-    /* "View.MemoryView":162
- *         elif mode == 'c':
- *             order = b'C'
- *             self.mode = u'c'             # <<<<<<<<<<<<<<
- *         else:
- *             raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
- */
-    __Pyx_INCREF(__pyx_n_u_c);
-    __Pyx_GIVEREF(__pyx_n_u_c);
-    __Pyx_GOTREF(__pyx_v_self->mode);
-    __Pyx_DECREF(__pyx_v_self->mode);
-    __pyx_v_self->mode = __pyx_n_u_c;
-
-    /* "View.MemoryView":160
- *             order = b'F'
- *             self.mode = u'fortran'
- *         elif mode == 'c':             # <<<<<<<<<<<<<<
- *             order = b'C'
- *             self.mode = u'c'
- */
-    goto __pyx_L10;
-  }
-
-  /* "View.MemoryView":164
- *             self.mode = u'c'
- *         else:
- *             raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)             # <<<<<<<<<<<<<<
- * 
- *         self.len = fill_contig_strides_array(self._shape, self._strides,
- */
-  /*else*/ {
-    __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_10);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __Pyx_Raise(__pyx_t_10, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-    __PYX_ERR(2, 164, __pyx_L1_error)
-  }
-  __pyx_L10:;
-
-  /* "View.MemoryView":166
- *             raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
- * 
- *         self.len = fill_contig_strides_array(self._shape, self._strides,             # <<<<<<<<<<<<<<
- *                                              itemsize, self.ndim, order)
- * 
- */
-  __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
-
-  /* "View.MemoryView":169
- *                                              itemsize, self.ndim, order)
- * 
- *         self.free_data = allocate_buffer             # <<<<<<<<<<<<<<
- *         self.dtype_is_object = format == b'O'
- *         if allocate_buffer:
- */
-  __pyx_v_self->free_data = __pyx_v_allocate_buffer;
-
-  /* "View.MemoryView":170
- * 
- *         self.free_data = allocate_buffer
- *         self.dtype_is_object = format == b'O'             # <<<<<<<<<<<<<<
- *         if allocate_buffer:
- * 
- */
-  __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error)
-  __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-  __pyx_v_self->dtype_is_object = __pyx_t_4;
-
-  /* "View.MemoryView":171
- *         self.free_data = allocate_buffer
- *         self.dtype_is_object = format == b'O'
- *         if allocate_buffer:             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_4 = (__pyx_v_allocate_buffer != 0);
-  if (__pyx_t_4) {
-
-    /* "View.MemoryView":174
- * 
- * 
- *             self.data = <char *>malloc(self.len)             # <<<<<<<<<<<<<<
- *             if not self.data:
- *                 raise MemoryError("unable to allocate array data.")
- */
-    __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
-
-    /* "View.MemoryView":175
- * 
- *             self.data = <char *>malloc(self.len)
- *             if not self.data:             # <<<<<<<<<<<<<<
- *                 raise MemoryError("unable to allocate array data.")
- * 
- */
-    __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
-    if (unlikely(__pyx_t_4)) {
-
-      /* "View.MemoryView":176
- *             self.data = <char *>malloc(self.len)
- *             if not self.data:
- *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
- * 
- *             if self.dtype_is_object:
- */
-      __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_10);
-      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-      __PYX_ERR(2, 176, __pyx_L1_error)
-
-      /* "View.MemoryView":175
- * 
- *             self.data = <char *>malloc(self.len)
- *             if not self.data:             # <<<<<<<<<<<<<<
- *                 raise MemoryError("unable to allocate array data.")
- * 
- */
-    }
-
-    /* "View.MemoryView":178
- *                 raise MemoryError("unable to allocate array data.")
- * 
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 p = <PyObject **> self.data
- *                 for i in range(self.len / itemsize):
- */
-    __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
-    if (__pyx_t_4) {
-
-      /* "View.MemoryView":179
- * 
- *             if self.dtype_is_object:
- *                 p = <PyObject **> self.data             # <<<<<<<<<<<<<<
- *                 for i in range(self.len / itemsize):
- *                     p[i] = Py_None
- */
-      __pyx_v_p = ((PyObject **)__pyx_v_self->data);
-
-      /* "View.MemoryView":180
- *             if self.dtype_is_object:
- *                 p = <PyObject **> self.data
- *                 for i in range(self.len / itemsize):             # <<<<<<<<<<<<<<
- *                     p[i] = Py_None
- *                     Py_INCREF(Py_None)
- */
-      if (unlikely(__pyx_v_itemsize == 0)) {
-        PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
-        __PYX_ERR(2, 180, __pyx_L1_error)
-      }
-      else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1)  && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
-        PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
-        __PYX_ERR(2, 180, __pyx_L1_error)
-      }
-      __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
-      __pyx_t_9 = __pyx_t_1;
-      for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
-        __pyx_v_i = __pyx_t_11;
-
-        /* "View.MemoryView":181
- *                 p = <PyObject **> self.data
- *                 for i in range(self.len / itemsize):
- *                     p[i] = Py_None             # <<<<<<<<<<<<<<
- *                     Py_INCREF(Py_None)
- * 
- */
-        (__pyx_v_p[__pyx_v_i]) = Py_None;
-
-        /* "View.MemoryView":182
- *                 for i in range(self.len / itemsize):
- *                     p[i] = Py_None
- *                     Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
- * 
- *     @cname('getbuffer')
- */
-        Py_INCREF(Py_None);
-      }
-
-      /* "View.MemoryView":178
- *                 raise MemoryError("unable to allocate array data.")
- * 
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 p = <PyObject **> self.data
- *                 for i in range(self.len / itemsize):
- */
-    }
-
-    /* "View.MemoryView":171
- *         self.free_data = allocate_buffer
- *         self.dtype_is_object = format == b'O'
- *         if allocate_buffer:             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  }
-
-  /* "View.MemoryView":122
- *         cdef bint dtype_is_object
- * 
- *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
- *                   mode="c", bint allocate_buffer=True):
- * 
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_10);
-  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_format);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":185
- * 
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):             # <<<<<<<<<<<<<<
- *         cdef int bufmode = -1
- *         if self.mode == u"c":
- */
-
-/* Python wrapper */
-static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
-  int __pyx_v_bufmode;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  char *__pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  int __pyx_t_6;
-  Py_ssize_t *__pyx_t_7;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  if (__pyx_v_info == NULL) {
-    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
-    return -1;
-  }
-  __Pyx_RefNannySetupContext("__getbuffer__", 0);
-  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
-  __Pyx_GIVEREF(__pyx_v_info->obj);
-
-  /* "View.MemoryView":186
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         cdef int bufmode = -1             # <<<<<<<<<<<<<<
- *         if self.mode == u"c":
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
-  __pyx_v_bufmode = -1;
-
-  /* "View.MemoryView":187
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         cdef int bufmode = -1
- *         if self.mode == u"c":             # <<<<<<<<<<<<<<
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         elif self.mode == u"fortran":
- */
-  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error)
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":188
- *         cdef int bufmode = -1
- *         if self.mode == u"c":
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
- *         elif self.mode == u"fortran":
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
-    __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
-    /* "View.MemoryView":187
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         cdef int bufmode = -1
- *         if self.mode == u"c":             # <<<<<<<<<<<<<<
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         elif self.mode == u"fortran":
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":189
- *         if self.mode == u"c":
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         elif self.mode == u"fortran":             # <<<<<<<<<<<<<<
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):
- */
-  __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error)
-  __pyx_t_1 = (__pyx_t_2 != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":190
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         elif self.mode == u"fortran":
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
- *         if not (flags & bufmode):
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")
- */
-    __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
-    /* "View.MemoryView":189
- *         if self.mode == u"c":
- *             bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         elif self.mode == u"fortran":             # <<<<<<<<<<<<<<
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):
- */
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":191
- *         elif self.mode == u"fortran":
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):             # <<<<<<<<<<<<<<
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")
- *         info.buf = self.data
- */
-  __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
-  if (unlikely(__pyx_t_1)) {
-
-    /* "View.MemoryView":192
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
- *         info.buf = self.data
- *         info.len = self.len
- */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 192, __pyx_L1_error)
-
-    /* "View.MemoryView":191
- *         elif self.mode == u"fortran":
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):             # <<<<<<<<<<<<<<
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")
- *         info.buf = self.data
- */
-  }
-
-  /* "View.MemoryView":193
- *         if not (flags & bufmode):
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")
- *         info.buf = self.data             # <<<<<<<<<<<<<<
- *         info.len = self.len
- *         info.ndim = self.ndim
- */
-  __pyx_t_4 = __pyx_v_self->data;
-  __pyx_v_info->buf = __pyx_t_4;
-
-  /* "View.MemoryView":194
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")
- *         info.buf = self.data
- *         info.len = self.len             # <<<<<<<<<<<<<<
- *         info.ndim = self.ndim
- *         info.shape = self._shape
- */
-  __pyx_t_5 = __pyx_v_self->len;
-  __pyx_v_info->len = __pyx_t_5;
-
-  /* "View.MemoryView":195
- *         info.buf = self.data
- *         info.len = self.len
- *         info.ndim = self.ndim             # <<<<<<<<<<<<<<
- *         info.shape = self._shape
- *         info.strides = self._strides
- */
-  __pyx_t_6 = __pyx_v_self->ndim;
-  __pyx_v_info->ndim = __pyx_t_6;
-
-  /* "View.MemoryView":196
- *         info.len = self.len
- *         info.ndim = self.ndim
- *         info.shape = self._shape             # <<<<<<<<<<<<<<
- *         info.strides = self._strides
- *         info.suboffsets = NULL
- */
-  __pyx_t_7 = __pyx_v_self->_shape;
-  __pyx_v_info->shape = __pyx_t_7;
-
-  /* "View.MemoryView":197
- *         info.ndim = self.ndim
- *         info.shape = self._shape
- *         info.strides = self._strides             # <<<<<<<<<<<<<<
- *         info.suboffsets = NULL
- *         info.itemsize = self.itemsize
- */
-  __pyx_t_7 = __pyx_v_self->_strides;
-  __pyx_v_info->strides = __pyx_t_7;
-
-  /* "View.MemoryView":198
- *         info.shape = self._shape
- *         info.strides = self._strides
- *         info.suboffsets = NULL             # <<<<<<<<<<<<<<
- *         info.itemsize = self.itemsize
- *         info.readonly = 0
- */
-  __pyx_v_info->suboffsets = NULL;
-
-  /* "View.MemoryView":199
- *         info.strides = self._strides
- *         info.suboffsets = NULL
- *         info.itemsize = self.itemsize             # <<<<<<<<<<<<<<
- *         info.readonly = 0
- * 
- */
-  __pyx_t_5 = __pyx_v_self->itemsize;
-  __pyx_v_info->itemsize = __pyx_t_5;
-
-  /* "View.MemoryView":200
- *         info.suboffsets = NULL
- *         info.itemsize = self.itemsize
- *         info.readonly = 0             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_FORMAT:
- */
-  __pyx_v_info->readonly = 0;
-
-  /* "View.MemoryView":202
- *         info.readonly = 0
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             info.format = self.format
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":203
- * 
- *         if flags & PyBUF_FORMAT:
- *             info.format = self.format             # <<<<<<<<<<<<<<
- *         else:
- *             info.format = NULL
- */
-    __pyx_t_4 = __pyx_v_self->format;
-    __pyx_v_info->format = __pyx_t_4;
-
-    /* "View.MemoryView":202
- *         info.readonly = 0
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             info.format = self.format
- *         else:
- */
-    goto __pyx_L5;
-  }
-
-  /* "View.MemoryView":205
- *             info.format = self.format
- *         else:
- *             info.format = NULL             # <<<<<<<<<<<<<<
- * 
- *         info.obj = self
- */
-  /*else*/ {
-    __pyx_v_info->format = NULL;
-  }
-  __pyx_L5:;
-
-  /* "View.MemoryView":207
- *             info.format = NULL
- * 
- *         info.obj = self             # <<<<<<<<<<<<<<
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
- */
-  __Pyx_INCREF(((PyObject *)__pyx_v_self));
-  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
-  __Pyx_GOTREF(__pyx_v_info->obj);
-  __Pyx_DECREF(__pyx_v_info->obj);
-  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
-
-  /* "View.MemoryView":185
- * 
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):             # <<<<<<<<<<<<<<
- *         cdef int bufmode = -1
- *         if self.mode == u"c":
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  if (__pyx_v_info->obj != NULL) {
-    __Pyx_GOTREF(__pyx_v_info->obj);
-    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
-  }
-  goto __pyx_L2;
-  __pyx_L0:;
-  if (__pyx_v_info->obj == Py_None) {
-    __Pyx_GOTREF(__pyx_v_info->obj);
-    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
-  }
-  __pyx_L2:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":211
- *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
- * 
- *     def __dealloc__(array self):             # <<<<<<<<<<<<<<
- *         if self.callback_free_data != NULL:
- *             self.callback_free_data(self.data)
- */
-
-/* Python wrapper */
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
-  __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  __Pyx_RefNannySetupContext("__dealloc__", 0);
-
-  /* "View.MemoryView":212
- * 
- *     def __dealloc__(array self):
- *         if self.callback_free_data != NULL:             # <<<<<<<<<<<<<<
- *             self.callback_free_data(self.data)
- *         elif self.free_data:
- */
-  __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":213
- *     def __dealloc__(array self):
- *         if self.callback_free_data != NULL:
- *             self.callback_free_data(self.data)             # <<<<<<<<<<<<<<
- *         elif self.free_data:
- *             if self.dtype_is_object:
- */
-    __pyx_v_self->callback_free_data(__pyx_v_self->data);
-
-    /* "View.MemoryView":212
- * 
- *     def __dealloc__(array self):
- *         if self.callback_free_data != NULL:             # <<<<<<<<<<<<<<
- *             self.callback_free_data(self.data)
- *         elif self.free_data:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":214
- *         if self.callback_free_data != NULL:
- *             self.callback_free_data(self.data)
- *         elif self.free_data:             # <<<<<<<<<<<<<<
- *             if self.dtype_is_object:
- *                 refcount_objects_in_slice(self.data, self._shape,
- */
-  __pyx_t_1 = (__pyx_v_self->free_data != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":215
- *             self.callback_free_data(self.data)
- *         elif self.free_data:
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 refcount_objects_in_slice(self.data, self._shape,
- *                                           self._strides, self.ndim, False)
- */
-    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":216
- *         elif self.free_data:
- *             if self.dtype_is_object:
- *                 refcount_objects_in_slice(self.data, self._shape,             # <<<<<<<<<<<<<<
- *                                           self._strides, self.ndim, False)
- *             free(self.data)
- */
-      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
-
-      /* "View.MemoryView":215
- *             self.callback_free_data(self.data)
- *         elif self.free_data:
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 refcount_objects_in_slice(self.data, self._shape,
- *                                           self._strides, self.ndim, False)
- */
-    }
-
-    /* "View.MemoryView":218
- *                 refcount_objects_in_slice(self.data, self._shape,
- *                                           self._strides, self.ndim, False)
- *             free(self.data)             # <<<<<<<<<<<<<<
- *         PyObject_Free(self._shape)
- * 
- */
-    free(__pyx_v_self->data);
-
-    /* "View.MemoryView":214
- *         if self.callback_free_data != NULL:
- *             self.callback_free_data(self.data)
- *         elif self.free_data:             # <<<<<<<<<<<<<<
- *             if self.dtype_is_object:
- *                 refcount_objects_in_slice(self.data, self._shape,
- */
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":219
- *                                           self._strides, self.ndim, False)
- *             free(self.data)
- *         PyObject_Free(self._shape)             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  PyObject_Free(__pyx_v_self->_shape);
-
-  /* "View.MemoryView":211
- *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
- * 
- *     def __dealloc__(array self):             # <<<<<<<<<<<<<<
- *         if self.callback_free_data != NULL:
- *             self.callback_free_data(self.data)
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":222
- * 
- *     @property
- *     def memview(self):             # <<<<<<<<<<<<<<
- *         return self.get_memview()
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":223
- *     @property
- *     def memview(self):
- *         return self.get_memview()             # <<<<<<<<<<<<<<
- * 
- *     @cname('get_memview')
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":222
- * 
- *     @property
- *     def memview(self):             # <<<<<<<<<<<<<<
- *         return self.get_memview()
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":226
- * 
- *     @cname('get_memview')
- *     cdef get_memview(self):             # <<<<<<<<<<<<<<
- *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- *         return  memoryview(self, flags, self.dtype_is_object)
- */
-
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
-  int __pyx_v_flags;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("get_memview", 0);
-
-  /* "View.MemoryView":227
- *     @cname('get_memview')
- *     cdef get_memview(self):
- *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE             # <<<<<<<<<<<<<<
- *         return  memoryview(self, flags, self.dtype_is_object)
- * 
- */
-  __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
-
-  /* "View.MemoryView":228
- *     cdef get_memview(self):
- *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- *         return  memoryview(self, flags, self.dtype_is_object)             # <<<<<<<<<<<<<<
- * 
- *     def __len__(self):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_INCREF(((PyObject *)__pyx_v_self));
-  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
-  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
-  __pyx_t_1 = 0;
-  __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":226
- * 
- *     @cname('get_memview')
- *     cdef get_memview(self):             # <<<<<<<<<<<<<<
- *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- *         return  memoryview(self, flags, self.dtype_is_object)
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":230
- *         return  memoryview(self, flags, self.dtype_is_object)
- * 
- *     def __len__(self):             # <<<<<<<<<<<<<<
- *         return self._shape[0]
- * 
- */
-
-/* Python wrapper */
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
-  Py_ssize_t __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
-  Py_ssize_t __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__len__", 0);
-
-  /* "View.MemoryView":231
- * 
- *     def __len__(self):
- *         return self._shape[0]             # <<<<<<<<<<<<<<
- * 
- *     def __getattr__(self, attr):
- */
-  __pyx_r = (__pyx_v_self->_shape[0]);
-  goto __pyx_L0;
-
-  /* "View.MemoryView":230
- *         return  memoryview(self, flags, self.dtype_is_object)
- * 
- *     def __len__(self):             # <<<<<<<<<<<<<<
- *         return self._shape[0]
- * 
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":233
- *         return self._shape[0]
- * 
- *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
- *         return getattr(self.memview, attr)
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__getattr__", 0);
-
-  /* "View.MemoryView":234
- * 
- *     def __getattr__(self, attr):
- *         return getattr(self.memview, attr)             # <<<<<<<<<<<<<<
- * 
- *     def __getitem__(self, item):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":233
- *         return self._shape[0]
- * 
- *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
- *         return getattr(self.memview, attr)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":236
- *         return getattr(self.memview, attr)
- * 
- *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
- *         return self.memview[item]
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__getitem__", 0);
-
-  /* "View.MemoryView":237
- * 
- *     def __getitem__(self, item):
- *         return self.memview[item]             # <<<<<<<<<<<<<<
- * 
- *     def __setitem__(self, item, value):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":236
- *         return getattr(self.memview, attr)
- * 
- *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
- *         return self.memview[item]
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":239
- *         return self.memview[item]
- * 
- *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
- *         self.memview[item] = value
- * 
- */
-
-/* Python wrapper */
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
-  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setitem__", 0);
-
-  /* "View.MemoryView":240
- * 
- *     def __setitem__(self, item, value):
- *         self.memview[item] = value             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "View.MemoryView":239
- *         return self.memview[item]
- * 
- *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
- *         self.memview[item] = value
- * 
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 2, __pyx_L1_error)
-
-  /* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 4, __pyx_L1_error)
-
-  /* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":244
- * 
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,             # <<<<<<<<<<<<<<
- *                           char *mode, char *buf):
- *     cdef array result
- */
-
-static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
-  struct __pyx_array_obj *__pyx_v_result = 0;
-  struct __pyx_array_obj *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("array_cwrapper", 0);
-
-  /* "View.MemoryView":248
- *     cdef array result
- * 
- *     if buf == NULL:             # <<<<<<<<<<<<<<
- *         result = array(shape, itemsize, format, mode.decode('ASCII'))
- *     else:
- */
-  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":249
- * 
- *     if buf == NULL:
- *         result = array(shape, itemsize, format, mode.decode('ASCII'))             # <<<<<<<<<<<<<<
- *     else:
- *         result = array(shape, itemsize, format, mode.decode('ASCII'),
- */
-    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_INCREF(__pyx_v_shape);
-    __Pyx_GIVEREF(__pyx_v_shape);
-    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
-    __Pyx_GIVEREF(__pyx_t_2);
-    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
-    __pyx_t_2 = 0;
-    __pyx_t_3 = 0;
-    __pyx_t_4 = 0;
-    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
-    __pyx_t_4 = 0;
-
-    /* "View.MemoryView":248
- *     cdef array result
- * 
- *     if buf == NULL:             # <<<<<<<<<<<<<<
- *         result = array(shape, itemsize, format, mode.decode('ASCII'))
- *     else:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":251
- *         result = array(shape, itemsize, format, mode.decode('ASCII'))
- *     else:
- *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
- *                        allocate_buffer=False)
- *         result.data = buf
- */
-  /*else*/ {
-    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_INCREF(__pyx_v_shape);
-    __Pyx_GIVEREF(__pyx_v_shape);
-    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_3);
-    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
-    __pyx_t_4 = 0;
-    __pyx_t_5 = 0;
-    __pyx_t_3 = 0;
-
-    /* "View.MemoryView":252
- *     else:
- *         result = array(shape, itemsize, format, mode.decode('ASCII'),
- *                        allocate_buffer=False)             # <<<<<<<<<<<<<<
- *         result.data = buf
- * 
- */
-    __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error)
-
-    /* "View.MemoryView":251
- *         result = array(shape, itemsize, format, mode.decode('ASCII'))
- *     else:
- *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
- *                        allocate_buffer=False)
- *         result.data = buf
- */
-    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
-    __pyx_t_5 = 0;
-
-    /* "View.MemoryView":253
- *         result = array(shape, itemsize, format, mode.decode('ASCII'),
- *                        allocate_buffer=False)
- *         result.data = buf             # <<<<<<<<<<<<<<
- * 
- *     return result
- */
-    __pyx_v_result->data = __pyx_v_buf;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":255
- *         result.data = buf
- * 
- *     return result             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(((PyObject *)__pyx_r));
-  __Pyx_INCREF(((PyObject *)__pyx_v_result));
-  __pyx_r = __pyx_v_result;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":244
- * 
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,             # <<<<<<<<<<<<<<
- *                           char *mode, char *buf):
- *     cdef array result
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_result);
-  __Pyx_XGIVEREF((PyObject *)__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":281
- * cdef class Enum(object):
- *     cdef object name
- *     def __init__(self, name):             # <<<<<<<<<<<<<<
- *         self.name = name
- *     def __repr__(self):
- */
-
-/* Python wrapper */
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_name = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
-    PyObject* values[1] = {0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-    }
-    __pyx_v_name = values[0];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return -1;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__init__", 0);
-
-  /* "View.MemoryView":282
- *     cdef object name
- *     def __init__(self, name):
- *         self.name = name             # <<<<<<<<<<<<<<
- *     def __repr__(self):
- *         return self.name
- */
-  __Pyx_INCREF(__pyx_v_name);
-  __Pyx_GIVEREF(__pyx_v_name);
-  __Pyx_GOTREF(__pyx_v_self->name);
-  __Pyx_DECREF(__pyx_v_self->name);
-  __pyx_v_self->name = __pyx_v_name;
-
-  /* "View.MemoryView":281
- * cdef class Enum(object):
- *     cdef object name
- *     def __init__(self, name):             # <<<<<<<<<<<<<<
- *         self.name = name
- *     def __repr__(self):
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":283
- *     def __init__(self, name):
- *         self.name = name
- *     def __repr__(self):             # <<<<<<<<<<<<<<
- *         return self.name
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
-  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__repr__", 0);
-
-  /* "View.MemoryView":284
- *         self.name = name
- *     def __repr__(self):
- *         return self.name             # <<<<<<<<<<<<<<
- * 
- * cdef generic = Enum("<strided and direct or indirect>")
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_self->name);
-  __pyx_r = __pyx_v_self->name;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":283
- *     def __init__(self, name):
- *         self.name = name
- *     def __repr__(self):             # <<<<<<<<<<<<<<
- *         return self.name
- * 
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     cdef tuple state
- *     cdef object _dict
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
-  PyObject *__pyx_v_state = 0;
-  PyObject *__pyx_v__dict = 0;
-  int __pyx_v_use_setstate;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
-  /* "(tree fragment)":5
- *     cdef object _dict
- *     cdef bint use_setstate
- *     state = (self.name,)             # <<<<<<<<<<<<<<
- *     _dict = getattr(self, '__dict__', None)
- *     if _dict is not None:
- */
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_INCREF(__pyx_v_self->name);
-  __Pyx_GIVEREF(__pyx_v_self->name);
-  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
-  __pyx_v_state = ((PyObject*)__pyx_t_1);
-  __pyx_t_1 = 0;
-
-  /* "(tree fragment)":6
- *     cdef bint use_setstate
- *     state = (self.name,)
- *     _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
- *     if _dict is not None:
- *         state += (_dict,)
- */
-  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v__dict = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "(tree fragment)":7
- *     state = (self.name,)
- *     _dict = getattr(self, '__dict__', None)
- *     if _dict is not None:             # <<<<<<<<<<<<<<
- *         state += (_dict,)
- *         use_setstate = True
- */
-  __pyx_t_2 = (__pyx_v__dict != Py_None);
-  __pyx_t_3 = (__pyx_t_2 != 0);
-  if (__pyx_t_3) {
-
-    /* "(tree fragment)":8
- *     _dict = getattr(self, '__dict__', None)
- *     if _dict is not None:
- *         state += (_dict,)             # <<<<<<<<<<<<<<
- *         use_setstate = True
- *     else:
- */
-    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_INCREF(__pyx_v__dict);
-    __Pyx_GIVEREF(__pyx_v__dict);
-    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
-    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
-    __pyx_t_4 = 0;
-
-    /* "(tree fragment)":9
- *     if _dict is not None:
- *         state += (_dict,)
- *         use_setstate = True             # <<<<<<<<<<<<<<
- *     else:
- *         use_setstate = self.name is not None
- */
-    __pyx_v_use_setstate = 1;
-
-    /* "(tree fragment)":7
- *     state = (self.name,)
- *     _dict = getattr(self, '__dict__', None)
- *     if _dict is not None:             # <<<<<<<<<<<<<<
- *         state += (_dict,)
- *         use_setstate = True
- */
-    goto __pyx_L3;
-  }
-
-  /* "(tree fragment)":11
- *         use_setstate = True
- *     else:
- *         use_setstate = self.name is not None             # <<<<<<<<<<<<<<
- *     if use_setstate:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
- */
-  /*else*/ {
-    __pyx_t_3 = (__pyx_v_self->name != Py_None);
-    __pyx_v_use_setstate = __pyx_t_3;
-  }
-  __pyx_L3:;
-
-  /* "(tree fragment)":12
- *     else:
- *         use_setstate = self.name is not None
- *     if use_setstate:             # <<<<<<<<<<<<<<
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
- *     else:
- */
-  __pyx_t_3 = (__pyx_v_use_setstate != 0);
-  if (__pyx_t_3) {
-
-    /* "(tree fragment)":13
- *         use_setstate = self.name is not None
- *     if use_setstate:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state             # <<<<<<<<<<<<<<
- *     else:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
- */
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    __Pyx_INCREF(__pyx_int_184977713);
-    __Pyx_GIVEREF(__pyx_int_184977713);
-    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
-    __Pyx_INCREF(Py_None);
-    __Pyx_GIVEREF(Py_None);
-    PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
-    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
-    __Pyx_INCREF(__pyx_v_state);
-    __Pyx_GIVEREF(__pyx_v_state);
-    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
-    __pyx_t_4 = 0;
-    __pyx_t_1 = 0;
-    __pyx_r = __pyx_t_5;
-    __pyx_t_5 = 0;
-    goto __pyx_L0;
-
-    /* "(tree fragment)":12
- *     else:
- *         use_setstate = self.name is not None
- *     if use_setstate:             # <<<<<<<<<<<<<<
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
- *     else:
- */
-  }
-
-  /* "(tree fragment)":15
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
- *     else:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-  /*else*/ {
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
-    __Pyx_INCREF(__pyx_int_184977713);
-    __Pyx_GIVEREF(__pyx_int_184977713);
-    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
-    __Pyx_INCREF(__pyx_v_state);
-    __Pyx_GIVEREF(__pyx_v_state);
-    PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
-    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_GIVEREF(__pyx_t_5);
-    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_1);
-    PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
-    __pyx_t_5 = 0;
-    __pyx_t_1 = 0;
-    __pyx_r = __pyx_t_4;
-    __pyx_t_4 = 0;
-    goto __pyx_L0;
-  }
-
-  /* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     cdef tuple state
- *     cdef object _dict
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_state);
-  __Pyx_XDECREF(__pyx_v__dict);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
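-
-/* Note on the reducer above: it follows the standard two-shape pickle
- * protocol. When `use_setstate` is true it returns
- *
- *     __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
- *
- * so pickle calls __setstate_cython__(state) after reconstruction; otherwise
- * the state is folded directly into the reconstructor arguments as
- *
- *     __pyx_unpickle_Enum, (type(self), 0xb068931, state)
- *
- * hence the PyTuple_New(3) vs PyTuple_New(2) branches. The literal 0xb068931
- * (== 184977713, i.e. __pyx_int_184977713 above) is a checksum Cython
- * derives from the type's attribute layout; the unpickler verifies it before
- * restoring state, so pickles made against a different layout fail loudly
- * instead of silently corrupting the object.
- */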
-
-/* "(tree fragment)":16
- *     else:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
-  /* "(tree fragment)":17
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
- * def __setstate_cython__(self, __pyx_state):
- *     __pyx_unpickle_Enum__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
- */
-  if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error)
-  __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "(tree fragment)":16
- *     else:
- *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
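-
-/* __setstate_cython__ only type-checks that __pyx_state is a tuple (or
- * None) and delegates to __pyx_unpickle_Enum__set_state, which restores the
- * attributes captured by the reducer. In Python terms the full roundtrip
- * is, illustratively (`e` being any instance of this Enum type):
- *
- *     import pickle
- *     e2 = pickle.loads(pickle.dumps(e))  # reduce -> __pyx_unpickle_Enum
- *                                         #        -> set_state
- */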
-
-/* "View.MemoryView":298
- * 
- * @cname('__pyx_align_pointer')
- * cdef void *align_pointer(void *memory, size_t alignment) nogil:             # <<<<<<<<<<<<<<
- *     "Align pointer memory on a given boundary"
- *     cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
- */
-
-static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
-  Py_intptr_t __pyx_v_aligned_p;
-  size_t __pyx_v_offset;
-  void *__pyx_r;
-  int __pyx_t_1;
-
-  /* "View.MemoryView":300
- * cdef void *align_pointer(void *memory, size_t alignment) nogil:
- *     "Align pointer memory on a given boundary"
- *     cdef Py_intptr_t aligned_p = <Py_intptr_t> memory             # <<<<<<<<<<<<<<
- *     cdef size_t offset
- * 
- */
-  __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
-
-  /* "View.MemoryView":304
- * 
- *     with cython.cdivision(True):
- *         offset = aligned_p % alignment             # <<<<<<<<<<<<<<
- * 
- *     if offset > 0:
- */
-  __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
-
-  /* "View.MemoryView":306
- *         offset = aligned_p % alignment
- * 
- *     if offset > 0:             # <<<<<<<<<<<<<<
- *         aligned_p += alignment - offset
- * 
- */
-  __pyx_t_1 = ((__pyx_v_offset > 0) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":307
- * 
- *     if offset > 0:
- *         aligned_p += alignment - offset             # <<<<<<<<<<<<<<
- * 
- *     return <void *> aligned_p
- */
-    __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
-
-    /* "View.MemoryView":306
- *         offset = aligned_p % alignment
- * 
- *     if offset > 0:             # <<<<<<<<<<<<<<
- *         aligned_p += alignment - offset
- * 
- */
-  }
-
-  /* "View.MemoryView":309
- *         aligned_p += alignment - offset
- * 
- *     return <void *> aligned_p             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = ((void *)__pyx_v_aligned_p);
-  goto __pyx_L0;
-
-  /* "View.MemoryView":298
- * 
- * @cname('__pyx_align_pointer')
- * cdef void *align_pointer(void *memory, size_t alignment) nogil:             # <<<<<<<<<<<<<<
- *     "Align pointer memory on a given boundary"
- *     cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
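-
-/* The function above is the classic "align up" computation: round an
- * address up to the next multiple of `alignment`. A minimal standalone
- * rendering of the same idiom (illustrative only; `demo_align_up` is not
- * part of the generated module and assumes <stdint.h>):
- *
- *     #include <stdint.h>
- *     static void *demo_align_up(void *memory, size_t alignment) {
- *         uintptr_t p = (uintptr_t) memory;
- *         size_t offset = p % alignment;     // distance past the previous boundary
- *         if (offset > 0)
- *             p += alignment - offset;       // bump up to the next boundary
- *         return (void *) p;
- *     }
- *
- * The cython.cdivision(True) block in the source merely makes the `%`
- * compile to a plain C modulo, as seen in the generated line
- * `__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);`.
- */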
-
-/* "View.MemoryView":345
- *     cdef __Pyx_TypeInfo *typeinfo
- * 
- *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
- *         self.obj = obj
- *         self.flags = flags
- */
-
-/* Python wrapper */
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v_obj = 0;
-  int __pyx_v_flags;
-  int __pyx_v_dtype_is_object;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
-    PyObject* values[3] = {0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (kw_args > 0) {
-          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
-          if (value) { values[2] = value; kw_args--; }
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error)
-      }
-    } else {
-      switch (PyTuple_GET_SIZE(__pyx_args)) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-    }
-    __pyx_v_obj = values[0];
-    __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
-    if (values[2]) {
-      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
-    } else {
-      __pyx_v_dtype_is_object = ((int)0);
-    }
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 345, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return -1;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
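-
-/* The wrapper above is Cython's standard argument-unpacking preamble for
- * `def __cinit__(self, obj, flags, dtype_is_object=False)`: each switch
- * counts down from the number of positional arguments, with
- * CYTHON_FALLTHROUGH letting every case fill one slot of `values[]`.
- * Missing required arguments (`obj`, `flags`) raise via
- * __Pyx_RaiseArgtupleInvalid, while the optional `dtype_is_object`
- * defaults to 0 when absent. Only after unpacking are the C-level
- * conversions (__Pyx_PyInt_As_int, __Pyx_PyObject_IsTrue) applied.
- */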
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__cinit__", 0);
-
-  /* "View.MemoryView":346
- * 
- *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- *         self.obj = obj             # <<<<<<<<<<<<<<
- *         self.flags = flags
- *         if type(self) is memoryview or obj is not None:
- */
-  __Pyx_INCREF(__pyx_v_obj);
-  __Pyx_GIVEREF(__pyx_v_obj);
-  __Pyx_GOTREF(__pyx_v_self->obj);
-  __Pyx_DECREF(__pyx_v_self->obj);
-  __pyx_v_self->obj = __pyx_v_obj;
-
-  /* "View.MemoryView":347
- *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- *         self.obj = obj
- *         self.flags = flags             # <<<<<<<<<<<<<<
- *         if type(self) is memoryview or obj is not None:
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- */
-  __pyx_v_self->flags = __pyx_v_flags;
-
-  /* "View.MemoryView":348
- *         self.obj = obj
- *         self.flags = flags
- *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- *             if <PyObject *> self.view.obj == NULL:
- */
-  __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
-  __pyx_t_3 = (__pyx_t_2 != 0);
-  if (!__pyx_t_3) {
-  } else {
-    __pyx_t_1 = __pyx_t_3;
-    goto __pyx_L4_bool_binop_done;
-  }
-  __pyx_t_3 = (__pyx_v_obj != Py_None);
-  __pyx_t_2 = (__pyx_t_3 != 0);
-  __pyx_t_1 = __pyx_t_2;
-  __pyx_L4_bool_binop_done:;
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":349
- *         self.flags = flags
- *         if type(self) is memoryview or obj is not None:
- *             __Pyx_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
- *             if <PyObject *> self.view.obj == NULL:
- *                 (<__pyx_buffer *> &self.view).obj = Py_None
- */
-    __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error)
-
-    /* "View.MemoryView":350
- *         if type(self) is memoryview or obj is not None:
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
- *                 (<__pyx_buffer *> &self.view).obj = Py_None
- *                 Py_INCREF(Py_None)
- */
-    __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":351
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- *             if <PyObject *> self.view.obj == NULL:
- *                 (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
- *                 Py_INCREF(Py_None)
- * 
- */
-      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
-
-      /* "View.MemoryView":352
- *             if <PyObject *> self.view.obj == NULL:
- *                 (<__pyx_buffer *> &self.view).obj = Py_None
- *                 Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
- * 
- *         global __pyx_memoryview_thread_locks_used
- */
-      Py_INCREF(Py_None);
-
-      /* "View.MemoryView":350
- *         if type(self) is memoryview or obj is not None:
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
- *                 (<__pyx_buffer *> &self.view).obj = Py_None
- *                 Py_INCREF(Py_None)
- */
-    }
-
-    /* "View.MemoryView":348
- *         self.obj = obj
- *         self.flags = flags
- *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
- *             __Pyx_GetBuffer(obj, &self.view, flags)
- *             if <PyObject *> self.view.obj == NULL:
- */
-  }
-
-  /* "View.MemoryView":355
- * 
- *         global __pyx_memoryview_thread_locks_used
- *         if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- *             __pyx_memoryview_thread_locks_used += 1
- */
-  __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":356
- *         global __pyx_memoryview_thread_locks_used
- *         if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]             # <<<<<<<<<<<<<<
- *             __pyx_memoryview_thread_locks_used += 1
- *         if self.lock is NULL:
- */
-    __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
-
-    /* "View.MemoryView":357
- *         if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- *             __pyx_memoryview_thread_locks_used += 1             # <<<<<<<<<<<<<<
- *         if self.lock is NULL:
- *             self.lock = PyThread_allocate_lock()
- */
-    __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
-
-    /* "View.MemoryView":355
- * 
- *         global __pyx_memoryview_thread_locks_used
- *         if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- *             __pyx_memoryview_thread_locks_used += 1
- */
-  }
-
-  /* "View.MemoryView":358
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- *             __pyx_memoryview_thread_locks_used += 1
- *         if self.lock is NULL:             # <<<<<<<<<<<<<<
- *             self.lock = PyThread_allocate_lock()
- *             if self.lock is NULL:
- */
-  __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":359
- *             __pyx_memoryview_thread_locks_used += 1
- *         if self.lock is NULL:
- *             self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
- *             if self.lock is NULL:
- *                 raise MemoryError
- */
-    __pyx_v_self->lock = PyThread_allocate_lock();
-
-    /* "View.MemoryView":360
- *         if self.lock is NULL:
- *             self.lock = PyThread_allocate_lock()
- *             if self.lock is NULL:             # <<<<<<<<<<<<<<
- *                 raise MemoryError
- * 
- */
-    __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
-    if (unlikely(__pyx_t_1)) {
-
-      /* "View.MemoryView":361
- *             self.lock = PyThread_allocate_lock()
- *             if self.lock is NULL:
- *                 raise MemoryError             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_FORMAT:
- */
-      PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error)
-
-      /* "View.MemoryView":360
- *         if self.lock is NULL:
- *             self.lock = PyThread_allocate_lock()
- *             if self.lock is NULL:             # <<<<<<<<<<<<<<
- *                 raise MemoryError
- * 
- */
-    }
-
-    /* "View.MemoryView":358
- *             self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- *             __pyx_memoryview_thread_locks_used += 1
- *         if self.lock is NULL:             # <<<<<<<<<<<<<<
- *             self.lock = PyThread_allocate_lock()
- *             if self.lock is NULL:
- */
-  }
-
-  /* "View.MemoryView":363
- *                 raise MemoryError
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":364
- * 
- *         if flags & PyBUF_FORMAT:
- *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')             # <<<<<<<<<<<<<<
- *         else:
- *             self.dtype_is_object = dtype_is_object
- */
-    __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
-    if (__pyx_t_2) {
-    } else {
-      __pyx_t_1 = __pyx_t_2;
-      goto __pyx_L11_bool_binop_done;
-    }
-    __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
-    __pyx_t_1 = __pyx_t_2;
-    __pyx_L11_bool_binop_done:;
-    __pyx_v_self->dtype_is_object = __pyx_t_1;
-
-    /* "View.MemoryView":363
- *                 raise MemoryError
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- *         else:
- */
-    goto __pyx_L10;
-  }
-
-  /* "View.MemoryView":366
- *             self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- *         else:
- *             self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
- * 
- *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
- */
-  /*else*/ {
-    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
-  }
-  __pyx_L10:;
-
-  /* "View.MemoryView":368
- *             self.dtype_is_object = dtype_is_object
- * 
- *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(             # <<<<<<<<<<<<<<
- *                   <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
- *         self.typeinfo = NULL
- */
-  __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
-
-  /* "View.MemoryView":370
- *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
- *                   <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
- *         self.typeinfo = NULL             # <<<<<<<<<<<<<<
- * 
- *     def __dealloc__(memoryview self):
- */
-  __pyx_v_self->typeinfo = NULL;
-
-  /* "View.MemoryView":345
- *     cdef __Pyx_TypeInfo *typeinfo
- * 
- *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
- *         self.obj = obj
- *         self.flags = flags
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
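-
-/* Two details of the constructor body are worth noting. First, the lock
- * pool: the literal 8 in `__pyx_memoryview_thread_locks_used < 8` is the
- * inlined value of THREAD_LOCKS_PREALLOCATED from the Cython source, so the
- * first eight live memoryviews share a preallocated lock array and only
- * later ones pay for PyThread_allocate_lock(). Second, the dtype check:
- * when the buffer exposes a format string (flags & PyBUF_FORMAT),
- * `dtype_is_object` is derived from the format itself, true exactly when
- * the format is the single character 'O' (a Python-object buffer), rather
- * than trusted from the caller's argument. Finally, the acquisition counter
- * is passed through align_pointer so the __pyx_atomic_int operations on it
- * hit a naturally aligned address.
- */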
-
-/* "View.MemoryView":372
- *         self.typeinfo = NULL
- * 
- *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
- *         if self.obj is not None:
- *             __Pyx_ReleaseBuffer(&self.view)
- */
-
-/* Python wrapper */
-static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
-  __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  int __pyx_v_i;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-  int __pyx_t_5;
-  PyThread_type_lock __pyx_t_6;
-  PyThread_type_lock __pyx_t_7;
-  __Pyx_RefNannySetupContext("__dealloc__", 0);
-
-  /* "View.MemoryView":373
- * 
- *     def __dealloc__(memoryview self):
- *         if self.obj is not None:             # <<<<<<<<<<<<<<
- *             __Pyx_ReleaseBuffer(&self.view)
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
- */
-  __pyx_t_1 = (__pyx_v_self->obj != Py_None);
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":374
- *     def __dealloc__(memoryview self):
- *         if self.obj is not None:
- *             __Pyx_ReleaseBuffer(&self.view)             # <<<<<<<<<<<<<<
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
- * 
- */
-    __Pyx_ReleaseBuffer((&__pyx_v_self->view));
-
-    /* "View.MemoryView":373
- * 
- *     def __dealloc__(memoryview self):
- *         if self.obj is not None:             # <<<<<<<<<<<<<<
- *             __Pyx_ReleaseBuffer(&self.view)
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":375
- *         if self.obj is not None:
- *             __Pyx_ReleaseBuffer(&self.view)
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:             # <<<<<<<<<<<<<<
- * 
- *             (<__pyx_buffer *> &self.view).obj = NULL
- */
-  __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":377
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:
- * 
- *             (<__pyx_buffer *> &self.view).obj = NULL             # <<<<<<<<<<<<<<
- *             Py_DECREF(Py_None)
- * 
- */
-    ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
-
-    /* "View.MemoryView":378
- * 
- *             (<__pyx_buffer *> &self.view).obj = NULL
- *             Py_DECREF(Py_None)             # <<<<<<<<<<<<<<
- * 
- *         cdef int i
- */
-    Py_DECREF(Py_None);
-
-    /* "View.MemoryView":375
- *         if self.obj is not None:
- *             __Pyx_ReleaseBuffer(&self.view)
- *         elif (<__pyx_buffer *> &self.view).obj == Py_None:             # <<<<<<<<<<<<<<
- * 
- *             (<__pyx_buffer *> &self.view).obj = NULL
- */
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":382
- *         cdef int i
- *         global __pyx_memoryview_thread_locks_used
- *         if self.lock != NULL:             # <<<<<<<<<<<<<<
- *             for i in range(__pyx_memoryview_thread_locks_used):
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- */
-  __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":383
- *         global __pyx_memoryview_thread_locks_used
- *         if self.lock != NULL:
- *             for i in range(__pyx_memoryview_thread_locks_used):             # <<<<<<<<<<<<<<
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- *                     __pyx_memoryview_thread_locks_used -= 1
- */
-    __pyx_t_3 = __pyx_memoryview_thread_locks_used;
-    __pyx_t_4 = __pyx_t_3;
-    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
-      __pyx_v_i = __pyx_t_5;
-
-      /* "View.MemoryView":384
- *         if self.lock != NULL:
- *             for i in range(__pyx_memoryview_thread_locks_used):
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
- *                     __pyx_memoryview_thread_locks_used -= 1
- *                     if i != __pyx_memoryview_thread_locks_used:
- */
-      __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":385
- *             for i in range(__pyx_memoryview_thread_locks_used):
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- *                     __pyx_memoryview_thread_locks_used -= 1             # <<<<<<<<<<<<<<
- *                     if i != __pyx_memoryview_thread_locks_used:
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- */
-        __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
-
-        /* "View.MemoryView":386
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- *                     __pyx_memoryview_thread_locks_used -= 1
- *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- */
-        __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
-        if (__pyx_t_2) {
-
-          /* "View.MemoryView":388
- *                     if i != __pyx_memoryview_thread_locks_used:
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])             # <<<<<<<<<<<<<<
- *                     break
- *             else:
- */
-          __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
-          __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
-
-          /* "View.MemoryView":387
- *                     __pyx_memoryview_thread_locks_used -= 1
- *                     if i != __pyx_memoryview_thread_locks_used:
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (             # <<<<<<<<<<<<<<
- *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- *                     break
- */
-          (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
-          (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
-
-          /* "View.MemoryView":386
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- *                     __pyx_memoryview_thread_locks_used -= 1
- *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- */
-        }
-
-        /* "View.MemoryView":389
- *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- *                     break             # <<<<<<<<<<<<<<
- *             else:
- *                 PyThread_free_lock(self.lock)
- */
-        goto __pyx_L6_break;
-
-        /* "View.MemoryView":384
- *         if self.lock != NULL:
- *             for i in range(__pyx_memoryview_thread_locks_used):
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
- *                     __pyx_memoryview_thread_locks_used -= 1
- *                     if i != __pyx_memoryview_thread_locks_used:
- */
-      }
-    }
-    /*else*/ {
-
-      /* "View.MemoryView":391
- *                     break
- *             else:
- *                 PyThread_free_lock(self.lock)             # <<<<<<<<<<<<<<
- * 
- *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
- */
-      PyThread_free_lock(__pyx_v_self->lock);
-    }
-    __pyx_L6_break:;
-
-    /* "View.MemoryView":382
- *         cdef int i
- *         global __pyx_memoryview_thread_locks_used
- *         if self.lock != NULL:             # <<<<<<<<<<<<<<
- *             for i in range(__pyx_memoryview_thread_locks_used):
- *                 if __pyx_memoryview_thread_locks[i] is self.lock:
- */
-  }
-
-  /* "View.MemoryView":372
- *         self.typeinfo = NULL
- * 
- *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
- *         if self.obj is not None:
- *             __Pyx_ReleaseBuffer(&self.view)
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
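-
-/* The lock bookkeeping in __dealloc__ is the inverse of __cinit__: a lock
- * that came from the preallocated pool is recycled, not freed. The swap via
- * __pyx_t_6/__pyx_t_7 moves the released lock into the slot just past the
- * shrunken "in use" region (__pyx_memoryview_thread_locks_used was
- * decremented first), keeping the live entries contiguous at the front of
- * the array. Only locks obtained from PyThread_allocate_lock(), i.e. the
- * for-else path, are actually released with PyThread_free_lock().
- */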
-
-/* "View.MemoryView":393
- *                 PyThread_free_lock(self.lock)
- * 
- *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
- *         cdef Py_ssize_t dim
- *         cdef char *itemp = <char *> self.view.buf
- */
-
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
-  Py_ssize_t __pyx_v_dim;
-  char *__pyx_v_itemp;
-  PyObject *__pyx_v_idx = NULL;
-  char *__pyx_r;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  Py_ssize_t __pyx_t_3;
-  PyObject *(*__pyx_t_4)(PyObject *);
-  PyObject *__pyx_t_5 = NULL;
-  Py_ssize_t __pyx_t_6;
-  char *__pyx_t_7;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("get_item_pointer", 0);
-
-  /* "View.MemoryView":395
- *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
- *         cdef Py_ssize_t dim
- *         cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
- * 
- *         for dim, idx in enumerate(index):
- */
-  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
-
-  /* "View.MemoryView":397
- *         cdef char *itemp = <char *> self.view.buf
- * 
- *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
- *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
- * 
- */
-  __pyx_t_1 = 0;
-  if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
-    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
-    __pyx_t_4 = NULL;
-  } else {
-    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 397, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 397, __pyx_L1_error)
-  }
-  for (;;) {
-    if (likely(!__pyx_t_4)) {
-      if (likely(PyList_CheckExact(__pyx_t_2))) {
-        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
-        #else
-        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_5);
-        #endif
-      } else {
-        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
-        #else
-        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_5);
-        #endif
-      }
-    } else {
-      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
-      if (unlikely(!__pyx_t_5)) {
-        PyObject* exc_type = PyErr_Occurred();
-        if (exc_type) {
-          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else __PYX_ERR(2, 397, __pyx_L1_error)
-        }
-        break;
-      }
-      __Pyx_GOTREF(__pyx_t_5);
-    }
-    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
-    __pyx_t_5 = 0;
-    __pyx_v_dim = __pyx_t_1;
-    __pyx_t_1 = (__pyx_t_1 + 1);
-
-    /* "View.MemoryView":398
- * 
- *         for dim, idx in enumerate(index):
- *             itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
- * 
- *         return itemp
- */
-    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 398, __pyx_L1_error)
-    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 398, __pyx_L1_error)
-    __pyx_v_itemp = __pyx_t_7;
-
-    /* "View.MemoryView":397
- *         cdef char *itemp = <char *> self.view.buf
- * 
- *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
- *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
- * 
- */
-  }
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "View.MemoryView":400
- *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
- * 
- *         return itemp             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = __pyx_v_itemp;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":393
- *                 PyThread_free_lock(self.lock)
- * 
- *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
- *         cdef Py_ssize_t dim
- *         cdef char *itemp = <char *> self.view.buf
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_idx);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
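-
-/* get_item_pointer walks the buffer one dimension at a time: starting from
- * self.view.buf, each (dim, idx) pair is handed to __pyx_pybuffer_index,
- * which applies bounds checking, negative-index wrapping and the stride
- * (and suboffset, for indirect buffers) for that dimension, returning the
- * advanced pointer. For a plain strided buffer this is conceptually
- *
- *     itemp = buf + idx0*strides[0] + idx1*strides[1] + ...
- *
- * computed incrementally, with each step able to raise IndexError instead
- * of silently walking out of bounds.
- */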
-
-/* "View.MemoryView":403
- * 
- * 
- *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
- *         if index is Ellipsis:
- *             return self
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
-static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
-  PyObject *__pyx_v_have_slices = NULL;
-  PyObject *__pyx_v_indices = NULL;
-  char *__pyx_v_itemp;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  char *__pyx_t_6;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__getitem__", 0);
-
-  /* "View.MemoryView":404
- * 
- *     def __getitem__(memoryview self, object index):
- *         if index is Ellipsis:             # <<<<<<<<<<<<<<
- *             return self
- * 
- */
-  __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":405
- *     def __getitem__(memoryview self, object index):
- *         if index is Ellipsis:
- *             return self             # <<<<<<<<<<<<<<
- * 
- *         have_slices, indices = _unellipsify(index, self.view.ndim)
- */
-    __Pyx_XDECREF(__pyx_r);
-    __Pyx_INCREF(((PyObject *)__pyx_v_self));
-    __pyx_r = ((PyObject *)__pyx_v_self);
-    goto __pyx_L0;
-
-    /* "View.MemoryView":404
- * 
- *     def __getitem__(memoryview self, object index):
- *         if index is Ellipsis:             # <<<<<<<<<<<<<<
- *             return self
- * 
- */
-  }
-
-  /* "View.MemoryView":407
- *             return self
- * 
- *         have_slices, indices = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
- * 
- *         cdef char *itemp
- */
-  __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (likely(__pyx_t_3 != Py_None)) {
-    PyObject* sequence = __pyx_t_3;
-    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
-    if (unlikely(size != 2)) {
-      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
-      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-      __PYX_ERR(2, 407, __pyx_L1_error)
-    }
-    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); 
-    __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); 
-    __Pyx_INCREF(__pyx_t_4);
-    __Pyx_INCREF(__pyx_t_5);
-    #else
-    __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 407, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 407, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    #endif
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  } else {
-    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 407, __pyx_L1_error)
-  }
-  __pyx_v_have_slices = __pyx_t_4;
-  __pyx_t_4 = 0;
-  __pyx_v_indices = __pyx_t_5;
-  __pyx_t_5 = 0;
-
-  /* "View.MemoryView":410
- * 
- *         cdef char *itemp
- *         if have_slices:             # <<<<<<<<<<<<<<
- *             return memview_slice(self, indices)
- *         else:
- */
-  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 410, __pyx_L1_error)
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":411
- *         cdef char *itemp
- *         if have_slices:
- *             return memview_slice(self, indices)             # <<<<<<<<<<<<<<
- *         else:
- *             itemp = self.get_item_pointer(indices)
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 411, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_r = __pyx_t_3;
-    __pyx_t_3 = 0;
-    goto __pyx_L0;
-
-    /* "View.MemoryView":410
- * 
- *         cdef char *itemp
- *         if have_slices:             # <<<<<<<<<<<<<<
- *             return memview_slice(self, indices)
- *         else:
- */
-  }
-
-  /* "View.MemoryView":413
- *             return memview_slice(self, indices)
- *         else:
- *             itemp = self.get_item_pointer(indices)             # <<<<<<<<<<<<<<
- *             return self.convert_item_to_object(itemp)
- * 
- */
-  /*else*/ {
-    __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 413, __pyx_L1_error)
-    __pyx_v_itemp = __pyx_t_6;
-
-    /* "View.MemoryView":414
- *         else:
- *             itemp = self.get_item_pointer(indices)
- *             return self.convert_item_to_object(itemp)             # <<<<<<<<<<<<<<
- * 
- *     def __setitem__(memoryview self, object index, object value):
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 414, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_r = __pyx_t_3;
-    __pyx_t_3 = 0;
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":403
- * 
- * 
- *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
- *         if index is Ellipsis:
- *             return self
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_have_slices);
-  __Pyx_XDECREF(__pyx_v_indices);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
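-
-/* __getitem__ dispatches on the shape of the index. A bare Ellipsis returns
- * the memoryview itself; otherwise _unellipsify expands any Ellipsis into
- * full slices and reports whether a slice is present at all. Slice indices
- * go to memview_slice (a new view over the same buffer, no copy), while a
- * tuple of plain integers resolves to a single element via
- * get_item_pointer + convert_item_to_object. Illustratively, for a
- * hypothetical view `mv`:
- *
- *     mv[...]       # -> mv itself
- *     mv[1:, 0]     # -> new memoryview slice
- *     mv[1, 2]      # -> scalar element
- */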
-
-/* "View.MemoryView":416
- *             return self.convert_item_to_object(itemp)
- * 
- *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
- *         if self.view.readonly:
- *             raise TypeError("Cannot assign to read-only memoryview")
- */
-
-/* Python wrapper */
-static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
-  PyObject *__pyx_v_have_slices = NULL;
-  PyObject *__pyx_v_obj = NULL;
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setitem__", 0);
-  __Pyx_INCREF(__pyx_v_index);
-
-  /* "View.MemoryView":417
- * 
- *     def __setitem__(memoryview self, object index, object value):
- *         if self.view.readonly:             # <<<<<<<<<<<<<<
- *             raise TypeError("Cannot assign to read-only memoryview")
- * 
- */
-  __pyx_t_1 = (__pyx_v_self->view.readonly != 0);
-  if (unlikely(__pyx_t_1)) {
-
-    /* "View.MemoryView":418
- *     def __setitem__(memoryview self, object index, object value):
- *         if self.view.readonly:
- *             raise TypeError("Cannot assign to read-only memoryview")             # <<<<<<<<<<<<<<
- * 
- *         have_slices, index = _unellipsify(index, self.view.ndim)
- */
-    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 418, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __PYX_ERR(2, 418, __pyx_L1_error)
-
-    /* "View.MemoryView":417
- * 
- *     def __setitem__(memoryview self, object index, object value):
- *         if self.view.readonly:             # <<<<<<<<<<<<<<
- *             raise TypeError("Cannot assign to read-only memoryview")
- * 
- */
-  }
-
-  /* "View.MemoryView":420
- *             raise TypeError("Cannot assign to read-only memoryview")
- * 
- *         have_slices, index = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
- * 
- *         if have_slices:
- */
-  __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 420, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (likely(__pyx_t_2 != Py_None)) {
-    PyObject* sequence = __pyx_t_2;
-    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
-    if (unlikely(size != 2)) {
-      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
-      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
-      __PYX_ERR(2, 420, __pyx_L1_error)
-    }
-    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); 
-    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); 
-    __Pyx_INCREF(__pyx_t_3);
-    __Pyx_INCREF(__pyx_t_4);
-    #else
-    __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 420, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 420, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    #endif
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  } else {
-    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 420, __pyx_L1_error)
-  }
-  __pyx_v_have_slices = __pyx_t_3;
-  __pyx_t_3 = 0;
-  __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
-  __pyx_t_4 = 0;
-
-  /* "View.MemoryView":422
- *         have_slices, index = _unellipsify(index, self.view.ndim)
- * 
- *         if have_slices:             # <<<<<<<<<<<<<<
- *             obj = self.is_slice(value)
- *             if obj:
- */
-  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 422, __pyx_L1_error)
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":423
- * 
- *         if have_slices:
- *             obj = self.is_slice(value)             # <<<<<<<<<<<<<<
- *             if obj:
- *                 self.setitem_slice_assignment(self[index], obj)
- */
-    __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_v_obj = __pyx_t_2;
-    __pyx_t_2 = 0;
-
-    /* "View.MemoryView":424
- *         if have_slices:
- *             obj = self.is_slice(value)
- *             if obj:             # <<<<<<<<<<<<<<
- *                 self.setitem_slice_assignment(self[index], obj)
- *             else:
- */
-    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 424, __pyx_L1_error)
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":425
- *             obj = self.is_slice(value)
- *             if obj:
- *                 self.setitem_slice_assignment(self[index], obj)             # <<<<<<<<<<<<<<
- *             else:
- *                 self.setitem_slice_assign_scalar(self[index], value)
- */
-      __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_2);
-      __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 425, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-      /* "View.MemoryView":424
- *         if have_slices:
- *             obj = self.is_slice(value)
- *             if obj:             # <<<<<<<<<<<<<<
- *                 self.setitem_slice_assignment(self[index], obj)
- *             else:
- */
-      goto __pyx_L5;
-    }
-
-    /* "View.MemoryView":427
- *                 self.setitem_slice_assignment(self[index], obj)
- *             else:
- *                 self.setitem_slice_assign_scalar(self[index], value)             # <<<<<<<<<<<<<<
- *         else:
- *             self.setitem_indexed(index, value)
- */
-    /*else*/ {
-      __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 427, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_4);
-      if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 427, __pyx_L1_error)
-      __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 427, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_2);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    }
-    __pyx_L5:;
-
-    /* "View.MemoryView":422
- *         have_slices, index = _unellipsify(index, self.view.ndim)
- * 
- *         if have_slices:             # <<<<<<<<<<<<<<
- *             obj = self.is_slice(value)
- *             if obj:
- */
-    goto __pyx_L4;
-  }
-
-  /* "View.MemoryView":429
- *                 self.setitem_slice_assign_scalar(self[index], value)
- *         else:
- *             self.setitem_indexed(index, value)             # <<<<<<<<<<<<<<
- * 
- *     cdef is_slice(self, obj):
- */
-  /*else*/ {
-    __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 429, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  }
-  __pyx_L4:;
-
-  /* "View.MemoryView":416
- *             return self.convert_item_to_object(itemp)
- * 
- *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
- *         if self.view.readonly:
- *             raise TypeError("Cannot assign to read-only memoryview")
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_have_slices);
-  __Pyx_XDECREF(__pyx_v_obj);
-  __Pyx_XDECREF(__pyx_v_index);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
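-
-/* __setitem__ mirrors __getitem__: after rejecting read-only buffers and
- * unellipsifying the index, slice assignments split into two cases via
- * is_slice below. If the value can itself be viewed as a memoryview, the
- * element-wise copy path setitem_slice_assignment(self[index], obj) runs;
- * otherwise the scalar broadcast path setitem_slice_assign_scalar is used,
- * e.g. `mv[1:, :] = 0` for a hypothetical view `mv`. Purely integral
- * indices take the direct setitem_indexed path.
- */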
-
-/* "View.MemoryView":431
- *             self.setitem_indexed(index, value)
- * 
- *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
- *         if not isinstance(obj, memoryview):
- *             try:
- */
-
-static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  int __pyx_t_9;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("is_slice", 0);
-  __Pyx_INCREF(__pyx_v_obj);
-
-  /* "View.MemoryView":432
- * 
- *     cdef is_slice(self, obj):
- *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
- *             try:
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- */
-  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); 
-  __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":433
- *     cdef is_slice(self, obj):
- *         if not isinstance(obj, memoryview):
- *             try:             # <<<<<<<<<<<<<<
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- *                                  self.dtype_is_object)
- */
-    {
-      __Pyx_PyThreadState_declare
-      __Pyx_PyThreadState_assign
-      __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
-      __Pyx_XGOTREF(__pyx_t_3);
-      __Pyx_XGOTREF(__pyx_t_4);
-      __Pyx_XGOTREF(__pyx_t_5);
-      /*try:*/ {
-
-        /* "View.MemoryView":434
- *         if not isinstance(obj, memoryview):
- *             try:
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
- *                                  self.dtype_is_object)
- *             except TypeError:
- */
-        __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 434, __pyx_L4_error)
-        __Pyx_GOTREF(__pyx_t_6);
-
-        /* "View.MemoryView":435
- *             try:
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- *                                  self.dtype_is_object)             # <<<<<<<<<<<<<<
- *             except TypeError:
- *                 return None
- */
-        __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 435, __pyx_L4_error)
-        __Pyx_GOTREF(__pyx_t_7);
-
-        /* "View.MemoryView":434
- *         if not isinstance(obj, memoryview):
- *             try:
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
- *                                  self.dtype_is_object)
- *             except TypeError:
- */
-        __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 434, __pyx_L4_error)
-        __Pyx_GOTREF(__pyx_t_8);
-        __Pyx_INCREF(__pyx_v_obj);
-        __Pyx_GIVEREF(__pyx_v_obj);
-        PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
-        __Pyx_GIVEREF(__pyx_t_6);
-        PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
-        __Pyx_GIVEREF(__pyx_t_7);
-        PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
-        __pyx_t_6 = 0;
-        __pyx_t_7 = 0;
-        __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 434, __pyx_L4_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-        __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
-        __pyx_t_7 = 0;
-
-        /* "View.MemoryView":433
- *     cdef is_slice(self, obj):
- *         if not isinstance(obj, memoryview):
- *             try:             # <<<<<<<<<<<<<<
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- *                                  self.dtype_is_object)
- */
-      }
-      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-      goto __pyx_L9_try_end;
-      __pyx_L4_error:;
-      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-      __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-      __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
-
-      /* "View.MemoryView":436
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- *                                  self.dtype_is_object)
- *             except TypeError:             # <<<<<<<<<<<<<<
- *                 return None
- * 
- */
-      __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
-      if (__pyx_t_9) {
-        __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-        if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 436, __pyx_L6_except_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        __Pyx_GOTREF(__pyx_t_8);
-        __Pyx_GOTREF(__pyx_t_6);
-
-        /* "View.MemoryView":437
- *                                  self.dtype_is_object)
- *             except TypeError:
- *                 return None             # <<<<<<<<<<<<<<
- * 
- *         return obj
- */
-        __Pyx_XDECREF(__pyx_r);
-        __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-        goto __pyx_L7_except_return;
-      }
-      goto __pyx_L6_except_error;
-      __pyx_L6_except_error:;
-
-      /* "View.MemoryView":433
- *     cdef is_slice(self, obj):
- *         if not isinstance(obj, memoryview):
- *             try:             # <<<<<<<<<<<<<<
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- *                                  self.dtype_is_object)
- */
-      __Pyx_XGIVEREF(__pyx_t_3);
-      __Pyx_XGIVEREF(__pyx_t_4);
-      __Pyx_XGIVEREF(__pyx_t_5);
-      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
-      goto __pyx_L1_error;
-      __pyx_L7_except_return:;
-      __Pyx_XGIVEREF(__pyx_t_3);
-      __Pyx_XGIVEREF(__pyx_t_4);
-      __Pyx_XGIVEREF(__pyx_t_5);
-      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
-      goto __pyx_L0;
-      __pyx_L9_try_end:;
-    }
-
-    /* "View.MemoryView":432
- * 
- *     cdef is_slice(self, obj):
- *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
- *             try:
- *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- */
-  }
-
-  /* "View.MemoryView":439
- *                 return None
- * 
- *         return obj             # <<<<<<<<<<<<<<
- * 
- *     cdef setitem_slice_assignment(self, dst, src):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_obj);
-  __pyx_r = __pyx_v_obj;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":431
- *             self.setitem_indexed(index, value)
- * 
- *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
- *         if not isinstance(obj, memoryview):
- *             try:
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_obj);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":441
- *         return obj
- * 
- *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice dst_slice
- *         cdef __Pyx_memviewslice src_slice
- */
-
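-/* setitem_slice_assignment: extract the C-level slices from the `src` and
- * `dst` memoryviews and delegate the element-wise copy to
- * memoryview_copy_contents, passing each operand's ndim separately.
- */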
-static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
-  __Pyx_memviewslice __pyx_v_dst_slice;
-  __Pyx_memviewslice __pyx_v_src_slice;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice *__pyx_t_1;
-  __Pyx_memviewslice *__pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  int __pyx_t_5;
-  int __pyx_t_6;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
-
-  /* "View.MemoryView":445
- *         cdef __Pyx_memviewslice src_slice
- * 
- *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
- *                                  get_slice_from_memview(dst, &dst_slice)[0],
- *                                  src.ndim, dst.ndim, self.dtype_is_object)
- */
-  if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 445, __pyx_L1_error)
-  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 445, __pyx_L1_error)
-
-  /* "View.MemoryView":446
- * 
- *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
- *                                  get_slice_from_memview(dst, &dst_slice)[0],             # <<<<<<<<<<<<<<
- *                                  src.ndim, dst.ndim, self.dtype_is_object)
- * 
- */
-  if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 446, __pyx_L1_error)
-  __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 446, __pyx_L1_error)
-
-  /* "View.MemoryView":447
- *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
- *                                  get_slice_from_memview(dst, &dst_slice)[0],
- *                                  src.ndim, dst.ndim, self.dtype_is_object)             # <<<<<<<<<<<<<<
- * 
- *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
- */
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "View.MemoryView":445
- *         cdef __Pyx_memviewslice src_slice
- * 
- *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
- *                                  get_slice_from_memview(dst, &dst_slice)[0],
- *                                  src.ndim, dst.ndim, self.dtype_is_object)
- */
-  __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 445, __pyx_L1_error)
-
-  /* "View.MemoryView":441
- *         return obj
- * 
- *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice dst_slice
- *         cdef __Pyx_memviewslice src_slice
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":449
- *                                  src.ndim, dst.ndim, self.dtype_is_object)
- * 
- *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
- *         cdef int array[128]
- *         cdef void *tmp = NULL
- */
-
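-/* setitem_slice_assign_scalar: convert the scalar `value` once into a
- * temporary item buffer (the 128-int stack array, or PyMem_Malloc'd storage
- * when view.itemsize is larger), then broadcast it over every element of
- * `dst` via slice_assign_scalar; the finally block frees the heap buffer on
- * both the normal and the exception path.
- */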
-static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
-  int __pyx_v_array[0x80];
-  void *__pyx_v_tmp;
-  void *__pyx_v_item;
-  __Pyx_memviewslice *__pyx_v_dst_slice;
-  __Pyx_memviewslice __pyx_v_tmp_slice;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice *__pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_t_4;
-  int __pyx_t_5;
-  char const *__pyx_t_6;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  PyObject *__pyx_t_9 = NULL;
-  PyObject *__pyx_t_10 = NULL;
-  PyObject *__pyx_t_11 = NULL;
-  PyObject *__pyx_t_12 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
-
-  /* "View.MemoryView":451
- *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
- *         cdef int array[128]
- *         cdef void *tmp = NULL             # <<<<<<<<<<<<<<
- *         cdef void *item
- * 
- */
-  __pyx_v_tmp = NULL;
-
-  /* "View.MemoryView":456
- *         cdef __Pyx_memviewslice *dst_slice
- *         cdef __Pyx_memviewslice tmp_slice
- *         dst_slice = get_slice_from_memview(dst, &tmp_slice)             # <<<<<<<<<<<<<<
- * 
- *         if <size_t>self.view.itemsize > sizeof(array):
- */
-  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 456, __pyx_L1_error)
-  __pyx_v_dst_slice = __pyx_t_1;
-
-  /* "View.MemoryView":458
- *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
- * 
- *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
- *             tmp = PyMem_Malloc(self.view.itemsize)
- *             if tmp == NULL:
- */
-  __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":459
- * 
- *         if <size_t>self.view.itemsize > sizeof(array):
- *             tmp = PyMem_Malloc(self.view.itemsize)             # <<<<<<<<<<<<<<
- *             if tmp == NULL:
- *                 raise MemoryError
- */
-    __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
-
-    /* "View.MemoryView":460
- *         if <size_t>self.view.itemsize > sizeof(array):
- *             tmp = PyMem_Malloc(self.view.itemsize)
- *             if tmp == NULL:             # <<<<<<<<<<<<<<
- *                 raise MemoryError
- *             item = tmp
- */
-    __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
-    if (unlikely(__pyx_t_2)) {
-
-      /* "View.MemoryView":461
- *             tmp = PyMem_Malloc(self.view.itemsize)
- *             if tmp == NULL:
- *                 raise MemoryError             # <<<<<<<<<<<<<<
- *             item = tmp
- *         else:
- */
-      PyErr_NoMemory(); __PYX_ERR(2, 461, __pyx_L1_error)
-
-      /* "View.MemoryView":460
- *         if <size_t>self.view.itemsize > sizeof(array):
- *             tmp = PyMem_Malloc(self.view.itemsize)
- *             if tmp == NULL:             # <<<<<<<<<<<<<<
- *                 raise MemoryError
- *             item = tmp
- */
-    }
-
-    /* "View.MemoryView":462
- *             if tmp == NULL:
- *                 raise MemoryError
- *             item = tmp             # <<<<<<<<<<<<<<
- *         else:
- *             item = <void *> array
- */
-    __pyx_v_item = __pyx_v_tmp;
-
-    /* "View.MemoryView":458
- *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
- * 
- *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
- *             tmp = PyMem_Malloc(self.view.itemsize)
- *             if tmp == NULL:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":464
- *             item = tmp
- *         else:
- *             item = <void *> array             # <<<<<<<<<<<<<<
- * 
- *         try:
- */
-  /*else*/ {
-    __pyx_v_item = ((void *)__pyx_v_array);
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":466
- *             item = <void *> array
- * 
- *         try:             # <<<<<<<<<<<<<<
- *             if self.dtype_is_object:
- *                 (<PyObject **> item)[0] = <PyObject *> value
- */
-  /*try:*/ {
-
-    /* "View.MemoryView":467
- * 
- *         try:
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 (<PyObject **> item)[0] = <PyObject *> value
- *             else:
- */
-    __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":468
- *         try:
- *             if self.dtype_is_object:
- *                 (<PyObject **> item)[0] = <PyObject *> value             # <<<<<<<<<<<<<<
- *             else:
- *                 self.assign_item_from_object(<char *> item, value)
- */
-      (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
-
-      /* "View.MemoryView":467
- * 
- *         try:
- *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
- *                 (<PyObject **> item)[0] = <PyObject *> value
- *             else:
- */
-      goto __pyx_L8;
-    }
-
-    /* "View.MemoryView":470
- *                 (<PyObject **> item)[0] = <PyObject *> value
- *             else:
- *                 self.assign_item_from_object(<char *> item, value)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    /*else*/ {
-      __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 470, __pyx_L6_error)
-      __Pyx_GOTREF(__pyx_t_3);
-      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    }
-    __pyx_L8:;
-
-    /* "View.MemoryView":474
- * 
- * 
- *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
- *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- */
-    __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":475
- * 
- *             if self.view.suboffsets != NULL:
- *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)             # <<<<<<<<<<<<<<
- *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- *                                 item, self.dtype_is_object)
- */
-      __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 475, __pyx_L6_error)
-      __Pyx_GOTREF(__pyx_t_3);
-      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-      /* "View.MemoryView":474
- * 
- * 
- *             if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
- *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- */
-    }
-
-    /* "View.MemoryView":476
- *             if self.view.suboffsets != NULL:
- *                 assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,             # <<<<<<<<<<<<<<
- *                                 item, self.dtype_is_object)
- *         finally:
- */
-    __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
-  }
-
-  /* "View.MemoryView":479
- *                                 item, self.dtype_is_object)
- *         finally:
- *             PyMem_Free(tmp)             # <<<<<<<<<<<<<<
- * 
- *     cdef setitem_indexed(self, index, value):
- */
-  /*finally:*/ {
-    /*normal exit:*/{
-      PyMem_Free(__pyx_v_tmp);
-      goto __pyx_L7;
-    }
-    __pyx_L6_error:;
-    /*exception exit:*/{
-      __Pyx_PyThreadState_declare
-      __Pyx_PyThreadState_assign
-      __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
-      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
-      if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
-      if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
-      __Pyx_XGOTREF(__pyx_t_7);
-      __Pyx_XGOTREF(__pyx_t_8);
-      __Pyx_XGOTREF(__pyx_t_9);
-      __Pyx_XGOTREF(__pyx_t_10);
-      __Pyx_XGOTREF(__pyx_t_11);
-      __Pyx_XGOTREF(__pyx_t_12);
-      __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
-      {
-        PyMem_Free(__pyx_v_tmp);
-      }
-      if (PY_MAJOR_VERSION >= 3) {
-        __Pyx_XGIVEREF(__pyx_t_10);
-        __Pyx_XGIVEREF(__pyx_t_11);
-        __Pyx_XGIVEREF(__pyx_t_12);
-        __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
-      }
-      __Pyx_XGIVEREF(__pyx_t_7);
-      __Pyx_XGIVEREF(__pyx_t_8);
-      __Pyx_XGIVEREF(__pyx_t_9);
-      __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
-      __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
-      __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
-      goto __pyx_L1_error;
-    }
-    __pyx_L7:;
-  }
-
-  /* "View.MemoryView":449
- *                                  src.ndim, dst.ndim, self.dtype_is_object)
- * 
- *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
- *         cdef int array[128]
- *         cdef void *tmp = NULL
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":481
- *             PyMem_Free(tmp)
- * 
- *     cdef setitem_indexed(self, index, value):             # <<<<<<<<<<<<<<
- *         cdef char *itemp = self.get_item_pointer(index)
- *         self.assign_item_from_object(itemp, value)
- */
-
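-/* setitem_indexed: single-element assignment; resolve the item pointer for
- * `index` and convert `value` into it with assign_item_from_object.
- */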
-static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
-  char *__pyx_v_itemp;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  char *__pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("setitem_indexed", 0);
-
-  /* "View.MemoryView":482
- * 
- *     cdef setitem_indexed(self, index, value):
- *         cdef char *itemp = self.get_item_pointer(index)             # <<<<<<<<<<<<<<
- *         self.assign_item_from_object(itemp, value)
- * 
- */
-  __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(2, 482, __pyx_L1_error)
-  __pyx_v_itemp = __pyx_t_1;
-
-  /* "View.MemoryView":483
- *     cdef setitem_indexed(self, index, value):
- *         cdef char *itemp = self.get_item_pointer(index)
- *         self.assign_item_from_object(itemp, value)             # <<<<<<<<<<<<<<
- * 
- *     cdef convert_item_to_object(self, char *itemp):
- */
-  __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 483, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "View.MemoryView":481
- *             PyMem_Free(tmp)
- * 
- *     cdef setitem_indexed(self, index, value):             # <<<<<<<<<<<<<<
- *         cdef char *itemp = self.get_item_pointer(index)
- *         self.assign_item_from_object(itemp, value)
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":485
- *         self.assign_item_from_object(itemp, value)
- * 
- *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- */
-
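-/* convert_item_to_object: generic fallback that reads view.itemsize bytes
- * at `itemp` and decodes them with struct.unpack(self.view.format, ...),
- * unwrapping single-element results; only used when Cython has no typed
- * conversion for the item type.
- */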
-static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
-  PyObject *__pyx_v_struct = NULL;
-  PyObject *__pyx_v_bytesitem = 0;
-  PyObject *__pyx_v_result = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  int __pyx_t_8;
-  PyObject *__pyx_t_9 = NULL;
-  size_t __pyx_t_10;
-  int __pyx_t_11;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("convert_item_to_object", 0);
-
-  /* "View.MemoryView":488
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- *         import struct             # <<<<<<<<<<<<<<
- *         cdef bytes bytesitem
- * 
- */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 488, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_struct = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "View.MemoryView":491
- *         cdef bytes bytesitem
- * 
- *         bytesitem = itemp[:self.view.itemsize]             # <<<<<<<<<<<<<<
- *         try:
- *             result = struct.unpack(self.view.format, bytesitem)
- */
-  __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 491, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
-  __pyx_t_1 = 0;
-
-  /* "View.MemoryView":492
- * 
- *         bytesitem = itemp[:self.view.itemsize]
- *         try:             # <<<<<<<<<<<<<<
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:
- */
-  {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
-    __Pyx_XGOTREF(__pyx_t_2);
-    __Pyx_XGOTREF(__pyx_t_3);
-    __Pyx_XGOTREF(__pyx_t_4);
-    /*try:*/ {
-
-      /* "View.MemoryView":493
- *         bytesitem = itemp[:self.view.itemsize]
- *         try:
- *             result = struct.unpack(self.view.format, bytesitem)             # <<<<<<<<<<<<<<
- *         except struct.error:
- *             raise ValueError("Unable to convert item to object")
- */
-      __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 493, __pyx_L3_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 493, __pyx_L3_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __pyx_t_7 = NULL;
-      __pyx_t_8 = 0;
-      if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
-        __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
-        if (likely(__pyx_t_7)) {
-          PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
-          __Pyx_INCREF(__pyx_t_7);
-          __Pyx_INCREF(function);
-          __Pyx_DECREF_SET(__pyx_t_5, function);
-          __pyx_t_8 = 1;
-        }
-      }
-      #if CYTHON_FAST_PYCALL
-      if (PyFunction_Check(__pyx_t_5)) {
-        PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
-        __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
-        __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-        __Pyx_GOTREF(__pyx_t_1);
-        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      } else
-      #endif
-      #if CYTHON_FAST_PYCCALL
-      if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
-        PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
-        __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
-        __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-        __Pyx_GOTREF(__pyx_t_1);
-        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      } else
-      #endif
-      {
-        __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 493, __pyx_L3_error)
-        __Pyx_GOTREF(__pyx_t_9);
-        if (__pyx_t_7) {
-          __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
-        }
-        __Pyx_GIVEREF(__pyx_t_6);
-        PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
-        __Pyx_INCREF(__pyx_v_bytesitem);
-        __Pyx_GIVEREF(__pyx_v_bytesitem);
-        PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
-        __pyx_t_6 = 0;
-        __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
-        __Pyx_GOTREF(__pyx_t_1);
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      }
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __pyx_v_result = __pyx_t_1;
-      __pyx_t_1 = 0;
-
-      /* "View.MemoryView":492
- * 
- *         bytesitem = itemp[:self.view.itemsize]
- *         try:             # <<<<<<<<<<<<<<
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:
- */
-    }
-
-    /* "View.MemoryView":497
- *             raise ValueError("Unable to convert item to object")
- *         else:
- *             if len(self.view.format) == 1:             # <<<<<<<<<<<<<<
- *                 return result[0]
- *             return result
- */
-    /*else:*/ {
-      __pyx_t_10 = strlen(__pyx_v_self->view.format); 
-      __pyx_t_11 = ((__pyx_t_10 == 1) != 0);
-      if (__pyx_t_11) {
-
-        /* "View.MemoryView":498
- *         else:
- *             if len(self.view.format) == 1:
- *                 return result[0]             # <<<<<<<<<<<<<<
- *             return result
- * 
- */
-        __Pyx_XDECREF(__pyx_r);
-        __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 498, __pyx_L5_except_error)
-        __Pyx_GOTREF(__pyx_t_1);
-        __pyx_r = __pyx_t_1;
-        __pyx_t_1 = 0;
-        goto __pyx_L6_except_return;
-
-        /* "View.MemoryView":497
- *             raise ValueError("Unable to convert item to object")
- *         else:
- *             if len(self.view.format) == 1:             # <<<<<<<<<<<<<<
- *                 return result[0]
- *             return result
- */
-      }
-
-      /* "View.MemoryView":499
- *             if len(self.view.format) == 1:
- *                 return result[0]
- *             return result             # <<<<<<<<<<<<<<
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):
- */
-      __Pyx_XDECREF(__pyx_r);
-      __Pyx_INCREF(__pyx_v_result);
-      __pyx_r = __pyx_v_result;
-      goto __pyx_L6_except_return;
-    }
-    __pyx_L3_error:;
-    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
-
-    /* "View.MemoryView":494
- *         try:
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:             # <<<<<<<<<<<<<<
- *             raise ValueError("Unable to convert item to object")
- *         else:
- */
-    __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 494, __pyx_L5_except_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
-    __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
-    if (__pyx_t_8) {
-      __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
-      if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 494, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_GOTREF(__pyx_t_1);
-
-      /* "View.MemoryView":495
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:
- *             raise ValueError("Unable to convert item to object")             # <<<<<<<<<<<<<<
- *         else:
- *             if len(self.view.format) == 1:
- */
-      __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 495, __pyx_L5_except_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_Raise(__pyx_t_6, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-      __PYX_ERR(2, 495, __pyx_L5_except_error)
-    }
-    goto __pyx_L5_except_error;
-    __pyx_L5_except_error:;
-
-    /* "View.MemoryView":492
- * 
- *         bytesitem = itemp[:self.view.itemsize]
- *         try:             # <<<<<<<<<<<<<<
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:
- */
-    __Pyx_XGIVEREF(__pyx_t_2);
-    __Pyx_XGIVEREF(__pyx_t_3);
-    __Pyx_XGIVEREF(__pyx_t_4);
-    __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
-    goto __pyx_L1_error;
-    __pyx_L6_except_return:;
-    __Pyx_XGIVEREF(__pyx_t_2);
-    __Pyx_XGIVEREF(__pyx_t_3);
-    __Pyx_XGIVEREF(__pyx_t_4);
-    __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":485
- *         self.assign_item_from_object(itemp, value)
- * 
- *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_9);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_struct);
-  __Pyx_XDECREF(__pyx_v_bytesitem);
-  __Pyx_XDECREF(__pyx_v_result);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":501
- *             return result
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- */
-
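-/* assign_item_from_object: inverse of convert_item_to_object; encode
- * `value` with struct.pack(self.view.format, ...) (splatting tuple values
- * as separate arguments) and copy the packed bytes into `itemp`.
- */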
-static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
-  PyObject *__pyx_v_struct = NULL;
-  char __pyx_v_c;
-  PyObject *__pyx_v_bytesvalue = 0;
-  Py_ssize_t __pyx_v_i;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  PyObject *__pyx_t_6 = NULL;
-  int __pyx_t_7;
-  PyObject *__pyx_t_8 = NULL;
-  Py_ssize_t __pyx_t_9;
-  PyObject *__pyx_t_10 = NULL;
-  char *__pyx_t_11;
-  char *__pyx_t_12;
-  char *__pyx_t_13;
-  char *__pyx_t_14;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
-
-  /* "View.MemoryView":504
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- *         import struct             # <<<<<<<<<<<<<<
- *         cdef char c
- *         cdef bytes bytesvalue
- */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 504, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_v_struct = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "View.MemoryView":509
- *         cdef Py_ssize_t i
- * 
- *         if isinstance(value, tuple):             # <<<<<<<<<<<<<<
- *             bytesvalue = struct.pack(self.view.format, *value)
- *         else:
- */
-  __pyx_t_2 = PyTuple_Check(__pyx_v_value); 
-  __pyx_t_3 = (__pyx_t_2 != 0);
-  if (__pyx_t_3) {
-
-    /* "View.MemoryView":510
- * 
- *         if isinstance(value, tuple):
- *             bytesvalue = struct.pack(self.view.format, *value)             # <<<<<<<<<<<<<<
- *         else:
- *             bytesvalue = struct.pack(self.view.format, value)
- */
-    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    __Pyx_GIVEREF(__pyx_t_4);
-    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
-    __pyx_t_4 = 0;
-    __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 510, __pyx_L1_error)
-    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
-    __pyx_t_4 = 0;
-
-    /* "View.MemoryView":509
- *         cdef Py_ssize_t i
- * 
- *         if isinstance(value, tuple):             # <<<<<<<<<<<<<<
- *             bytesvalue = struct.pack(self.view.format, *value)
- *         else:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":512
- *             bytesvalue = struct.pack(self.view.format, *value)
- *         else:
- *             bytesvalue = struct.pack(self.view.format, value)             # <<<<<<<<<<<<<<
- * 
- *         for i, c in enumerate(bytesvalue):
- */
-  /*else*/ {
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 512, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 512, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_5 = NULL;
-    __pyx_t_7 = 0;
-    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
-      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
-      if (likely(__pyx_t_5)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
-        __Pyx_INCREF(__pyx_t_5);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_6, function);
-        __pyx_t_7 = 1;
-      }
-    }
-    #if CYTHON_FAST_PYCALL
-    if (PyFunction_Check(__pyx_t_6)) {
-      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
-      __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
-      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    } else
-    #endif
-    #if CYTHON_FAST_PYCCALL
-    if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
-      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
-      __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
-      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    } else
-    #endif
-    {
-      __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 512, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_8);
-      if (__pyx_t_5) {
-        __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
-      }
-      __Pyx_GIVEREF(__pyx_t_1);
-      PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
-      __Pyx_INCREF(__pyx_v_value);
-      __Pyx_GIVEREF(__pyx_v_value);
-      PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
-      __pyx_t_1 = 0;
-      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-    }
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 512, __pyx_L1_error)
-    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
-    __pyx_t_4 = 0;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":514
- *             bytesvalue = struct.pack(self.view.format, value)
- * 
- *         for i, c in enumerate(bytesvalue):             # <<<<<<<<<<<<<<
- *             itemp[i] = c
- * 
- */
-  __pyx_t_9 = 0;
-  if (unlikely(__pyx_v_bytesvalue == Py_None)) {
-    PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
-    __PYX_ERR(2, 514, __pyx_L1_error)
-  }
-  __Pyx_INCREF(__pyx_v_bytesvalue);
-  __pyx_t_10 = __pyx_v_bytesvalue;
-  __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
-  __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
-  for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
-    __pyx_t_11 = __pyx_t_14;
-    __pyx_v_c = (__pyx_t_11[0]);
-
-    /* "View.MemoryView":515
- * 
- *         for i, c in enumerate(bytesvalue):
- *             itemp[i] = c             # <<<<<<<<<<<<<<
- * 
- *     @cname('getbuffer')
- */
-    __pyx_v_i = __pyx_t_9;
-
-    /* "View.MemoryView":514
- *             bytesvalue = struct.pack(self.view.format, value)
- * 
- *         for i, c in enumerate(bytesvalue):             # <<<<<<<<<<<<<<
- *             itemp[i] = c
- * 
- */
-    __pyx_t_9 = (__pyx_t_9 + 1);
-
-    /* "View.MemoryView":515
- * 
- *         for i, c in enumerate(bytesvalue):
- *             itemp[i] = c             # <<<<<<<<<<<<<<
- * 
- *     @cname('getbuffer')
- */
-    (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
-  }
-  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
-
-  /* "View.MemoryView":501
- *             return result
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
- *         """Only used if instantiated manually by the user, or if Cython doesn't
- *         know how to convert the type"""
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_XDECREF(__pyx_t_10);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_struct);
-  __Pyx_XDECREF(__pyx_v_bytesvalue);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":518
- * 
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):             # <<<<<<<<<<<<<<
- *         if flags & PyBUF_WRITABLE and self.view.readonly:
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- */
-
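-/* __getbuffer__: PEP 3118 buffer export. Rejects PyBUF_WRITABLE requests on
- * read-only views, publishes shape/strides/suboffsets/format only when the
- * corresponding flag asks for them, and copies buf/ndim/itemsize/len/
- * readonly from the wrapped Py_buffer, with `self` as the owning object.
- */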
-/* Python wrapper */
-static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  Py_ssize_t *__pyx_t_4;
-  char *__pyx_t_5;
-  void *__pyx_t_6;
-  int __pyx_t_7;
-  Py_ssize_t __pyx_t_8;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  if (__pyx_v_info == NULL) {
-    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
-    return -1;
-  }
-  __Pyx_RefNannySetupContext("__getbuffer__", 0);
-  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
-  __Pyx_GIVEREF(__pyx_v_info->obj);
-
-  /* "View.MemoryView":519
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         if flags & PyBUF_WRITABLE and self.view.readonly:             # <<<<<<<<<<<<<<
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- * 
- */
-  __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
-  if (__pyx_t_2) {
-  } else {
-    __pyx_t_1 = __pyx_t_2;
-    goto __pyx_L4_bool_binop_done;
-  }
-  __pyx_t_2 = (__pyx_v_self->view.readonly != 0);
-  __pyx_t_1 = __pyx_t_2;
-  __pyx_L4_bool_binop_done:;
-  if (unlikely(__pyx_t_1)) {
-
-    /* "View.MemoryView":520
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         if flags & PyBUF_WRITABLE and self.view.readonly:
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_ND:
- */
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 520, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 520, __pyx_L1_error)
-
-    /* "View.MemoryView":519
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         if flags & PyBUF_WRITABLE and self.view.readonly:             # <<<<<<<<<<<<<<
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- * 
- */
-  }
-
-  /* "View.MemoryView":522
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- * 
- *         if flags & PyBUF_ND:             # <<<<<<<<<<<<<<
- *             info.shape = self.view.shape
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":523
- * 
- *         if flags & PyBUF_ND:
- *             info.shape = self.view.shape             # <<<<<<<<<<<<<<
- *         else:
- *             info.shape = NULL
- */
-    __pyx_t_4 = __pyx_v_self->view.shape;
-    __pyx_v_info->shape = __pyx_t_4;
-
-    /* "View.MemoryView":522
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- * 
- *         if flags & PyBUF_ND:             # <<<<<<<<<<<<<<
- *             info.shape = self.view.shape
- *         else:
- */
-    goto __pyx_L6;
-  }
-
-  /* "View.MemoryView":525
- *             info.shape = self.view.shape
- *         else:
- *             info.shape = NULL             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_STRIDES:
- */
-  /*else*/ {
-    __pyx_v_info->shape = NULL;
-  }
-  __pyx_L6:;
-
-  /* "View.MemoryView":527
- *             info.shape = NULL
- * 
- *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
- *             info.strides = self.view.strides
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":528
- * 
- *         if flags & PyBUF_STRIDES:
- *             info.strides = self.view.strides             # <<<<<<<<<<<<<<
- *         else:
- *             info.strides = NULL
- */
-    __pyx_t_4 = __pyx_v_self->view.strides;
-    __pyx_v_info->strides = __pyx_t_4;
-
-    /* "View.MemoryView":527
- *             info.shape = NULL
- * 
- *         if flags & PyBUF_STRIDES:             # <<<<<<<<<<<<<<
- *             info.strides = self.view.strides
- *         else:
- */
-    goto __pyx_L7;
-  }
-
-  /* "View.MemoryView":530
- *             info.strides = self.view.strides
- *         else:
- *             info.strides = NULL             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_INDIRECT:
- */
-  /*else*/ {
-    __pyx_v_info->strides = NULL;
-  }
-  __pyx_L7:;
-
-  /* "View.MemoryView":532
- *             info.strides = NULL
- * 
- *         if flags & PyBUF_INDIRECT:             # <<<<<<<<<<<<<<
- *             info.suboffsets = self.view.suboffsets
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":533
- * 
- *         if flags & PyBUF_INDIRECT:
- *             info.suboffsets = self.view.suboffsets             # <<<<<<<<<<<<<<
- *         else:
- *             info.suboffsets = NULL
- */
-    __pyx_t_4 = __pyx_v_self->view.suboffsets;
-    __pyx_v_info->suboffsets = __pyx_t_4;
-
-    /* "View.MemoryView":532
- *             info.strides = NULL
- * 
- *         if flags & PyBUF_INDIRECT:             # <<<<<<<<<<<<<<
- *             info.suboffsets = self.view.suboffsets
- *         else:
- */
-    goto __pyx_L8;
-  }
-
-  /* "View.MemoryView":535
- *             info.suboffsets = self.view.suboffsets
- *         else:
- *             info.suboffsets = NULL             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_FORMAT:
- */
-  /*else*/ {
-    __pyx_v_info->suboffsets = NULL;
-  }
-  __pyx_L8:;
-
-  /* "View.MemoryView":537
- *             info.suboffsets = NULL
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             info.format = self.view.format
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":538
- * 
- *         if flags & PyBUF_FORMAT:
- *             info.format = self.view.format             # <<<<<<<<<<<<<<
- *         else:
- *             info.format = NULL
- */
-    __pyx_t_5 = __pyx_v_self->view.format;
-    __pyx_v_info->format = __pyx_t_5;
-
-    /* "View.MemoryView":537
- *             info.suboffsets = NULL
- * 
- *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
- *             info.format = self.view.format
- *         else:
- */
-    goto __pyx_L9;
-  }
-
-  /* "View.MemoryView":540
- *             info.format = self.view.format
- *         else:
- *             info.format = NULL             # <<<<<<<<<<<<<<
- * 
- *         info.buf = self.view.buf
- */
-  /*else*/ {
-    __pyx_v_info->format = NULL;
-  }
-  __pyx_L9:;
-
-  /* "View.MemoryView":542
- *             info.format = NULL
- * 
- *         info.buf = self.view.buf             # <<<<<<<<<<<<<<
- *         info.ndim = self.view.ndim
- *         info.itemsize = self.view.itemsize
- */
-  __pyx_t_6 = __pyx_v_self->view.buf;
-  __pyx_v_info->buf = __pyx_t_6;
-
-  /* "View.MemoryView":543
- * 
- *         info.buf = self.view.buf
- *         info.ndim = self.view.ndim             # <<<<<<<<<<<<<<
- *         info.itemsize = self.view.itemsize
- *         info.len = self.view.len
- */
-  __pyx_t_7 = __pyx_v_self->view.ndim;
-  __pyx_v_info->ndim = __pyx_t_7;
-
-  /* "View.MemoryView":544
- *         info.buf = self.view.buf
- *         info.ndim = self.view.ndim
- *         info.itemsize = self.view.itemsize             # <<<<<<<<<<<<<<
- *         info.len = self.view.len
- *         info.readonly = self.view.readonly
- */
-  __pyx_t_8 = __pyx_v_self->view.itemsize;
-  __pyx_v_info->itemsize = __pyx_t_8;
-
-  /* "View.MemoryView":545
- *         info.ndim = self.view.ndim
- *         info.itemsize = self.view.itemsize
- *         info.len = self.view.len             # <<<<<<<<<<<<<<
- *         info.readonly = self.view.readonly
- *         info.obj = self
- */
-  __pyx_t_8 = __pyx_v_self->view.len;
-  __pyx_v_info->len = __pyx_t_8;
-
-  /* "View.MemoryView":546
- *         info.itemsize = self.view.itemsize
- *         info.len = self.view.len
- *         info.readonly = self.view.readonly             # <<<<<<<<<<<<<<
- *         info.obj = self
- * 
- */
-  __pyx_t_1 = __pyx_v_self->view.readonly;
-  __pyx_v_info->readonly = __pyx_t_1;
-
-  /* "View.MemoryView":547
- *         info.len = self.view.len
- *         info.readonly = self.view.readonly
- *         info.obj = self             # <<<<<<<<<<<<<<
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
- */
-  __Pyx_INCREF(((PyObject *)__pyx_v_self));
-  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
-  __Pyx_GOTREF(__pyx_v_info->obj);
-  __Pyx_DECREF(__pyx_v_info->obj);
-  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
-
-  /* "View.MemoryView":518
- * 
- *     @cname('getbuffer')
- *     def __getbuffer__(self, Py_buffer *info, int flags):             # <<<<<<<<<<<<<<
- *         if flags & PyBUF_WRITABLE and self.view.readonly:
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")
- */
-
-  /* function exit code */
-  __pyx_r = 0;
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  if (__pyx_v_info->obj != NULL) {
-    __Pyx_GOTREF(__pyx_v_info->obj);
-    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
-  }
-  goto __pyx_L2;
-  __pyx_L0:;
-  if (__pyx_v_info->obj == Py_None) {
-    __Pyx_GOTREF(__pyx_v_info->obj);
-    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
-  }
-  __pyx_L2:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":553
- * 
- *     @property
- *     def T(self):             # <<<<<<<<<<<<<<
- *         cdef _memoryviewslice result = memoryview_copy(self)
- *         transpose_memslice(&result.from_slice)
- */
-
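-/* T property: return a transposed view by copying this memoryview and
- * transposing the copy's slice in place with transpose_memslice.
- */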
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":554
- *     @property
- *     def T(self):
- *         cdef _memoryviewslice result = memoryview_copy(self)             # <<<<<<<<<<<<<<
- *         transpose_memslice(&result.from_slice)
- *         return result
- */
-  __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 554, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 554, __pyx_L1_error)
-  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
-  __pyx_t_1 = 0;
-
-  /* "View.MemoryView":555
- *     def T(self):
- *         cdef _memoryviewslice result = memoryview_copy(self)
- *         transpose_memslice(&result.from_slice)             # <<<<<<<<<<<<<<
- *         return result
- * 
- */
-  __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 555, __pyx_L1_error)
-
-  /* "View.MemoryView":556
- *         cdef _memoryviewslice result = memoryview_copy(self)
- *         transpose_memslice(&result.from_slice)
- *         return result             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(((PyObject *)__pyx_v_result));
-  __pyx_r = ((PyObject *)__pyx_v_result);
-  goto __pyx_L0;
-
-  /* "View.MemoryView":553
- * 
- *     @property
- *     def T(self):             # <<<<<<<<<<<<<<
- *         cdef _memoryviewslice result = memoryview_copy(self)
- *         transpose_memslice(&result.from_slice)
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_result);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":559
- * 
- *     @property
- *     def base(self):             # <<<<<<<<<<<<<<
- *         return self.obj
- * 
- */
-
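-/* base property: return self.obj, the object whose buffer this memoryview
- * wraps.
- */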
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":560
- *     @property
- *     def base(self):
- *         return self.obj             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_self->obj);
-  __pyx_r = __pyx_v_self->obj;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":559
- * 
- *     @property
- *     def base(self):             # <<<<<<<<<<<<<<
- *         return self.obj
- * 
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":563
- * 
- *     @property
- *     def shape(self):             # <<<<<<<<<<<<<<
- *         return tuple([length for length in self.view.shape[:self.view.ndim]])
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  Py_ssize_t __pyx_v_length;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  Py_ssize_t *__pyx_t_2;
-  Py_ssize_t *__pyx_t_3;
-  Py_ssize_t *__pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":564
- *     @property
- *     def shape(self):
- *         return tuple([length for length in self.view.shape[:self.view.ndim]])             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 564, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
-  for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
-    __pyx_t_2 = __pyx_t_4;
-    __pyx_v_length = (__pyx_t_2[0]);
-    __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_5);
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 564, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-  }
-  __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_r = __pyx_t_5;
-  __pyx_t_5 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":563
- * 
- *     @property
- *     def shape(self):             # <<<<<<<<<<<<<<
- *         return tuple([length for length in self.view.shape[:self.view.ndim]])
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":567
- * 
- *     @property
- *     def strides(self):             # <<<<<<<<<<<<<<
- *         if self.view.strides == NULL:
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  Py_ssize_t __pyx_v_stride;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  Py_ssize_t *__pyx_t_3;
-  Py_ssize_t *__pyx_t_4;
-  Py_ssize_t *__pyx_t_5;
-  PyObject *__pyx_t_6 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":568
- *     @property
- *     def strides(self):
- *         if self.view.strides == NULL:             # <<<<<<<<<<<<<<
- * 
- *             raise ValueError("Buffer view does not expose strides")
- */
-  __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
-  if (unlikely(__pyx_t_1)) {
-
-    /* "View.MemoryView":570
- *         if self.view.strides == NULL:
- * 
- *             raise ValueError("Buffer view does not expose strides")             # <<<<<<<<<<<<<<
- * 
- *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
- */
-    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 570, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __PYX_ERR(2, 570, __pyx_L1_error)
-
-    /* "View.MemoryView":568
- *     @property
- *     def strides(self):
- *         if self.view.strides == NULL:             # <<<<<<<<<<<<<<
- * 
- *             raise ValueError("Buffer view does not expose strides")
- */
-  }
-
-  /* "View.MemoryView":572
- *             raise ValueError("Buffer view does not expose strides")
- * 
- *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 572, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
-  for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
-    __pyx_t_3 = __pyx_t_5;
-    __pyx_v_stride = (__pyx_t_3[0]);
-    __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 572, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-  }
-  __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_6);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_r = __pyx_t_6;
-  __pyx_t_6 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":567
- * 
- *     @property
- *     def strides(self):             # <<<<<<<<<<<<<<
- *         if self.view.strides == NULL:
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":575
- * 
- *     @property
- *     def suboffsets(self):             # <<<<<<<<<<<<<<
- *         if self.view.suboffsets == NULL:
- *             return (-1,) * self.view.ndim
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  Py_ssize_t __pyx_v_suboffset;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  Py_ssize_t *__pyx_t_4;
-  Py_ssize_t *__pyx_t_5;
-  Py_ssize_t *__pyx_t_6;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":576
- *     @property
- *     def suboffsets(self):
- *         if self.view.suboffsets == NULL:             # <<<<<<<<<<<<<<
- *             return (-1,) * self.view.ndim
- * 
- */
-  __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":577
- *     def suboffsets(self):
- *         if self.view.suboffsets == NULL:
- *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
- * 
- *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 577, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__22, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 577, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __pyx_r = __pyx_t_3;
-    __pyx_t_3 = 0;
-    goto __pyx_L0;
-
-    /* "View.MemoryView":576
- *     @property
- *     def suboffsets(self):
- *         if self.view.suboffsets == NULL:             # <<<<<<<<<<<<<<
- *             return (-1,) * self.view.ndim
- * 
- */
-  }
-
-  /* "View.MemoryView":579
- *             return (-1,) * self.view.ndim
- * 
- *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 579, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
-  for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
-    __pyx_t_4 = __pyx_t_6;
-    __pyx_v_suboffset = (__pyx_t_4[0]);
-    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 579, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  }
-  __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":575
- * 
- *     @property
- *     def suboffsets(self):             # <<<<<<<<<<<<<<
- *         if self.view.suboffsets == NULL:
- *             return (-1,) * self.view.ndim
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":582
- * 
- *     @property
- *     def ndim(self):             # <<<<<<<<<<<<<<
- *         return self.view.ndim
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":583
- *     @property
- *     def ndim(self):
- *         return self.view.ndim             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":582
- * 
- *     @property
- *     def ndim(self):             # <<<<<<<<<<<<<<
- *         return self.view.ndim
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":586
- * 
- *     @property
- *     def itemsize(self):             # <<<<<<<<<<<<<<
- *         return self.view.itemsize
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":587
- *     @property
- *     def itemsize(self):
- *         return self.view.itemsize             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":586
- * 
- *     @property
- *     def itemsize(self):             # <<<<<<<<<<<<<<
- *         return self.view.itemsize
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":590
- * 
- *     @property
- *     def nbytes(self):             # <<<<<<<<<<<<<<
- *         return self.size * self.view.itemsize
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":591
- *     @property
- *     def nbytes(self):
- *         return self.size * self.view.itemsize             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 591, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 591, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 591, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_r = __pyx_t_3;
-  __pyx_t_3 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":590
- * 
- *     @property
- *     def nbytes(self):             # <<<<<<<<<<<<<<
- *         return self.size * self.view.itemsize
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
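The run of getters above (`base`, `shape`, `strides`, `suboffsets`, `ndim`, `itemsize`, `nbytes`) simply re-exports fields of the underlying `Py_buffer`. CPython's built-in `memoryview` exposes the same buffer-protocol metadata, so it is a convenient, dependency-free way to see what these properties report; a small sketch (`.obj` is the builtin's counterpart of the `base` property here):

```python
import array

buf = array.array("d", range(6))
m = memoryview(buf).cast("B").cast("d", shape=[2, 3])

assert m.obj is buf             # builtin counterpart of `base`
assert m.shape == (2, 3)
assert m.strides == (24, 8)     # row-major float64 strides
assert m.suboffsets == ()       # builtin gives (); the getter above returns (-1,) * ndim instead
assert m.ndim == 2 and m.itemsize == 8
assert m.nbytes == 48           # size * itemsize = 6 * 8
```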
-/* "View.MemoryView":594
- * 
- *     @property
- *     def size(self):             # <<<<<<<<<<<<<<
- *         if self._size is None:
- *             result = 1
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_v_result = NULL;
-  PyObject *__pyx_v_length = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  Py_ssize_t *__pyx_t_3;
-  Py_ssize_t *__pyx_t_4;
-  Py_ssize_t *__pyx_t_5;
-  PyObject *__pyx_t_6 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":595
- *     @property
- *     def size(self):
- *         if self._size is None:             # <<<<<<<<<<<<<<
- *             result = 1
- * 
- */
-  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":596
- *     def size(self):
- *         if self._size is None:
- *             result = 1             # <<<<<<<<<<<<<<
- * 
- *             for length in self.view.shape[:self.view.ndim]:
- */
-    __Pyx_INCREF(__pyx_int_1);
-    __pyx_v_result = __pyx_int_1;
-
-    /* "View.MemoryView":598
- *             result = 1
- * 
- *             for length in self.view.shape[:self.view.ndim]:             # <<<<<<<<<<<<<<
- *                 result *= length
- * 
- */
-    __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
-    for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
-      __pyx_t_3 = __pyx_t_5;
-      __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 598, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
-      __pyx_t_6 = 0;
-
-      /* "View.MemoryView":599
- * 
- *             for length in self.view.shape[:self.view.ndim]:
- *                 result *= length             # <<<<<<<<<<<<<<
- * 
- *             self._size = result
- */
-      __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 599, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_6);
-      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
-      __pyx_t_6 = 0;
-    }
-
-    /* "View.MemoryView":601
- *                 result *= length
- * 
- *             self._size = result             # <<<<<<<<<<<<<<
- * 
- *         return self._size
- */
-    __Pyx_INCREF(__pyx_v_result);
-    __Pyx_GIVEREF(__pyx_v_result);
-    __Pyx_GOTREF(__pyx_v_self->_size);
-    __Pyx_DECREF(__pyx_v_self->_size);
-    __pyx_v_self->_size = __pyx_v_result;
-
-    /* "View.MemoryView":595
- *     @property
- *     def size(self):
- *         if self._size is None:             # <<<<<<<<<<<<<<
- *             result = 1
- * 
- */
-  }
-
-  /* "View.MemoryView":603
- *             self._size = result
- * 
- *         return self._size             # <<<<<<<<<<<<<<
- * 
- *     def __len__(self):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_self->_size);
-  __pyx_r = __pyx_v_self->_size;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":594
- * 
- *     @property
- *     def size(self):             # <<<<<<<<<<<<<<
- *         if self._size is None:
- *             result = 1
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_result);
-  __Pyx_XDECREF(__pyx_v_length);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
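The `size` getter above lazily computes the element count as the product of all extents and caches it in `self._size`, so repeated accesses are O(1). The same pattern in plain Python, for reference (a sketch, not code from this file):

```python
import math

class ShapeInfo:
    def __init__(self, shape):
        self.shape = shape
        self._size = None       # computed on first access, then cached

    @property
    def size(self):
        if self._size is None:
            self._size = math.prod(self.shape)  # product of all extents
        return self._size

assert ShapeInfo((2, 3, 4)).size == 24
```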
-/* "View.MemoryView":605
- *         return self._size
- * 
- *     def __len__(self):             # <<<<<<<<<<<<<<
- *         if self.view.ndim >= 1:
- *             return self.view.shape[0]
- */
-
-/* Python wrapper */
-static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
-static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
-  Py_ssize_t __pyx_r;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  Py_ssize_t __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  __Pyx_RefNannySetupContext("__len__", 0);
-
-  /* "View.MemoryView":606
- * 
- *     def __len__(self):
- *         if self.view.ndim >= 1:             # <<<<<<<<<<<<<<
- *             return self.view.shape[0]
- * 
- */
-  __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":607
- *     def __len__(self):
- *         if self.view.ndim >= 1:
- *             return self.view.shape[0]             # <<<<<<<<<<<<<<
- * 
- *         return 0
- */
-    __pyx_r = (__pyx_v_self->view.shape[0]);
-    goto __pyx_L0;
-
-    /* "View.MemoryView":606
- * 
- *     def __len__(self):
- *         if self.view.ndim >= 1:             # <<<<<<<<<<<<<<
- *             return self.view.shape[0]
- * 
- */
-  }
-
-  /* "View.MemoryView":609
- *             return self.view.shape[0]
- * 
- *         return 0             # <<<<<<<<<<<<<<
- * 
- *     def __repr__(self):
- */
-  __pyx_r = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":605
- *         return self._size
- * 
- *     def __len__(self):             # <<<<<<<<<<<<<<
- *         if self.view.ndim >= 1:
- *             return self.view.shape[0]
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":611
- *         return 0
- * 
- *     def __repr__(self):             # <<<<<<<<<<<<<<
- *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
- *                                                id(self))
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__repr__", 0);
-
-  /* "View.MemoryView":612
- * 
- *     def __repr__(self):
- *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,             # <<<<<<<<<<<<<<
- *                                                id(self))
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "View.MemoryView":613
- *     def __repr__(self):
- *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
- *                                                id(self))             # <<<<<<<<<<<<<<
- * 
- *     def __str__(self):
- */
-  __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 613, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-
-  /* "View.MemoryView":612
- * 
- *     def __repr__(self):
- *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,             # <<<<<<<<<<<<<<
- *                                                id(self))
- * 
- */
-  __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
-  __pyx_t_1 = 0;
-  __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":611
- *         return 0
- * 
- *     def __repr__(self):             # <<<<<<<<<<<<<<
- *         return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
- *                                                id(self))
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":615
- *                                                id(self))
- * 
- *     def __str__(self):             # <<<<<<<<<<<<<<
- *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__str__", 0);
-
-  /* "View.MemoryView":616
- * 
- *     def __str__(self):
- *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":615
- *                                                id(self))
- * 
- *     def __str__(self):             # <<<<<<<<<<<<<<
- *         return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
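`__len__` above returns the first extent (`self.view.shape[0]`) for `ndim >= 1` and 0 otherwise, while `__repr__`/`__str__` are plain %-formatting over the base object's class name. A quick pure-Python check of both behaviors (the `Owner` class is made up for illustration):

```python
import array

m = memoryview(array.array("d", range(6))).cast("B").cast("d", shape=[2, 3])
assert len(m) == m.shape[0] == 2   # __len__ reports the first dimension

class Owner:                        # hypothetical base object
    pass

base = Owner()
print("<MemoryView of %r at 0x%x>" % (type(base).__name__, id(base)))
# e.g. <MemoryView of 'Owner' at 0x7f3a2c1b9d60>
```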
-/* "View.MemoryView":619
- * 
- * 
- *     def is_c_contig(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
-  __Pyx_memviewslice *__pyx_v_mslice;
-  __Pyx_memviewslice __pyx_v_tmp;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice *__pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("is_c_contig", 0);
-
-  /* "View.MemoryView":622
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- *         mslice = get_slice_from_memview(self, &tmp)             # <<<<<<<<<<<<<<
- *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
- * 
- */
-  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 622, __pyx_L1_error)
-  __pyx_v_mslice = __pyx_t_1;
-
-  /* "View.MemoryView":623
- *         cdef __Pyx_memviewslice tmp
- *         mslice = get_slice_from_memview(self, &tmp)
- *         return slice_is_contig(mslice[0], 'C', self.view.ndim)             # <<<<<<<<<<<<<<
- * 
- *     def is_f_contig(self):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 623, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":619
- * 
- * 
- *     def is_c_contig(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":625
- *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
- * 
- *     def is_f_contig(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
-  __Pyx_memviewslice *__pyx_v_mslice;
-  __Pyx_memviewslice __pyx_v_tmp;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice *__pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("is_f_contig", 0);
-
-  /* "View.MemoryView":628
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- *         mslice = get_slice_from_memview(self, &tmp)             # <<<<<<<<<<<<<<
- *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
- * 
- */
-  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 628, __pyx_L1_error)
-  __pyx_v_mslice = __pyx_t_1;
-
-  /* "View.MemoryView":629
- *         cdef __Pyx_memviewslice tmp
- *         mslice = get_slice_from_memview(self, &tmp)
- *         return slice_is_contig(mslice[0], 'F', self.view.ndim)             # <<<<<<<<<<<<<<
- * 
- *     def copy(self):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 629, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":625
- *         return slice_is_contig(mslice[0], 'C', self.view.ndim)
- * 
- *     def is_f_contig(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice *mslice
- *         cdef __Pyx_memviewslice tmp
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":631
- *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
- * 
- *     def copy(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice mslice
- *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("copy (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
-  __Pyx_memviewslice __pyx_v_mslice;
-  int __pyx_v_flags;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("copy", 0);
-
-  /* "View.MemoryView":633
- *     def copy(self):
- *         cdef __Pyx_memviewslice mslice
- *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS             # <<<<<<<<<<<<<<
- * 
- *         slice_copy(self, &mslice)
- */
-  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
-
-  /* "View.MemoryView":635
- *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- * 
- *         slice_copy(self, &mslice)             # <<<<<<<<<<<<<<
- *         mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
- *                                    self.view.itemsize,
- */
-  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
-
-  /* "View.MemoryView":636
- * 
- *         slice_copy(self, &mslice)
- *         mslice = slice_copy_contig(&mslice, "c", self.view.ndim,             # <<<<<<<<<<<<<<
- *                                    self.view.itemsize,
- *                                    flags|PyBUF_C_CONTIGUOUS,
- */
-  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 636, __pyx_L1_error)
-  __pyx_v_mslice = __pyx_t_1;
-
-  /* "View.MemoryView":641
- *                                    self.dtype_is_object)
- * 
- *         return memoryview_copy_from_slice(self, &mslice)             # <<<<<<<<<<<<<<
- * 
- *     def copy_fortran(self):
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 641, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":631
- *         return slice_is_contig(mslice[0], 'F', self.view.ndim)
- * 
- *     def copy(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice mslice
- *         cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":643
- *         return memoryview_copy_from_slice(self, &mslice)
- * 
- *     def copy_fortran(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice src, dst
- *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
-  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
-  __Pyx_memviewslice __pyx_v_src;
-  __Pyx_memviewslice __pyx_v_dst;
-  int __pyx_v_flags;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_memviewslice __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("copy_fortran", 0);
-
-  /* "View.MemoryView":645
- *     def copy_fortran(self):
- *         cdef __Pyx_memviewslice src, dst
- *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS             # <<<<<<<<<<<<<<
- * 
- *         slice_copy(self, &src)
- */
-  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
-
-  /* "View.MemoryView":647
- *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- * 
- *         slice_copy(self, &src)             # <<<<<<<<<<<<<<
- *         dst = slice_copy_contig(&src, "fortran", self.view.ndim,
- *                                 self.view.itemsize,
- */
-  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
-
-  /* "View.MemoryView":648
- * 
- *         slice_copy(self, &src)
- *         dst = slice_copy_contig(&src, "fortran", self.view.ndim,             # <<<<<<<<<<<<<<
- *                                 self.view.itemsize,
- *                                 flags|PyBUF_F_CONTIGUOUS,
- */
-  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 648, __pyx_L1_error)
-  __pyx_v_dst = __pyx_t_1;
-
-  /* "View.MemoryView":653
- *                                 self.dtype_is_object)
- * 
- *         return memoryview_copy_from_slice(self, &dst)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 653, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_r = __pyx_t_2;
-  __pyx_t_2 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":643
- *         return memoryview_copy_from_slice(self, &mslice)
- * 
- *     def copy_fortran(self):             # <<<<<<<<<<<<<<
- *         cdef __Pyx_memviewslice src, dst
- *         cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
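`is_c_contig`/`is_f_contig` above take a slice snapshot with `get_slice_from_memview` and test it with `slice_is_contig`, while `copy`/`copy_fortran` rebuild the data into C- or Fortran-contiguous storage via `slice_copy_contig` (generated here as `__pyx_memoryview_copy_new_contig`). A hedged Cython sketch of the round trip (hypothetical file; needs compiling, assumes NumPy):

```cython
# contig_demo.pyx -- illustrative only
import numpy as np

def demo():
    cdef double[:, :] mv = np.arange(6, dtype=np.float64).reshape(2, 3)
    print(mv.is_c_contig(), mv.is_f_contig())  # True False
    f = mv.copy_fortran()                      # Fortran-ordered copy of the data
    print(f.is_f_contig())                     # True
    c = f.copy()                               # and back to a C-ordered copy
    print(c.is_c_contig())                     # True
```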
-/* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 2, __pyx_L1_error)
-
-  /* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 4, __pyx_L1_error)
-
-  /* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":657
- * 
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):             # <<<<<<<<<<<<<<
- *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
- *     result.typeinfo = typeinfo
- */
-
-static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
-  struct __pyx_memoryview_obj *__pyx_v_result = 0;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
-
-  /* "View.MemoryView":658
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
- *     cdef memoryview result = memoryview(o, flags, dtype_is_object)             # <<<<<<<<<<<<<<
- *     result.typeinfo = typeinfo
- *     return result
- */
-  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_INCREF(__pyx_v_o);
-  __Pyx_GIVEREF(__pyx_v_o);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
-  __pyx_t_1 = 0;
-  __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":659
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
- *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
- *     result.typeinfo = typeinfo             # <<<<<<<<<<<<<<
- *     return result
- * 
- */
-  __pyx_v_result->typeinfo = __pyx_v_typeinfo;
-
-  /* "View.MemoryView":660
- *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
- *     result.typeinfo = typeinfo
- *     return result             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_check')
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(((PyObject *)__pyx_v_result));
-  __pyx_r = ((PyObject *)__pyx_v_result);
-  goto __pyx_L0;
-
-  /* "View.MemoryView":657
- * 
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):             # <<<<<<<<<<<<<<
- *     cdef memoryview result = memoryview(o, flags, dtype_is_object)
- *     result.typeinfo = typeinfo
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_result);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":663
- * 
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o):             # <<<<<<<<<<<<<<
- *     return isinstance(o, memoryview)
- * 
- */
-
-static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  __Pyx_RefNannySetupContext("memoryview_check", 0);
-
-  /* "View.MemoryView":664
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o):
- *     return isinstance(o, memoryview)             # <<<<<<<<<<<<<<
- * 
- * cdef tuple _unellipsify(object index, int ndim):
- */
-  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); 
-  __pyx_r = __pyx_t_1;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":663
- * 
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o):             # <<<<<<<<<<<<<<
- *     return isinstance(o, memoryview)
- * 
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
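The last helper in this hunk, `_unellipsify` (its generated body follows, truncated at the end of the hunk), normalizes an indexing object into a tuple of exactly `ndim` entries, expanding a single `Ellipsis` into the right number of full slices. A simplified pure-Python sketch of that normalization, based on the docstring and the `result.extend([slice(None)] * (ndim - len(tup) + 1))` step visible further down (the real helper additionally tracks a `have_slices` flag, omitted here):

```python
def unellipsify(index, ndim):
    tup = index if isinstance(index, tuple) else (index,)
    result, seen_ellipsis = [], False
    for item in tup:
        if item is Ellipsis:
            if not seen_ellipsis:
                # The first Ellipsis expands to however many full slices are missing.
                result.extend([slice(None)] * (ndim - len(tup) + 1))
                seen_ellipsis = True
            else:
                result.append(slice(None))  # later ellipses act as one slice each
        else:
            result.append(item)
    result.extend([slice(None)] * (ndim - len(result)))  # pad short indices
    return tuple(result)

assert unellipsify((0, ...), 3) == (0, slice(None), slice(None))
assert unellipsify(..., 2) == (slice(None), slice(None))
```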
-/* "View.MemoryView":666
- *     return isinstance(o, memoryview)
- * 
- * cdef tuple _unellipsify(object index, int ndim):             # <<<<<<<<<<<<<<
- *     """
- *     Replace all ellipses with full slices and fill incomplete indices with
- */
-
-static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
-  PyObject *__pyx_v_tup = NULL;
-  PyObject *__pyx_v_result = NULL;
-  int __pyx_v_have_slices;
-  int __pyx_v_seen_ellipsis;
-  CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
-  PyObject *__pyx_v_item = NULL;
-  Py_ssize_t __pyx_v_nslices;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  Py_ssize_t __pyx_t_5;
-  PyObject *(*__pyx_t_6)(PyObject *);
-  PyObject *__pyx_t_7 = NULL;
-  Py_ssize_t __pyx_t_8;
-  int __pyx_t_9;
-  int __pyx_t_10;
-  PyObject *__pyx_t_11 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("_unellipsify", 0);
-
-  /* "View.MemoryView":671
- *     full slices.
- *     """
- *     if not isinstance(index, tuple):             # <<<<<<<<<<<<<<
- *         tup = (index,)
- *     else:
- */
-  __pyx_t_1 = PyTuple_Check(__pyx_v_index); 
-  __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":672
- *     """
- *     if not isinstance(index, tuple):
- *         tup = (index,)             # <<<<<<<<<<<<<<
- *     else:
- *         tup = index
- */
-    __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_INCREF(__pyx_v_index);
-    __Pyx_GIVEREF(__pyx_v_index);
-    PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
-    __pyx_v_tup = __pyx_t_3;
-    __pyx_t_3 = 0;
-
-    /* "View.MemoryView":671
- *     full slices.
- *     """
- *     if not isinstance(index, tuple):             # <<<<<<<<<<<<<<
- *         tup = (index,)
- *     else:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":674
- *         tup = (index,)
- *     else:
- *         tup = index             # <<<<<<<<<<<<<<
- * 
- *     result = []
- */
-  /*else*/ {
-    __Pyx_INCREF(__pyx_v_index);
-    __pyx_v_tup = __pyx_v_index;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":676
- *         tup = index
- * 
- *     result = []             # <<<<<<<<<<<<<<
- *     have_slices = False
- *     seen_ellipsis = False
- */
-  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 676, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_v_result = ((PyObject*)__pyx_t_3);
-  __pyx_t_3 = 0;
-
-  /* "View.MemoryView":677
- * 
- *     result = []
- *     have_slices = False             # <<<<<<<<<<<<<<
- *     seen_ellipsis = False
- *     for idx, item in enumerate(tup):
- */
-  __pyx_v_have_slices = 0;
-
-  /* "View.MemoryView":678
- *     result = []
- *     have_slices = False
- *     seen_ellipsis = False             # <<<<<<<<<<<<<<
- *     for idx, item in enumerate(tup):
- *         if item is Ellipsis:
- */
-  __pyx_v_seen_ellipsis = 0;
-
-  /* "View.MemoryView":679
- *     have_slices = False
- *     seen_ellipsis = False
- *     for idx, item in enumerate(tup):             # <<<<<<<<<<<<<<
- *         if item is Ellipsis:
- *             if not seen_ellipsis:
- */
-  __Pyx_INCREF(__pyx_int_0);
-  __pyx_t_3 = __pyx_int_0;
-  if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
-    __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
-    __pyx_t_6 = NULL;
-  } else {
-    __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 679, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 679, __pyx_L1_error)
-  }
-  for (;;) {
-    if (likely(!__pyx_t_6)) {
-      if (likely(PyList_CheckExact(__pyx_t_4))) {
-        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
-        #else
-        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        #endif
-      } else {
-        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
-        #else
-        __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        #endif
-      }
-    } else {
-      __pyx_t_7 = __pyx_t_6(__pyx_t_4);
-      if (unlikely(!__pyx_t_7)) {
-        PyObject* exc_type = PyErr_Occurred();
-        if (exc_type) {
-          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else __PYX_ERR(2, 679, __pyx_L1_error)
-        }
-        break;
-      }
-      __Pyx_GOTREF(__pyx_t_7);
-    }
-    __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
-    __pyx_t_7 = 0;
-    __Pyx_INCREF(__pyx_t_3);
-    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
-    __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_3);
-    __pyx_t_3 = __pyx_t_7;
-    __pyx_t_7 = 0;
-
-    /* "View.MemoryView":680
- *     seen_ellipsis = False
- *     for idx, item in enumerate(tup):
- *         if item is Ellipsis:             # <<<<<<<<<<<<<<
- *             if not seen_ellipsis:
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))
- */
-    __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
-    __pyx_t_1 = (__pyx_t_2 != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":681
- *     for idx, item in enumerate(tup):
- *         if item is Ellipsis:
- *             if not seen_ellipsis:             # <<<<<<<<<<<<<<
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))
- *                 seen_ellipsis = True
- */
-      __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
-      if (__pyx_t_1) {
-
-        /* "View.MemoryView":682
- *         if item is Ellipsis:
- *             if not seen_ellipsis:
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))             # <<<<<<<<<<<<<<
- *                 seen_ellipsis = True
- *             else:
- */
-        __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
-        __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 682, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        { Py_ssize_t __pyx_temp;
-          for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
-            __Pyx_INCREF(__pyx_slice__3);
-            __Pyx_GIVEREF(__pyx_slice__3);
-            PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__3);
-          }
-        }
-        __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
-        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-
-        /* "View.MemoryView":683
- *             if not seen_ellipsis:
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))
- *                 seen_ellipsis = True             # <<<<<<<<<<<<<<
- *             else:
- *                 result.append(slice(None))
- */
-        __pyx_v_seen_ellipsis = 1;
-
-        /* "View.MemoryView":681
- *     for idx, item in enumerate(tup):
- *         if item is Ellipsis:
- *             if not seen_ellipsis:             # <<<<<<<<<<<<<<
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))
- *                 seen_ellipsis = True
- */
-        goto __pyx_L7;
-      }
-
-      /* "View.MemoryView":685
- *                 seen_ellipsis = True
- *             else:
- *                 result.append(slice(None))             # <<<<<<<<<<<<<<
- *             have_slices = True
- *         else:
- */
-      /*else*/ {
-        __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 685, __pyx_L1_error)
-      }
-      __pyx_L7:;
-
-      /* "View.MemoryView":686
- *             else:
- *                 result.append(slice(None))
- *             have_slices = True             # <<<<<<<<<<<<<<
- *         else:
- *             if not isinstance(item, slice) and not PyIndex_Check(item):
- */
-      __pyx_v_have_slices = 1;
-
-      /* "View.MemoryView":680
- *     seen_ellipsis = False
- *     for idx, item in enumerate(tup):
- *         if item is Ellipsis:             # <<<<<<<<<<<<<<
- *             if not seen_ellipsis:
- *                 result.extend([slice(None)] * (ndim - len(tup) + 1))
- */
-      goto __pyx_L6;
-    }
-
-    /* "View.MemoryView":688
- *             have_slices = True
- *         else:
- *             if not isinstance(item, slice) and not PyIndex_Check(item):             # <<<<<<<<<<<<<<
- *                 raise TypeError("Cannot index with type '%s'" % type(item))
- * 
- */
-    /*else*/ {
-      __pyx_t_2 = PySlice_Check(__pyx_v_item); 
-      __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
-      if (__pyx_t_10) {
-      } else {
-        __pyx_t_1 = __pyx_t_10;
-        goto __pyx_L9_bool_binop_done;
-      }
-      __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
-      __pyx_t_1 = __pyx_t_10;
-      __pyx_L9_bool_binop_done:;
-      if (unlikely(__pyx_t_1)) {
-
-        /* "View.MemoryView":689
- *         else:
- *             if not isinstance(item, slice) and not PyIndex_Check(item):
- *                 raise TypeError("Cannot index with type '%s'" % type(item))             # <<<<<<<<<<<<<<
- * 
- *             have_slices = have_slices or isinstance(item, slice)
- */
-        __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 689, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_7);
-        __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 689, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_11);
-        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-        __Pyx_Raise(__pyx_t_11, 0, 0, 0);
-        __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
-        __PYX_ERR(2, 689, __pyx_L1_error)
-
-        /* "View.MemoryView":688
- *             have_slices = True
- *         else:
- *             if not isinstance(item, slice) and not PyIndex_Check(item):             # <<<<<<<<<<<<<<
- *                 raise TypeError("Cannot index with type '%s'" % type(item))
- * 
- */
-      }
-
-      /* "View.MemoryView":691
- *                 raise TypeError("Cannot index with type '%s'" % type(item))
- * 
- *             have_slices = have_slices or isinstance(item, slice)             # <<<<<<<<<<<<<<
- *             result.append(item)
- * 
- */
-      __pyx_t_10 = (__pyx_v_have_slices != 0);
-      if (!__pyx_t_10) {
-      } else {
-        __pyx_t_1 = __pyx_t_10;
-        goto __pyx_L11_bool_binop_done;
-      }
-      __pyx_t_10 = PySlice_Check(__pyx_v_item); 
-      __pyx_t_2 = (__pyx_t_10 != 0);
-      __pyx_t_1 = __pyx_t_2;
-      __pyx_L11_bool_binop_done:;
-      __pyx_v_have_slices = __pyx_t_1;
-
-      /* "View.MemoryView":692
- * 
- *             have_slices = have_slices or isinstance(item, slice)
- *             result.append(item)             # <<<<<<<<<<<<<<
- * 
- *     nslices = ndim - len(result)
- */
-      __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 692, __pyx_L1_error)
-    }
-    __pyx_L6:;
-
-    /* "View.MemoryView":679
- *     have_slices = False
- *     seen_ellipsis = False
- *     for idx, item in enumerate(tup):             # <<<<<<<<<<<<<<
- *         if item is Ellipsis:
- *             if not seen_ellipsis:
- */
-  }
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "View.MemoryView":694
- *             result.append(item)
- * 
- *     nslices = ndim - len(result)             # <<<<<<<<<<<<<<
- *     if nslices:
- *         result.extend([slice(None)] * nslices)
- */
-  __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 694, __pyx_L1_error)
-  __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
-
-  /* "View.MemoryView":695
- * 
- *     nslices = ndim - len(result)
- *     if nslices:             # <<<<<<<<<<<<<<
- *         result.extend([slice(None)] * nslices)
- * 
- */
-  __pyx_t_1 = (__pyx_v_nslices != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":696
- *     nslices = ndim - len(result)
- *     if nslices:
- *         result.extend([slice(None)] * nslices)             # <<<<<<<<<<<<<<
- * 
- *     return have_slices or nslices, tuple(result)
- */
-    __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 696, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    { Py_ssize_t __pyx_temp;
-      for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
-        __Pyx_INCREF(__pyx_slice__3);
-        __Pyx_GIVEREF(__pyx_slice__3);
-        PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__3);
-      }
-    }
-    __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 696, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-    /* "View.MemoryView":695
- * 
- *     nslices = ndim - len(result)
- *     if nslices:             # <<<<<<<<<<<<<<
- *         result.extend([slice(None)] * nslices)
- * 
- */
-  }
-
-  /* "View.MemoryView":698
- *         result.extend([slice(None)] * nslices)
- * 
- *     return have_slices or nslices, tuple(result)             # <<<<<<<<<<<<<<
- * 
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
- */
-  __Pyx_XDECREF(__pyx_r);
-  if (!__pyx_v_have_slices) {
-  } else {
-    __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __pyx_t_3 = __pyx_t_4;
-    __pyx_t_4 = 0;
-    goto __pyx_L14_bool_binop_done;
-  }
-  __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_3 = __pyx_t_4;
-  __pyx_t_4 = 0;
-  __pyx_L14_bool_binop_done:;
-  __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 698, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_11);
-  __Pyx_GIVEREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
-  __Pyx_GIVEREF(__pyx_t_4);
-  PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
-  __pyx_t_3 = 0;
-  __pyx_t_4 = 0;
-  __pyx_r = ((PyObject*)__pyx_t_11);
-  __pyx_t_11 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":666
- *     return isinstance(o, memoryview)
- * 
- * cdef tuple _unellipsify(object index, int ndim):             # <<<<<<<<<<<<<<
- *     """
- *     Replace all ellipses with full slices and fill incomplete indices with
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_11);
-  __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v_tup);
-  __Pyx_XDECREF(__pyx_v_result);
-  __Pyx_XDECREF(__pyx_v_idx);
-  __Pyx_XDECREF(__pyx_v_item);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
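
[annotation] The C above is Cython's generated code for the `_unellipsify` helper whose source appears in the interleaved `/* "View.MemoryView" */` comments: it normalises an index expression so every dimension of an ndim-dimensional view gets exactly one index, expanding `...` into full slices and padding short index tuples. As a reading aid, here is a pure-Python sketch of that same logic, transliterated from the embedded Cython source; `_is_index` is a hypothetical stand-in for CPython's `PyIndex_Check`, approximated with `operator.index`.

    import operator

    def _is_index(item):
        # Approximates PyIndex_Check: anything usable as an integer index.
        try:
            operator.index(item)
            return True
        except TypeError:
            return False

    def unellipsify(index, ndim):
        tup = index if isinstance(index, tuple) else (index,)
        result = []
        have_slices = False
        seen_ellipsis = False
        for item in tup:  # the generated code's enumerate() index is unused
            if item is Ellipsis:
                if not seen_ellipsis:
                    # The first Ellipsis expands to enough full slices to
                    # cover the dimensions the index tuple leaves implicit.
                    result.extend([slice(None)] * (ndim - len(tup) + 1))
                    seen_ellipsis = True
                else:
                    # Any later Ellipsis stands for a single full slice.
                    result.append(slice(None))
                have_slices = True
            else:
                if not isinstance(item, slice) and not _is_index(item):
                    raise TypeError("Cannot index with type '%s'" % type(item))
                have_slices = have_slices or isinstance(item, slice)
                result.append(item)
        # Pad short indices with full slices for the trailing dimensions.
        nslices = ndim - len(result)
        if nslices:
            result.extend([slice(None)] * nslices)
        return have_slices or nslices, tuple(result)

    # e.g. unellipsify((0, Ellipsis, slice(1, 3)), 4)
    #      -> (True, (0, slice(None), slice(None), slice(1, 3)))
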
-
-/* "View.MemoryView":700
- *     return have_slices or nslices, tuple(result)
- * 
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):             # <<<<<<<<<<<<<<
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:
- */
-
-static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
-  Py_ssize_t __pyx_v_suboffset;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t *__pyx_t_1;
-  Py_ssize_t *__pyx_t_2;
-  Py_ssize_t *__pyx_t_3;
-  int __pyx_t_4;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
-
-  /* "View.MemoryView":701
- * 
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
- *     for suboffset in suboffsets[:ndim]:             # <<<<<<<<<<<<<<
- *         if suboffset >= 0:
- *             raise ValueError("Indirect dimensions not supported")
- */
-  __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
-  for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
-    __pyx_t_1 = __pyx_t_3;
-    __pyx_v_suboffset = (__pyx_t_1[0]);
-
-    /* "View.MemoryView":702
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:             # <<<<<<<<<<<<<<
- *             raise ValueError("Indirect dimensions not supported")
- * 
- */
-    __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
-    if (unlikely(__pyx_t_4)) {
-
-      /* "View.MemoryView":703
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:
- *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 703, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_5);
-      __Pyx_Raise(__pyx_t_5, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
-      __PYX_ERR(2, 703, __pyx_L1_error)
-
-      /* "View.MemoryView":702
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:             # <<<<<<<<<<<<<<
- *             raise ValueError("Indirect dimensions not supported")
- * 
- */
-    }
-  }
-
-  /* "View.MemoryView":700
- *     return have_slices or nslices, tuple(result)
- * 
- * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):             # <<<<<<<<<<<<<<
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
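
[annotation] The function above, `assert_direct_dimensions`, guards the slicing path: in Cython's memoryview layout a suboffset >= 0 marks an indirect (pointer-chasing) dimension, and those are rejected before slicing. A minimal Python sketch, with a plain sequence standing in for the C `Py_ssize_t *` array:

    def assert_direct_dimensions(suboffsets, ndim):
        for suboffset in suboffsets[:ndim]:
            if suboffset >= 0:
                raise ValueError("Indirect dimensions not supported")

    assert_direct_dimensions([-1, -1, -1], 3)   # OK: all dimensions direct
    # assert_direct_dimensions([-1, 8], 2)      # would raise ValueError
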
-
-/* "View.MemoryView":710
- * 
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices):             # <<<<<<<<<<<<<<
- *     cdef int new_ndim = 0, suboffset_dim = -1, dim
- *     cdef bint negative_step
- */
-
-static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
-  int __pyx_v_new_ndim;
-  int __pyx_v_suboffset_dim;
-  int __pyx_v_dim;
-  __Pyx_memviewslice __pyx_v_src;
-  __Pyx_memviewslice __pyx_v_dst;
-  __Pyx_memviewslice *__pyx_v_p_src;
-  struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
-  __Pyx_memviewslice *__pyx_v_p_dst;
-  int *__pyx_v_p_suboffset_dim;
-  Py_ssize_t __pyx_v_start;
-  Py_ssize_t __pyx_v_stop;
-  Py_ssize_t __pyx_v_step;
-  int __pyx_v_have_start;
-  int __pyx_v_have_stop;
-  int __pyx_v_have_step;
-  PyObject *__pyx_v_index = NULL;
-  struct __pyx_memoryview_obj *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  struct __pyx_memoryview_obj *__pyx_t_4;
-  char *__pyx_t_5;
-  int __pyx_t_6;
-  Py_ssize_t __pyx_t_7;
-  PyObject *(*__pyx_t_8)(PyObject *);
-  PyObject *__pyx_t_9 = NULL;
-  Py_ssize_t __pyx_t_10;
-  int __pyx_t_11;
-  Py_ssize_t __pyx_t_12;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("memview_slice", 0);
-
-  /* "View.MemoryView":711
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices):
- *     cdef int new_ndim = 0, suboffset_dim = -1, dim             # <<<<<<<<<<<<<<
- *     cdef bint negative_step
- *     cdef __Pyx_memviewslice src, dst
- */
-  __pyx_v_new_ndim = 0;
-  __pyx_v_suboffset_dim = -1;
-
-  /* "View.MemoryView":718
- * 
- * 
- *     memset(&dst, 0, sizeof(dst))             # <<<<<<<<<<<<<<
- * 
- *     cdef _memoryviewslice memviewsliceobj
- */
-  (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
-
-  /* "View.MemoryView":722
- *     cdef _memoryviewslice memviewsliceobj
- * 
- *     assert memview.view.ndim > 0             # <<<<<<<<<<<<<<
- * 
- *     if isinstance(memview, _memoryviewslice):
- */
-  #ifndef CYTHON_WITHOUT_ASSERTIONS
-  if (unlikely(!Py_OptimizeFlag)) {
-    if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
-      PyErr_SetNone(PyExc_AssertionError);
-      __PYX_ERR(2, 722, __pyx_L1_error)
-    }
-  }
-  #endif
-
-  /* "View.MemoryView":724
- *     assert memview.view.ndim > 0
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         memviewsliceobj = memview
- *         p_src = &memviewsliceobj.from_slice
- */
-  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); 
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":725
- * 
- *     if isinstance(memview, _memoryviewslice):
- *         memviewsliceobj = memview             # <<<<<<<<<<<<<<
- *         p_src = &memviewsliceobj.from_slice
- *     else:
- */
-    if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 725, __pyx_L1_error)
-    __pyx_t_3 = ((PyObject *)__pyx_v_memview);
-    __Pyx_INCREF(__pyx_t_3);
-    __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
-    __pyx_t_3 = 0;
-
-    /* "View.MemoryView":726
- *     if isinstance(memview, _memoryviewslice):
- *         memviewsliceobj = memview
- *         p_src = &memviewsliceobj.from_slice             # <<<<<<<<<<<<<<
- *     else:
- *         slice_copy(memview, &src)
- */
-    __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
-
-    /* "View.MemoryView":724
- *     assert memview.view.ndim > 0
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         memviewsliceobj = memview
- *         p_src = &memviewsliceobj.from_slice
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":728
- *         p_src = &memviewsliceobj.from_slice
- *     else:
- *         slice_copy(memview, &src)             # <<<<<<<<<<<<<<
- *         p_src = &src
- * 
- */
-  /*else*/ {
-    __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
-
-    /* "View.MemoryView":729
- *     else:
- *         slice_copy(memview, &src)
- *         p_src = &src             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __pyx_v_p_src = (&__pyx_v_src);
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":735
- * 
- * 
- *     dst.memview = p_src.memview             # <<<<<<<<<<<<<<
- *     dst.data = p_src.data
- * 
- */
-  __pyx_t_4 = __pyx_v_p_src->memview;
-  __pyx_v_dst.memview = __pyx_t_4;
-
-  /* "View.MemoryView":736
- * 
- *     dst.memview = p_src.memview
- *     dst.data = p_src.data             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_5 = __pyx_v_p_src->data;
-  __pyx_v_dst.data = __pyx_t_5;
-
-  /* "View.MemoryView":741
- * 
- * 
- *     cdef __Pyx_memviewslice *p_dst = &dst             # <<<<<<<<<<<<<<
- *     cdef int *p_suboffset_dim = &suboffset_dim
- *     cdef Py_ssize_t start, stop, step
- */
-  __pyx_v_p_dst = (&__pyx_v_dst);
-
-  /* "View.MemoryView":742
- * 
- *     cdef __Pyx_memviewslice *p_dst = &dst
- *     cdef int *p_suboffset_dim = &suboffset_dim             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t start, stop, step
- *     cdef bint have_start, have_stop, have_step
- */
-  __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
-
-  /* "View.MemoryView":746
- *     cdef bint have_start, have_stop, have_step
- * 
- *     for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
- *         if PyIndex_Check(index):
- *             slice_memviewslice(
- */
-  __pyx_t_6 = 0;
-  if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
-    __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
-    __pyx_t_8 = NULL;
-  } else {
-    __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 746, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 746, __pyx_L1_error)
-  }
-  for (;;) {
-    if (likely(!__pyx_t_8)) {
-      if (likely(PyList_CheckExact(__pyx_t_3))) {
-        if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
-        #else
-        __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_9);
-        #endif
-      } else {
-        if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
-        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-        __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
-        #else
-        __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
-        __Pyx_GOTREF(__pyx_t_9);
-        #endif
-      }
-    } else {
-      __pyx_t_9 = __pyx_t_8(__pyx_t_3);
-      if (unlikely(!__pyx_t_9)) {
-        PyObject* exc_type = PyErr_Occurred();
-        if (exc_type) {
-          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
-          else __PYX_ERR(2, 746, __pyx_L1_error)
-        }
-        break;
-      }
-      __Pyx_GOTREF(__pyx_t_9);
-    }
-    __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
-    __pyx_t_9 = 0;
-    __pyx_v_dim = __pyx_t_6;
-    __pyx_t_6 = (__pyx_t_6 + 1);
-
-    /* "View.MemoryView":747
- * 
- *     for dim, index in enumerate(indices):
- *         if PyIndex_Check(index):             # <<<<<<<<<<<<<<
- *             slice_memviewslice(
- *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- */
-    __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":751
- *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- *                 dim, new_ndim, p_suboffset_dim,
- *                 index, 0, 0, # start, stop, step             # <<<<<<<<<<<<<<
- *                 0, 0, 0, # have_{start,stop,step}
- *                 False)
- */
-      __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 751, __pyx_L1_error)
-
-      /* "View.MemoryView":748
- *     for dim, index in enumerate(indices):
- *         if PyIndex_Check(index):
- *             slice_memviewslice(             # <<<<<<<<<<<<<<
- *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- *                 dim, new_ndim, p_suboffset_dim,
- */
-      __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 748, __pyx_L1_error)
-
-      /* "View.MemoryView":747
- * 
- *     for dim, index in enumerate(indices):
- *         if PyIndex_Check(index):             # <<<<<<<<<<<<<<
- *             slice_memviewslice(
- *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- */
-      goto __pyx_L6;
-    }
-
-    /* "View.MemoryView":754
- *                 0, 0, 0, # have_{start,stop,step}
- *                 False)
- *         elif index is None:             # <<<<<<<<<<<<<<
- *             p_dst.shape[new_ndim] = 1
- *             p_dst.strides[new_ndim] = 0
- */
-    __pyx_t_2 = (__pyx_v_index == Py_None);
-    __pyx_t_1 = (__pyx_t_2 != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":755
- *                 False)
- *         elif index is None:
- *             p_dst.shape[new_ndim] = 1             # <<<<<<<<<<<<<<
- *             p_dst.strides[new_ndim] = 0
- *             p_dst.suboffsets[new_ndim] = -1
- */
-      (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
-
-      /* "View.MemoryView":756
- *         elif index is None:
- *             p_dst.shape[new_ndim] = 1
- *             p_dst.strides[new_ndim] = 0             # <<<<<<<<<<<<<<
- *             p_dst.suboffsets[new_ndim] = -1
- *             new_ndim += 1
- */
-      (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
-
-      /* "View.MemoryView":757
- *             p_dst.shape[new_ndim] = 1
- *             p_dst.strides[new_ndim] = 0
- *             p_dst.suboffsets[new_ndim] = -1             # <<<<<<<<<<<<<<
- *             new_ndim += 1
- *         else:
- */
-      (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
-
-      /* "View.MemoryView":758
- *             p_dst.strides[new_ndim] = 0
- *             p_dst.suboffsets[new_ndim] = -1
- *             new_ndim += 1             # <<<<<<<<<<<<<<
- *         else:
- *             start = index.start or 0
- */
-      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
-
-      /* "View.MemoryView":754
- *                 0, 0, 0, # have_{start,stop,step}
- *                 False)
- *         elif index is None:             # <<<<<<<<<<<<<<
- *             p_dst.shape[new_ndim] = 1
- *             p_dst.strides[new_ndim] = 0
- */
-      goto __pyx_L6;
-    }
-
-    /* "View.MemoryView":760
- *             new_ndim += 1
- *         else:
- *             start = index.start or 0             # <<<<<<<<<<<<<<
- *             stop = index.stop or 0
- *             step = index.step or 0
- */
-    /*else*/ {
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 760, __pyx_L1_error)
-      if (!__pyx_t_1) {
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      } else {
-        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 760, __pyx_L1_error)
-        __pyx_t_10 = __pyx_t_12;
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-        goto __pyx_L7_bool_binop_done;
-      }
-      __pyx_t_10 = 0;
-      __pyx_L7_bool_binop_done:;
-      __pyx_v_start = __pyx_t_10;
-
-      /* "View.MemoryView":761
- *         else:
- *             start = index.start or 0
- *             stop = index.stop or 0             # <<<<<<<<<<<<<<
- *             step = index.step or 0
- * 
- */
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 761, __pyx_L1_error)
-      if (!__pyx_t_1) {
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      } else {
-        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 761, __pyx_L1_error)
-        __pyx_t_10 = __pyx_t_12;
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-        goto __pyx_L9_bool_binop_done;
-      }
-      __pyx_t_10 = 0;
-      __pyx_L9_bool_binop_done:;
-      __pyx_v_stop = __pyx_t_10;
-
-      /* "View.MemoryView":762
- *             start = index.start or 0
- *             stop = index.stop or 0
- *             step = index.step or 0             # <<<<<<<<<<<<<<
- * 
- *             have_start = index.start is not None
- */
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 762, __pyx_L1_error)
-      if (!__pyx_t_1) {
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      } else {
-        __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 762, __pyx_L1_error)
-        __pyx_t_10 = __pyx_t_12;
-        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-        goto __pyx_L11_bool_binop_done;
-      }
-      __pyx_t_10 = 0;
-      __pyx_L11_bool_binop_done:;
-      __pyx_v_step = __pyx_t_10;
-
-      /* "View.MemoryView":764
- *             step = index.step or 0
- * 
- *             have_start = index.start is not None             # <<<<<<<<<<<<<<
- *             have_stop = index.stop is not None
- *             have_step = index.step is not None
- */
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 764, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = (__pyx_t_9 != Py_None);
-      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      __pyx_v_have_start = __pyx_t_1;
-
-      /* "View.MemoryView":765
- * 
- *             have_start = index.start is not None
- *             have_stop = index.stop is not None             # <<<<<<<<<<<<<<
- *             have_step = index.step is not None
- * 
- */
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 765, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = (__pyx_t_9 != Py_None);
-      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      __pyx_v_have_stop = __pyx_t_1;
-
-      /* "View.MemoryView":766
- *             have_start = index.start is not None
- *             have_stop = index.stop is not None
- *             have_step = index.step is not None             # <<<<<<<<<<<<<<
- * 
- *             slice_memviewslice(
- */
-      __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 766, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_9);
-      __pyx_t_1 = (__pyx_t_9 != Py_None);
-      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
-      __pyx_v_have_step = __pyx_t_1;
-
-      /* "View.MemoryView":768
- *             have_step = index.step is not None
- * 
- *             slice_memviewslice(             # <<<<<<<<<<<<<<
- *                 p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- *                 dim, new_ndim, p_suboffset_dim,
- */
-      __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 768, __pyx_L1_error)
-
-      /* "View.MemoryView":774
- *                 have_start, have_stop, have_step,
- *                 True)
- *             new_ndim += 1             # <<<<<<<<<<<<<<
- * 
- *     if isinstance(memview, _memoryviewslice):
- */
-      __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
-    }
-    __pyx_L6:;
-
-    /* "View.MemoryView":746
- *     cdef bint have_start, have_stop, have_step
- * 
- *     for dim, index in enumerate(indices):             # <<<<<<<<<<<<<<
- *         if PyIndex_Check(index):
- *             slice_memviewslice(
- */
-  }
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "View.MemoryView":776
- *             new_ndim += 1
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         return memoryview_fromslice(dst, new_ndim,
- *                                     memviewsliceobj.to_object_func,
- */
-  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); 
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":777
- * 
- *     if isinstance(memview, _memoryviewslice):
- *         return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
- *                                     memviewsliceobj.to_object_func,
- *                                     memviewsliceobj.to_dtype_func,
- */
-    __Pyx_XDECREF(((PyObject *)__pyx_r));
-
-    /* "View.MemoryView":778
- *     if isinstance(memview, _memoryviewslice):
- *         return memoryview_fromslice(dst, new_ndim,
- *                                     memviewsliceobj.to_object_func,             # <<<<<<<<<<<<<<
- *                                     memviewsliceobj.to_dtype_func,
- *                                     memview.dtype_is_object)
- */
-    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 778, __pyx_L1_error) }
-
-    /* "View.MemoryView":779
- *         return memoryview_fromslice(dst, new_ndim,
- *                                     memviewsliceobj.to_object_func,
- *                                     memviewsliceobj.to_dtype_func,             # <<<<<<<<<<<<<<
- *                                     memview.dtype_is_object)
- *     else:
- */
-    if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 779, __pyx_L1_error) }
-
-    /* "View.MemoryView":777
- * 
- *     if isinstance(memview, _memoryviewslice):
- *         return memoryview_fromslice(dst, new_ndim,             # <<<<<<<<<<<<<<
- *                                     memviewsliceobj.to_object_func,
- *                                     memviewsliceobj.to_dtype_func,
- */
-    __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 777, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 777, __pyx_L1_error)
-    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
-    __pyx_t_3 = 0;
-    goto __pyx_L0;
-
-    /* "View.MemoryView":776
- *             new_ndim += 1
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         return memoryview_fromslice(dst, new_ndim,
- *                                     memviewsliceobj.to_object_func,
- */
-  }
-
-  /* "View.MemoryView":782
- *                                     memview.dtype_is_object)
- *     else:
- *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
- *                                     memview.dtype_is_object)
- * 
- */
-  /*else*/ {
-    __Pyx_XDECREF(((PyObject *)__pyx_r));
-
-    /* "View.MemoryView":783
- *     else:
- *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,
- *                                     memview.dtype_is_object)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 782, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-
-    /* "View.MemoryView":782
- *                                     memview.dtype_is_object)
- *     else:
- *         return memoryview_fromslice(dst, new_ndim, NULL, NULL,             # <<<<<<<<<<<<<<
- *                                     memview.dtype_is_object)
- * 
- */
-    if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 782, __pyx_L1_error)
-    __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
-    __pyx_t_3 = 0;
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":710
- * 
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices):             # <<<<<<<<<<<<<<
- *     cdef int new_ndim = 0, suboffset_dim = -1, dim
- *     cdef bint negative_step
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_9);
-  __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
-  __Pyx_XDECREF(__pyx_v_index);
-  __Pyx_XGIVEREF((PyObject *)__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
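
[annotation] `memview_slice` above walks the (already unellipsified) index tuple and dispatches three ways per entry: an integer index drops a dimension and only moves the data pointer, `None` inserts a new length-1 axis, and a slice keeps the axis with adjusted extent and stride. The toy sketch below is a hypothetical, simplified stand-in operating on a flat (shape, strides) description; it uses `slice.indices()` in place of the hand-written start/stop/step clamping that the generated `slice_memviewslice` performs, and it omits the suboffset bookkeeping for indirect dimensions entirely.

    def toy_memview_slice(shape, strides, indices, offset=0):
        # `indices` is assumed to be the already-unellipsified tuple.
        new_shape, new_strides = [], []
        dim = 0  # position in the source view
        for index in indices:
            if index is None:
                # New axis: shape 1, stride 0; consumes no source dimension.
                new_shape.append(1)
                new_strides.append(0)
                continue
            extent, stride = shape[dim], strides[dim]
            if isinstance(index, slice):
                # slice.indices() clamps bounds the same way the generated
                # code does via have_start/have_stop/have_step.
                start, stop, step = index.indices(extent)
                new_shape.append(len(range(start, stop, step)))
                new_strides.append(stride * step)
                offset += start * stride
            else:
                # Integer index: normalise negatives, bounds-check, move the
                # data offset; the axis disappears from the result.
                if index < 0:
                    index += extent
                if not 0 <= index < extent:
                    raise IndexError("Index out of bounds (axis %d)" % dim)
                offset += index * stride
            dim += 1
        # Unindexed trailing dimensions remain as full slices.
        new_shape.extend(shape[dim:])
        new_strides.extend(strides[dim:])
        return offset, tuple(new_shape), tuple(new_strides)

    # e.g. a 4x6 C-contiguous view with element strides (6, 1):
    # toy_memview_slice((4, 6), (6, 1), (1, slice(1, 5, 2)))
    # -> (7, (2,), (2,))   data pointer moved by 7 elements, one axis kept
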
-
-/* "View.MemoryView":807
- * 
- * @cname('__pyx_memoryview_slice_memviewslice')
- * cdef int slice_memviewslice(             # <<<<<<<<<<<<<<
- *         __Pyx_memviewslice *dst,
- *         Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
- */
-
-static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
-  Py_ssize_t __pyx_v_new_shape;
-  int __pyx_v_negative_step;
-  int __pyx_r;
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-
-  /* "View.MemoryView":827
- *     cdef bint negative_step
- * 
- *     if not is_slice:             # <<<<<<<<<<<<<<
- * 
- *         if start < 0:
- */
-  __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":829
- *     if not is_slice:
- * 
- *         if start < 0:             # <<<<<<<<<<<<<<
- *             start += shape
- *         if not 0 <= start < shape:
- */
-    __pyx_t_1 = ((__pyx_v_start < 0) != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":830
- * 
- *         if start < 0:
- *             start += shape             # <<<<<<<<<<<<<<
- *         if not 0 <= start < shape:
- *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
- */
-      __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
-
-      /* "View.MemoryView":829
- *     if not is_slice:
- * 
- *         if start < 0:             # <<<<<<<<<<<<<<
- *             start += shape
- *         if not 0 <= start < shape:
- */
-    }
-
-    /* "View.MemoryView":831
- *         if start < 0:
- *             start += shape
- *         if not 0 <= start < shape:             # <<<<<<<<<<<<<<
- *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
- *     else:
- */
-    __pyx_t_1 = (0 <= __pyx_v_start);
-    if (__pyx_t_1) {
-      __pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
-    }
-    __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":832
- *             start += shape
- *         if not 0 <= start < shape:
- *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)             # <<<<<<<<<<<<<<
- *     else:
- * 
- */
-      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 832, __pyx_L1_error)
-
-      /* "View.MemoryView":831
- *         if start < 0:
- *             start += shape
- *         if not 0 <= start < shape:             # <<<<<<<<<<<<<<
- *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
- *     else:
- */
-    }
-
-    /* "View.MemoryView":827
- *     cdef bint negative_step
- * 
- *     if not is_slice:             # <<<<<<<<<<<<<<
- * 
- *         if start < 0:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":835
- *     else:
- * 
- *         negative_step = have_step != 0 and step < 0             # <<<<<<<<<<<<<<
- * 
- *         if have_step and step == 0:
- */
-  /*else*/ {
-    __pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
-    if (__pyx_t_1) {
-    } else {
-      __pyx_t_2 = __pyx_t_1;
-      goto __pyx_L6_bool_binop_done;
-    }
-    __pyx_t_1 = ((__pyx_v_step < 0) != 0);
-    __pyx_t_2 = __pyx_t_1;
-    __pyx_L6_bool_binop_done:;
-    __pyx_v_negative_step = __pyx_t_2;
-
-    /* "View.MemoryView":837
- *         negative_step = have_step != 0 and step < 0
- * 
- *         if have_step and step == 0:             # <<<<<<<<<<<<<<
- *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
- * 
- */
-    __pyx_t_1 = (__pyx_v_have_step != 0);
-    if (__pyx_t_1) {
-    } else {
-      __pyx_t_2 = __pyx_t_1;
-      goto __pyx_L9_bool_binop_done;
-    }
-    __pyx_t_1 = ((__pyx_v_step == 0) != 0);
-    __pyx_t_2 = __pyx_t_1;
-    __pyx_L9_bool_binop_done:;
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":838
- * 
- *         if have_step and step == 0:
- *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 838, __pyx_L1_error)
-
-      /* "View.MemoryView":837
- *         negative_step = have_step != 0 and step < 0
- * 
- *         if have_step and step == 0:             # <<<<<<<<<<<<<<
- *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
- * 
- */
-    }
-
-    /* "View.MemoryView":841
- * 
- * 
- *         if have_start:             # <<<<<<<<<<<<<<
- *             if start < 0:
- *                 start += shape
- */
-    __pyx_t_2 = (__pyx_v_have_start != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":842
- * 
- *         if have_start:
- *             if start < 0:             # <<<<<<<<<<<<<<
- *                 start += shape
- *                 if start < 0:
- */
-      __pyx_t_2 = ((__pyx_v_start < 0) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":843
- *         if have_start:
- *             if start < 0:
- *                 start += shape             # <<<<<<<<<<<<<<
- *                 if start < 0:
- *                     start = 0
- */
-        __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
-
-        /* "View.MemoryView":844
- *             if start < 0:
- *                 start += shape
- *                 if start < 0:             # <<<<<<<<<<<<<<
- *                     start = 0
- *             elif start >= shape:
- */
-        __pyx_t_2 = ((__pyx_v_start < 0) != 0);
-        if (__pyx_t_2) {
-
-          /* "View.MemoryView":845
- *                 start += shape
- *                 if start < 0:
- *                     start = 0             # <<<<<<<<<<<<<<
- *             elif start >= shape:
- *                 if negative_step:
- */
-          __pyx_v_start = 0;
-
-          /* "View.MemoryView":844
- *             if start < 0:
- *                 start += shape
- *                 if start < 0:             # <<<<<<<<<<<<<<
- *                     start = 0
- *             elif start >= shape:
- */
-        }
-
-        /* "View.MemoryView":842
- * 
- *         if have_start:
- *             if start < 0:             # <<<<<<<<<<<<<<
- *                 start += shape
- *                 if start < 0:
- */
-        goto __pyx_L12;
-      }
-
-      /* "View.MemoryView":846
- *                 if start < 0:
- *                     start = 0
- *             elif start >= shape:             # <<<<<<<<<<<<<<
- *                 if negative_step:
- *                     start = shape - 1
- */
-      __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":847
- *                     start = 0
- *             elif start >= shape:
- *                 if negative_step:             # <<<<<<<<<<<<<<
- *                     start = shape - 1
- *                 else:
- */
-        __pyx_t_2 = (__pyx_v_negative_step != 0);
-        if (__pyx_t_2) {
-
-          /* "View.MemoryView":848
- *             elif start >= shape:
- *                 if negative_step:
- *                     start = shape - 1             # <<<<<<<<<<<<<<
- *                 else:
- *                     start = shape
- */
-          __pyx_v_start = (__pyx_v_shape - 1);
-
-          /* "View.MemoryView":847
- *                     start = 0
- *             elif start >= shape:
- *                 if negative_step:             # <<<<<<<<<<<<<<
- *                     start = shape - 1
- *                 else:
- */
-          goto __pyx_L14;
-        }
-
-        /* "View.MemoryView":850
- *                     start = shape - 1
- *                 else:
- *                     start = shape             # <<<<<<<<<<<<<<
- *         else:
- *             if negative_step:
- */
-        /*else*/ {
-          __pyx_v_start = __pyx_v_shape;
-        }
-        __pyx_L14:;
-
-        /* "View.MemoryView":846
- *                 if start < 0:
- *                     start = 0
- *             elif start >= shape:             # <<<<<<<<<<<<<<
- *                 if negative_step:
- *                     start = shape - 1
- */
-      }
-      __pyx_L12:;
-
-      /* "View.MemoryView":841
- * 
- * 
- *         if have_start:             # <<<<<<<<<<<<<<
- *             if start < 0:
- *                 start += shape
- */
-      goto __pyx_L11;
-    }
-
-    /* "View.MemoryView":852
- *                     start = shape
- *         else:
- *             if negative_step:             # <<<<<<<<<<<<<<
- *                 start = shape - 1
- *             else:
- */
-    /*else*/ {
-      __pyx_t_2 = (__pyx_v_negative_step != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":853
- *         else:
- *             if negative_step:
- *                 start = shape - 1             # <<<<<<<<<<<<<<
- *             else:
- *                 start = 0
- */
-        __pyx_v_start = (__pyx_v_shape - 1);
-
-        /* "View.MemoryView":852
- *                     start = shape
- *         else:
- *             if negative_step:             # <<<<<<<<<<<<<<
- *                 start = shape - 1
- *             else:
- */
-        goto __pyx_L15;
-      }
-
-      /* "View.MemoryView":855
- *                 start = shape - 1
- *             else:
- *                 start = 0             # <<<<<<<<<<<<<<
- * 
- *         if have_stop:
- */
-      /*else*/ {
-        __pyx_v_start = 0;
-      }
-      __pyx_L15:;
-    }
-    __pyx_L11:;
-
-    /* "View.MemoryView":857
- *                 start = 0
- * 
- *         if have_stop:             # <<<<<<<<<<<<<<
- *             if stop < 0:
- *                 stop += shape
- */
-    __pyx_t_2 = (__pyx_v_have_stop != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":858
- * 
- *         if have_stop:
- *             if stop < 0:             # <<<<<<<<<<<<<<
- *                 stop += shape
- *                 if stop < 0:
- */
-      __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":859
- *         if have_stop:
- *             if stop < 0:
- *                 stop += shape             # <<<<<<<<<<<<<<
- *                 if stop < 0:
- *                     stop = 0
- */
-        __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
-
-        /* "View.MemoryView":860
- *             if stop < 0:
- *                 stop += shape
- *                 if stop < 0:             # <<<<<<<<<<<<<<
- *                     stop = 0
- *             elif stop > shape:
- */
-        __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
-        if (__pyx_t_2) {
-
-          /* "View.MemoryView":861
- *                 stop += shape
- *                 if stop < 0:
- *                     stop = 0             # <<<<<<<<<<<<<<
- *             elif stop > shape:
- *                 stop = shape
- */
-          __pyx_v_stop = 0;
-
-          /* "View.MemoryView":860
- *             if stop < 0:
- *                 stop += shape
- *                 if stop < 0:             # <<<<<<<<<<<<<<
- *                     stop = 0
- *             elif stop > shape:
- */
-        }
-
-        /* "View.MemoryView":858
- * 
- *         if have_stop:
- *             if stop < 0:             # <<<<<<<<<<<<<<
- *                 stop += shape
- *                 if stop < 0:
- */
-        goto __pyx_L17;
-      }
-
-      /* "View.MemoryView":862
- *                 if stop < 0:
- *                     stop = 0
- *             elif stop > shape:             # <<<<<<<<<<<<<<
- *                 stop = shape
- *         else:
- */
-      __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":863
- *                     stop = 0
- *             elif stop > shape:
- *                 stop = shape             # <<<<<<<<<<<<<<
- *         else:
- *             if negative_step:
- */
-        __pyx_v_stop = __pyx_v_shape;
-
-        /* "View.MemoryView":862
- *                 if stop < 0:
- *                     stop = 0
- *             elif stop > shape:             # <<<<<<<<<<<<<<
- *                 stop = shape
- *         else:
- */
-      }
-      __pyx_L17:;
-
-      /* "View.MemoryView":857
- *                 start = 0
- * 
- *         if have_stop:             # <<<<<<<<<<<<<<
- *             if stop < 0:
- *                 stop += shape
- */
-      goto __pyx_L16;
-    }
-
-    /* "View.MemoryView":865
- *                 stop = shape
- *         else:
- *             if negative_step:             # <<<<<<<<<<<<<<
- *                 stop = -1
- *             else:
- */
-    /*else*/ {
-      __pyx_t_2 = (__pyx_v_negative_step != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":866
- *         else:
- *             if negative_step:
- *                 stop = -1             # <<<<<<<<<<<<<<
- *             else:
- *                 stop = shape
- */
-        __pyx_v_stop = -1L;
-
-        /* "View.MemoryView":865
- *                 stop = shape
- *         else:
- *             if negative_step:             # <<<<<<<<<<<<<<
- *                 stop = -1
- *             else:
- */
-        goto __pyx_L19;
-      }
-
-      /* "View.MemoryView":868
- *                 stop = -1
- *             else:
- *                 stop = shape             # <<<<<<<<<<<<<<
- * 
- *         if not have_step:
- */
-      /*else*/ {
-        __pyx_v_stop = __pyx_v_shape;
-      }
-      __pyx_L19:;
-    }
-    __pyx_L16:;
-
-    /* "View.MemoryView":870
- *                 stop = shape
- * 
- *         if not have_step:             # <<<<<<<<<<<<<<
- *             step = 1
- * 
- */
-    __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":871
- * 
- *         if not have_step:
- *             step = 1             # <<<<<<<<<<<<<<
- * 
- * 
- */
-      __pyx_v_step = 1;
-
-      /* "View.MemoryView":870
- *                 stop = shape
- * 
- *         if not have_step:             # <<<<<<<<<<<<<<
- *             step = 1
- * 
- */
-    }
-
-    /* "View.MemoryView":875
- * 
- *         with cython.cdivision(True):
- *             new_shape = (stop - start) // step             # <<<<<<<<<<<<<<
- * 
- *             if (stop - start) - step * new_shape:
- */
-    __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
-
-    /* "View.MemoryView":877
- *             new_shape = (stop - start) // step
- * 
- *             if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
- *                 new_shape += 1
- * 
- */
-    __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":878
- * 
- *             if (stop - start) - step * new_shape:
- *                 new_shape += 1             # <<<<<<<<<<<<<<
- * 
- *         if new_shape < 0:
- */
-      __pyx_v_new_shape = (__pyx_v_new_shape + 1);
-
-      /* "View.MemoryView":877
- *             new_shape = (stop - start) // step
- * 
- *             if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
- *                 new_shape += 1
- * 
- */
-    }
-
-    /* "View.MemoryView":880
- *                 new_shape += 1
- * 
- *         if new_shape < 0:             # <<<<<<<<<<<<<<
- *             new_shape = 0
- * 
- */
-    __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":881
- * 
- *         if new_shape < 0:
- *             new_shape = 0             # <<<<<<<<<<<<<<
- * 
- * 
- */
-      __pyx_v_new_shape = 0;
-
-      /* "View.MemoryView":880
- *                 new_shape += 1
- * 
- *         if new_shape < 0:             # <<<<<<<<<<<<<<
- *             new_shape = 0
- * 
- */
-    }
-
-    /* "View.MemoryView":884
- * 
- * 
- *         dst.strides[new_ndim] = stride * step             # <<<<<<<<<<<<<<
- *         dst.shape[new_ndim] = new_shape
- *         dst.suboffsets[new_ndim] = suboffset
- */
-    (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
-
-    /* "View.MemoryView":885
- * 
- *         dst.strides[new_ndim] = stride * step
- *         dst.shape[new_ndim] = new_shape             # <<<<<<<<<<<<<<
- *         dst.suboffsets[new_ndim] = suboffset
- * 
- */
-    (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
-
-    /* "View.MemoryView":886
- *         dst.strides[new_ndim] = stride * step
- *         dst.shape[new_ndim] = new_shape
- *         dst.suboffsets[new_ndim] = suboffset             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":889
- * 
- * 
- *     if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
- *         dst.data += start * stride
- *     else:
- */
-  __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":890
- * 
- *     if suboffset_dim[0] < 0:
- *         dst.data += start * stride             # <<<<<<<<<<<<<<
- *     else:
- *         dst.suboffsets[suboffset_dim[0]] += start * stride
- */
-    __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
-
-    /* "View.MemoryView":889
- * 
- * 
- *     if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
- *         dst.data += start * stride
- *     else:
- */
-    goto __pyx_L23;
-  }
-
-  /* "View.MemoryView":892
- *         dst.data += start * stride
- *     else:
- *         dst.suboffsets[suboffset_dim[0]] += start * stride             # <<<<<<<<<<<<<<
- * 
- *     if suboffset >= 0:
- */
-  /*else*/ {
-    __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
-    (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
-  }
-  __pyx_L23:;
-
-  /* "View.MemoryView":894
- *         dst.suboffsets[suboffset_dim[0]] += start * stride
- * 
- *     if suboffset >= 0:             # <<<<<<<<<<<<<<
- *         if not is_slice:
- *             if new_ndim == 0:
- */
-  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":895
- * 
- *     if suboffset >= 0:
- *         if not is_slice:             # <<<<<<<<<<<<<<
- *             if new_ndim == 0:
- *                 dst.data = (<char **> dst.data)[0] + suboffset
- */
-    __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":896
- *     if suboffset >= 0:
- *         if not is_slice:
- *             if new_ndim == 0:             # <<<<<<<<<<<<<<
- *                 dst.data = (<char **> dst.data)[0] + suboffset
- *             else:
- */
-      __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":897
- *         if not is_slice:
- *             if new_ndim == 0:
- *                 dst.data = (<char **> dst.data)[0] + suboffset             # <<<<<<<<<<<<<<
- *             else:
- *                 _err_dim(IndexError, "All dimensions preceding dimension %d "
- */
-        __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
-
-        /* "View.MemoryView":896
- *     if suboffset >= 0:
- *         if not is_slice:
- *             if new_ndim == 0:             # <<<<<<<<<<<<<<
- *                 dst.data = (<char **> dst.data)[0] + suboffset
- *             else:
- */
-        goto __pyx_L26;
-      }
-
-      /* "View.MemoryView":899
- *                 dst.data = (<char **> dst.data)[0] + suboffset
- *             else:
- *                 _err_dim(IndexError, "All dimensions preceding dimension %d "             # <<<<<<<<<<<<<<
- *                                      "must be indexed and not sliced", dim)
- *         else:
- */
-      /*else*/ {
-
-        /* "View.MemoryView":900
- *             else:
- *                 _err_dim(IndexError, "All dimensions preceding dimension %d "
- *                                      "must be indexed and not sliced", dim)             # <<<<<<<<<<<<<<
- *         else:
- *             suboffset_dim[0] = new_ndim
- */
-        __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 899, __pyx_L1_error)
-      }
-      __pyx_L26:;
-
-      /* "View.MemoryView":895
- * 
- *     if suboffset >= 0:
- *         if not is_slice:             # <<<<<<<<<<<<<<
- *             if new_ndim == 0:
- *                 dst.data = (<char **> dst.data)[0] + suboffset
- */
-      goto __pyx_L25;
-    }
-
-    /* "View.MemoryView":902
- *                                      "must be indexed and not sliced", dim)
- *         else:
- *             suboffset_dim[0] = new_ndim             # <<<<<<<<<<<<<<
- * 
- *     return 0
- */
-    /*else*/ {
-      (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
-    }
-    __pyx_L25:;
-
-    /* "View.MemoryView":894
- *         dst.suboffsets[suboffset_dim[0]] += start * stride
- * 
- *     if suboffset >= 0:             # <<<<<<<<<<<<<<
- *         if not is_slice:
- *             if new_ndim == 0:
- */
-  }
-
-  /* "View.MemoryView":904
- *             suboffset_dim[0] = new_ndim
- * 
- *     return 0             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":807
- * 
- * @cname('__pyx_memoryview_slice_memviewslice')
- * cdef int slice_memviewslice(             # <<<<<<<<<<<<<<
- *         __Pyx_memviewslice *dst,
- *         Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  {
-    #ifdef WITH_THREAD
-    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-    #endif
-    __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-    #ifdef WITH_THREAD
-    __Pyx_PyGILState_Release(__pyx_gilstate_save);
-    #endif
-  }
-  __pyx_r = -1;
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "View.MemoryView":910
- * 
- * @cname('__pyx_pybuffer_index')
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,             # <<<<<<<<<<<<<<
- *                           Py_ssize_t dim) except NULL:
- *     cdef Py_ssize_t shape, stride, suboffset = -1
- */
-
-static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
-  Py_ssize_t __pyx_v_shape;
-  Py_ssize_t __pyx_v_stride;
-  Py_ssize_t __pyx_v_suboffset;
-  Py_ssize_t __pyx_v_itemsize;
-  char *__pyx_v_resultp;
-  char *__pyx_r;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("pybuffer_index", 0);
-
-  /* "View.MemoryView":912
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
- *                           Py_ssize_t dim) except NULL:
- *     cdef Py_ssize_t shape, stride, suboffset = -1             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t itemsize = view.itemsize
- *     cdef char *resultp
- */
-  __pyx_v_suboffset = -1L;
-
-  /* "View.MemoryView":913
- *                           Py_ssize_t dim) except NULL:
- *     cdef Py_ssize_t shape, stride, suboffset = -1
- *     cdef Py_ssize_t itemsize = view.itemsize             # <<<<<<<<<<<<<<
- *     cdef char *resultp
- * 
- */
-  __pyx_t_1 = __pyx_v_view->itemsize;
-  __pyx_v_itemsize = __pyx_t_1;
-
-  /* "View.MemoryView":916
- *     cdef char *resultp
- * 
- *     if view.ndim == 0:             # <<<<<<<<<<<<<<
- *         shape = view.len / itemsize
- *         stride = itemsize
- */
-  __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":917
- * 
- *     if view.ndim == 0:
- *         shape = view.len / itemsize             # <<<<<<<<<<<<<<
- *         stride = itemsize
- *     else:
- */
-    if (unlikely(__pyx_v_itemsize == 0)) {
-      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
-      __PYX_ERR(2, 917, __pyx_L1_error)
-    }
-    else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1)  && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
-      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
-      __PYX_ERR(2, 917, __pyx_L1_error)
-    }
-    __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
-
-    /* "View.MemoryView":918
- *     if view.ndim == 0:
- *         shape = view.len / itemsize
- *         stride = itemsize             # <<<<<<<<<<<<<<
- *     else:
- *         shape = view.shape[dim]
- */
-    __pyx_v_stride = __pyx_v_itemsize;
-
-    /* "View.MemoryView":916
- *     cdef char *resultp
- * 
- *     if view.ndim == 0:             # <<<<<<<<<<<<<<
- *         shape = view.len / itemsize
- *         stride = itemsize
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":920
- *         stride = itemsize
- *     else:
- *         shape = view.shape[dim]             # <<<<<<<<<<<<<<
- *         stride = view.strides[dim]
- *         if view.suboffsets != NULL:
- */
-  /*else*/ {
-    __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
-
-    /* "View.MemoryView":921
- *     else:
- *         shape = view.shape[dim]
- *         stride = view.strides[dim]             # <<<<<<<<<<<<<<
- *         if view.suboffsets != NULL:
- *             suboffset = view.suboffsets[dim]
- */
-    __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
-
-    /* "View.MemoryView":922
- *         shape = view.shape[dim]
- *         stride = view.strides[dim]
- *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
- *             suboffset = view.suboffsets[dim]
- * 
- */
-    __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":923
- *         stride = view.strides[dim]
- *         if view.suboffsets != NULL:
- *             suboffset = view.suboffsets[dim]             # <<<<<<<<<<<<<<
- * 
- *     if index < 0:
- */
-      __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
-
-      /* "View.MemoryView":922
- *         shape = view.shape[dim]
- *         stride = view.strides[dim]
- *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
- *             suboffset = view.suboffsets[dim]
- * 
- */
-    }
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":925
- *             suboffset = view.suboffsets[dim]
- * 
- *     if index < 0:             # <<<<<<<<<<<<<<
- *         index += view.shape[dim]
- *         if index < 0:
- */
-  __pyx_t_2 = ((__pyx_v_index < 0) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":926
- * 
- *     if index < 0:
- *         index += view.shape[dim]             # <<<<<<<<<<<<<<
- *         if index < 0:
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- */
-    __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
-
-    /* "View.MemoryView":927
- *     if index < 0:
- *         index += view.shape[dim]
- *         if index < 0:             # <<<<<<<<<<<<<<
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- */
-    __pyx_t_2 = ((__pyx_v_index < 0) != 0);
-    if (unlikely(__pyx_t_2)) {
-
-      /* "View.MemoryView":928
- *         index += view.shape[dim]
- *         if index < 0:
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
- * 
- *     if index >= shape:
- */
-      __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_3);
-      __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 928, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_4);
-      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
-      __Pyx_GOTREF(__pyx_t_3);
-      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-      __PYX_ERR(2, 928, __pyx_L1_error)
-
-      /* "View.MemoryView":927
- *     if index < 0:
- *         index += view.shape[dim]
- *         if index < 0:             # <<<<<<<<<<<<<<
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- */
-    }
-
-    /* "View.MemoryView":925
- *             suboffset = view.suboffsets[dim]
- * 
- *     if index < 0:             # <<<<<<<<<<<<<<
- *         index += view.shape[dim]
- *         if index < 0:
- */
-  }
-
-  /* "View.MemoryView":930
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- *     if index >= shape:             # <<<<<<<<<<<<<<
- *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- */
-  __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
-  if (unlikely(__pyx_t_2)) {
-
-    /* "View.MemoryView":931
- * 
- *     if index >= shape:
- *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
- * 
- *     resultp = bufp + index * stride
- */
-    __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 931, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 931, __pyx_L1_error)
-
-    /* "View.MemoryView":930
- *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- *     if index >= shape:             # <<<<<<<<<<<<<<
- *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- */
-  }
-
-  /* "View.MemoryView":933
- *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
- * 
- *     resultp = bufp + index * stride             # <<<<<<<<<<<<<<
- *     if suboffset >= 0:
- *         resultp = (<char **> resultp)[0] + suboffset
- */
-  __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
-
-  /* "View.MemoryView":934
- * 
- *     resultp = bufp + index * stride
- *     if suboffset >= 0:             # <<<<<<<<<<<<<<
- *         resultp = (<char **> resultp)[0] + suboffset
- * 
- */
-  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":935
- *     resultp = bufp + index * stride
- *     if suboffset >= 0:
- *         resultp = (<char **> resultp)[0] + suboffset             # <<<<<<<<<<<<<<
- * 
- *     return resultp
- */
-    __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
-
-    /* "View.MemoryView":934
- * 
- *     resultp = bufp + index * stride
- *     if suboffset >= 0:             # <<<<<<<<<<<<<<
- *         resultp = (<char **> resultp)[0] + suboffset
- * 
- */
-  }
-
-  /* "View.MemoryView":937
- *         resultp = (<char **> resultp)[0] + suboffset
- * 
- *     return resultp             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = __pyx_v_resultp;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":910
- * 
- * @cname('__pyx_pybuffer_index')
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,             # <<<<<<<<<<<<<<
- *                           Py_ssize_t dim) except NULL:
- *     cdef Py_ssize_t shape, stride, suboffset = -1
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":943
- * 
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
- *     cdef int ndim = memslice.memview.view.ndim
- * 
- */
-
-static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
-  int __pyx_v_ndim;
-  Py_ssize_t *__pyx_v_shape;
-  Py_ssize_t *__pyx_v_strides;
-  int __pyx_v_i;
-  int __pyx_v_j;
-  int __pyx_r;
-  int __pyx_t_1;
-  Py_ssize_t *__pyx_t_2;
-  long __pyx_t_3;
-  long __pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  Py_ssize_t __pyx_t_6;
-  int __pyx_t_7;
-  int __pyx_t_8;
-  int __pyx_t_9;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-
-  /* "View.MemoryView":944
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
- *     cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
- * 
- *     cdef Py_ssize_t *shape = memslice.shape
- */
-  __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
-  __pyx_v_ndim = __pyx_t_1;
-
-  /* "View.MemoryView":946
- *     cdef int ndim = memslice.memview.view.ndim
- * 
- *     cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t *strides = memslice.strides
- * 
- */
-  __pyx_t_2 = __pyx_v_memslice->shape;
-  __pyx_v_shape = __pyx_t_2;
-
-  /* "View.MemoryView":947
- * 
- *     cdef Py_ssize_t *shape = memslice.shape
- *     cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = __pyx_v_memslice->strides;
-  __pyx_v_strides = __pyx_t_2;
-
-  /* "View.MemoryView":951
- * 
- *     cdef int i, j
- *     for i in range(ndim / 2):             # <<<<<<<<<<<<<<
- *         j = ndim - 1 - i
- *         strides[i], strides[j] = strides[j], strides[i]
- */
-  __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
-  __pyx_t_4 = __pyx_t_3;
-  for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
-    __pyx_v_i = __pyx_t_1;
-
-    /* "View.MemoryView":952
- *     cdef int i, j
- *     for i in range(ndim / 2):
- *         j = ndim - 1 - i             # <<<<<<<<<<<<<<
- *         strides[i], strides[j] = strides[j], strides[i]
- *         shape[i], shape[j] = shape[j], shape[i]
- */
-    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
-
-    /* "View.MemoryView":953
- *     for i in range(ndim / 2):
- *         j = ndim - 1 - i
- *         strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
- *         shape[i], shape[j] = shape[j], shape[i]
- * 
- */
-    __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
-    __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
-    (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
-    (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
-
-    /* "View.MemoryView":954
- *         j = ndim - 1 - i
- *         strides[i], strides[j] = strides[j], strides[i]
- *         shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
- * 
- *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
- */
-    __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
-    __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
-    (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
-    (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
-
-    /* "View.MemoryView":956
- *         shape[i], shape[j] = shape[j], shape[i]
- * 
- *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
- *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
- * 
- */
-    __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
-    if (!__pyx_t_8) {
-    } else {
-      __pyx_t_7 = __pyx_t_8;
-      goto __pyx_L6_bool_binop_done;
-    }
-    __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
-    __pyx_t_7 = __pyx_t_8;
-    __pyx_L6_bool_binop_done:;
-    if (__pyx_t_7) {
-
-      /* "View.MemoryView":957
- * 
- *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
- *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
- * 
- *     return 1
- */
-      __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L1_error)
-
-      /* "View.MemoryView":956
- *         shape[i], shape[j] = shape[j], shape[i]
- * 
- *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
- *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
- * 
- */
-    }
-  }
-
-  /* "View.MemoryView":959
- *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
- * 
- *     return 1             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = 1;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":943
- * 
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
- *     cdef int ndim = memslice.memview.view.ndim
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  {
-    #ifdef WITH_THREAD
-    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-    #endif
-    __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-    #ifdef WITH_THREAD
-    __Pyx_PyGILState_Release(__pyx_gilstate_save);
-    #endif
-  }
-  __pyx_r = 0;
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "View.MemoryView":976
- *     cdef int (*to_dtype_func)(char *, object) except 0
- * 
- *     def __dealloc__(self):             # <<<<<<<<<<<<<<
- *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
- * 
- */
-
-/* Python wrapper */
-static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
-  __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__dealloc__", 0);
-
-  /* "View.MemoryView":977
- * 
- *     def __dealloc__(self):
- *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
- * 
- *     cdef convert_item_to_object(self, char *itemp):
- */
-  __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
-
-  /* "View.MemoryView":976
- *     cdef int (*to_dtype_func)(char *, object) except 0
- * 
- *     def __dealloc__(self):             # <<<<<<<<<<<<<<
- *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
- * 
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":979
- *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
- * 
- *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
- *         if self.to_object_func != NULL:
- *             return self.to_object_func(itemp)
- */
-
-static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("convert_item_to_object", 0);
-
-  /* "View.MemoryView":980
- * 
- *     cdef convert_item_to_object(self, char *itemp):
- *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
- *             return self.to_object_func(itemp)
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":981
- *     cdef convert_item_to_object(self, char *itemp):
- *         if self.to_object_func != NULL:
- *             return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
- *         else:
- *             return memoryview.convert_item_to_object(self, itemp)
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 981, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_r = __pyx_t_2;
-    __pyx_t_2 = 0;
-    goto __pyx_L0;
-
-    /* "View.MemoryView":980
- * 
- *     cdef convert_item_to_object(self, char *itemp):
- *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
- *             return self.to_object_func(itemp)
- *         else:
- */
-  }
-
-  /* "View.MemoryView":983
- *             return self.to_object_func(itemp)
- *         else:
- *             return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):
- */
-  /*else*/ {
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 983, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_r = __pyx_t_2;
-    __pyx_t_2 = 0;
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":979
- *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
- * 
- *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
- *         if self.to_object_func != NULL:
- *             return self.to_object_func(itemp)
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":985
- *             return memoryview.convert_item_to_object(self, itemp)
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
- *         if self.to_dtype_func != NULL:
- *             self.to_dtype_func(itemp, value)
- */
-
-static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
-
-  /* "View.MemoryView":986
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):
- *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
- *             self.to_dtype_func(itemp, value)
- *         else:
- */
-  __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":987
- *     cdef assign_item_from_object(self, char *itemp, object value):
- *         if self.to_dtype_func != NULL:
- *             self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
- *         else:
- *             memoryview.assign_item_from_object(self, itemp, value)
- */
-    __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 987, __pyx_L1_error)
-
-    /* "View.MemoryView":986
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):
- *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
- *             self.to_dtype_func(itemp, value)
- *         else:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":989
- *             self.to_dtype_func(itemp, value)
- *         else:
- *             memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
- * 
- *     @property
- */
-  /*else*/ {
-    __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 989, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":985
- *             return memoryview.convert_item_to_object(self, itemp)
- * 
- *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
- *         if self.to_dtype_func != NULL:
- *             self.to_dtype_func(itemp, value)
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":992
- * 
- *     @property
- *     def base(self):             # <<<<<<<<<<<<<<
- *         return self.from_object
- * 
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
-  __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__get__", 0);
-
-  /* "View.MemoryView":993
- *     @property
- *     def base(self):
- *         return self.from_object             # <<<<<<<<<<<<<<
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v_self->from_object);
-  __pyx_r = __pyx_v_self->from_object;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":992
- * 
- *     @property
- *     def base(self):             # <<<<<<<<<<<<<<
- *         return self.from_object
- * 
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 2, __pyx_L1_error)
-
-  /* "(tree fragment)":1
- * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
-  __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 4, __pyx_L1_error)
-
-  /* "(tree fragment)":3
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":999
- * 
- * @cname('__pyx_memoryview_fromslice')
- * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,             # <<<<<<<<<<<<<<
- *                           int ndim,
- *                           object (*to_object_func)(char *),
- */
-
-static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
-  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
-  Py_ssize_t __pyx_v_suboffset;
-  PyObject *__pyx_v_length = NULL;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  __Pyx_TypeInfo *__pyx_t_4;
-  Py_buffer __pyx_t_5;
-  Py_ssize_t *__pyx_t_6;
-  Py_ssize_t *__pyx_t_7;
-  Py_ssize_t *__pyx_t_8;
-  Py_ssize_t __pyx_t_9;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("memoryview_fromslice", 0);
-
-  /* "View.MemoryView":1007
- *     cdef _memoryviewslice result
- * 
- *     if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
- *         return None
- * 
- */
-  __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1008
- * 
- *     if <PyObject *> memviewslice.memview == Py_None:
- *         return None             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __Pyx_XDECREF(__pyx_r);
-    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-    goto __pyx_L0;
-
-    /* "View.MemoryView":1007
- *     cdef _memoryviewslice result
- * 
- *     if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
- *         return None
- * 
- */
-  }
-
-  /* "View.MemoryView":1013
- * 
- * 
- *     result = _memoryviewslice(None, 0, dtype_is_object)             # <<<<<<<<<<<<<<
- * 
- *     result.from_slice = memviewslice
- */
-  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1013, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_INCREF(Py_None);
-  __Pyx_GIVEREF(Py_None);
-  PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
-  __Pyx_INCREF(__pyx_int_0);
-  __Pyx_GIVEREF(__pyx_int_0);
-  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
-  __Pyx_GIVEREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
-  __pyx_t_2 = 0;
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":1015
- *     result = _memoryviewslice(None, 0, dtype_is_object)
- * 
- *     result.from_slice = memviewslice             # <<<<<<<<<<<<<<
- *     __PYX_INC_MEMVIEW(&memviewslice, 1)
- * 
- */
-  __pyx_v_result->from_slice = __pyx_v_memviewslice;
-
-  /* "View.MemoryView":1016
- * 
- *     result.from_slice = memviewslice
- *     __PYX_INC_MEMVIEW(&memviewslice, 1)             # <<<<<<<<<<<<<<
- * 
- *     result.from_object = (<memoryview> memviewslice.memview).base
- */
-  __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
-
-  /* "View.MemoryView":1018
- *     __PYX_INC_MEMVIEW(&memviewslice, 1)
- * 
- *     result.from_object = (<memoryview> memviewslice.memview).base             # <<<<<<<<<<<<<<
- *     result.typeinfo = memviewslice.memview.typeinfo
- * 
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1018, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __Pyx_GOTREF(__pyx_v_result->from_object);
-  __Pyx_DECREF(__pyx_v_result->from_object);
-  __pyx_v_result->from_object = __pyx_t_2;
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":1019
- * 
- *     result.from_object = (<memoryview> memviewslice.memview).base
- *     result.typeinfo = memviewslice.memview.typeinfo             # <<<<<<<<<<<<<<
- * 
- *     result.view = memviewslice.memview.view
- */
-  __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
-  __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
-
-  /* "View.MemoryView":1021
- *     result.typeinfo = memviewslice.memview.typeinfo
- * 
- *     result.view = memviewslice.memview.view             # <<<<<<<<<<<<<<
- *     result.view.buf = <void *> memviewslice.data
- *     result.view.ndim = ndim
- */
-  __pyx_t_5 = __pyx_v_memviewslice.memview->view;
-  __pyx_v_result->__pyx_base.view = __pyx_t_5;
-
-  /* "View.MemoryView":1022
- * 
- *     result.view = memviewslice.memview.view
- *     result.view.buf = <void *> memviewslice.data             # <<<<<<<<<<<<<<
- *     result.view.ndim = ndim
- *     (<__pyx_buffer *> &result.view).obj = Py_None
- */
-  __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
-
-  /* "View.MemoryView":1023
- *     result.view = memviewslice.memview.view
- *     result.view.buf = <void *> memviewslice.data
- *     result.view.ndim = ndim             # <<<<<<<<<<<<<<
- *     (<__pyx_buffer *> &result.view).obj = Py_None
- *     Py_INCREF(Py_None)
- */
-  __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
-
-  /* "View.MemoryView":1024
- *     result.view.buf = <void *> memviewslice.data
- *     result.view.ndim = ndim
- *     (<__pyx_buffer *> &result.view).obj = Py_None             # <<<<<<<<<<<<<<
- *     Py_INCREF(Py_None)
- * 
- */
-  ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
-
-  /* "View.MemoryView":1025
- *     result.view.ndim = ndim
- *     (<__pyx_buffer *> &result.view).obj = Py_None
- *     Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
- * 
- *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
- */
-  Py_INCREF(Py_None);
-
-  /* "View.MemoryView":1027
- *     Py_INCREF(Py_None)
- * 
- *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
- *         result.flags = PyBUF_RECORDS
- *     else:
- */
-  __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1028
- * 
- *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
- *         result.flags = PyBUF_RECORDS             # <<<<<<<<<<<<<<
- *     else:
- *         result.flags = PyBUF_RECORDS_RO
- */
-    __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
-
-    /* "View.MemoryView":1027
- *     Py_INCREF(Py_None)
- * 
- *     if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
- *         result.flags = PyBUF_RECORDS
- *     else:
- */
-    goto __pyx_L4;
-  }
-
-  /* "View.MemoryView":1030
- *         result.flags = PyBUF_RECORDS
- *     else:
- *         result.flags = PyBUF_RECORDS_RO             # <<<<<<<<<<<<<<
- * 
- *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
- */
-  /*else*/ {
-    __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
-  }
-  __pyx_L4:;
-
-  /* "View.MemoryView":1032
- *         result.flags = PyBUF_RECORDS_RO
- * 
- *     result.view.shape = <Py_ssize_t *> result.from_slice.shape             # <<<<<<<<<<<<<<
- *     result.view.strides = <Py_ssize_t *> result.from_slice.strides
- * 
- */
-  __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
-
-  /* "View.MemoryView":1033
- * 
- *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
- *     result.view.strides = <Py_ssize_t *> result.from_slice.strides             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
-
-  /* "View.MemoryView":1036
- * 
- * 
- *     result.view.suboffsets = NULL             # <<<<<<<<<<<<<<
- *     for suboffset in result.from_slice.suboffsets[:ndim]:
- *         if suboffset >= 0:
- */
-  __pyx_v_result->__pyx_base.view.suboffsets = NULL;
-
-  /* "View.MemoryView":1037
- * 
- *     result.view.suboffsets = NULL
- *     for suboffset in result.from_slice.suboffsets[:ndim]:             # <<<<<<<<<<<<<<
- *         if suboffset >= 0:
- *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
- */
-  __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
-  for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
-    __pyx_t_6 = __pyx_t_8;
-    __pyx_v_suboffset = (__pyx_t_6[0]);
-
-    /* "View.MemoryView":1038
- *     result.view.suboffsets = NULL
- *     for suboffset in result.from_slice.suboffsets[:ndim]:
- *         if suboffset >= 0:             # <<<<<<<<<<<<<<
- *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
- *             break
- */
-    __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":1039
- *     for suboffset in result.from_slice.suboffsets[:ndim]:
- *         if suboffset >= 0:
- *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets             # <<<<<<<<<<<<<<
- *             break
- * 
- */
-      __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
-
-      /* "View.MemoryView":1040
- *         if suboffset >= 0:
- *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
- *             break             # <<<<<<<<<<<<<<
- * 
- *     result.view.len = result.view.itemsize
- */
-      goto __pyx_L6_break;
-
-      /* "View.MemoryView":1038
- *     result.view.suboffsets = NULL
- *     for suboffset in result.from_slice.suboffsets[:ndim]:
- *         if suboffset >= 0:             # <<<<<<<<<<<<<<
- *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
- *             break
- */
-    }
-  }
-  __pyx_L6_break:;
-
-  /* "View.MemoryView":1042
- *             break
- * 
- *     result.view.len = result.view.itemsize             # <<<<<<<<<<<<<<
- *     for length in result.view.shape[:ndim]:
- *         result.view.len *= length
- */
-  __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
-  __pyx_v_result->__pyx_base.view.len = __pyx_t_9;
-
-  /* "View.MemoryView":1043
- * 
- *     result.view.len = result.view.itemsize
- *     for length in result.view.shape[:ndim]:             # <<<<<<<<<<<<<<
- *         result.view.len *= length
- * 
- */
-  __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
-  for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
-    __pyx_t_6 = __pyx_t_8;
-    __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1043, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
-    __pyx_t_2 = 0;
-
-    /* "View.MemoryView":1044
- *     result.view.len = result.view.itemsize
- *     for length in result.view.shape[:ndim]:
- *         result.view.len *= length             # <<<<<<<<<<<<<<
- * 
- *     result.to_object_func = to_object_func
- */
-    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1044, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1044, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1044, __pyx_L1_error)
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __pyx_v_result->__pyx_base.view.len = __pyx_t_9;
-  }
-
-  /* "View.MemoryView":1046
- *         result.view.len *= length
- * 
- *     result.to_object_func = to_object_func             # <<<<<<<<<<<<<<
- *     result.to_dtype_func = to_dtype_func
- * 
- */
-  __pyx_v_result->to_object_func = __pyx_v_to_object_func;
-
-  /* "View.MemoryView":1047
- * 
- *     result.to_object_func = to_object_func
- *     result.to_dtype_func = to_dtype_func             # <<<<<<<<<<<<<<
- * 
- *     return result
- */
-  __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
-
-  /* "View.MemoryView":1049
- *     result.to_dtype_func = to_dtype_func
- * 
- *     return result             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_get_slice_from_memoryview')
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(((PyObject *)__pyx_v_result));
-  __pyx_r = ((PyObject *)__pyx_v_result);
-  goto __pyx_L0;
-
-  /* "View.MemoryView":999
- * 
- * @cname('__pyx_memoryview_fromslice')
- * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,             # <<<<<<<<<<<<<<
- *                           int ndim,
- *                           object (*to_object_func)(char *),
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_result);
-  __Pyx_XDECREF(__pyx_v_length);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1052
- * 
- * @cname('__pyx_memoryview_get_slice_from_memoryview')
- * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,             # <<<<<<<<<<<<<<
- *                                                    __Pyx_memviewslice *mslice) except NULL:
- *     cdef _memoryviewslice obj
- */
-
-static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
-  struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
-  __Pyx_memviewslice *__pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *__pyx_t_3 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("get_slice_from_memview", 0);
-
-  /* "View.MemoryView":1055
- *                                                    __Pyx_memviewslice *mslice) except NULL:
- *     cdef _memoryviewslice obj
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         obj = memview
- *         return &obj.from_slice
- */
-  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); 
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1056
- *     cdef _memoryviewslice obj
- *     if isinstance(memview, _memoryviewslice):
- *         obj = memview             # <<<<<<<<<<<<<<
- *         return &obj.from_slice
- *     else:
- */
-    if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1056, __pyx_L1_error)
-    __pyx_t_3 = ((PyObject *)__pyx_v_memview);
-    __Pyx_INCREF(__pyx_t_3);
-    __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
-    __pyx_t_3 = 0;
-
-    /* "View.MemoryView":1057
- *     if isinstance(memview, _memoryviewslice):
- *         obj = memview
- *         return &obj.from_slice             # <<<<<<<<<<<<<<
- *     else:
- *         slice_copy(memview, mslice)
- */
-    __pyx_r = (&__pyx_v_obj->from_slice);
-    goto __pyx_L0;
-
-    /* "View.MemoryView":1055
- *                                                    __Pyx_memviewslice *mslice) except NULL:
- *     cdef _memoryviewslice obj
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         obj = memview
- *         return &obj.from_slice
- */
-  }
-
-  /* "View.MemoryView":1059
- *         return &obj.from_slice
- *     else:
- *         slice_copy(memview, mslice)             # <<<<<<<<<<<<<<
- *         return mslice
- * 
- */
-  /*else*/ {
-    __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
-
-    /* "View.MemoryView":1060
- *     else:
- *         slice_copy(memview, mslice)
- *         return mslice             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_slice_copy')
- */
-    __pyx_r = __pyx_v_mslice;
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":1052
- * 
- * @cname('__pyx_memoryview_get_slice_from_memoryview')
- * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,             # <<<<<<<<<<<<<<
- *                                                    __Pyx_memviewslice *mslice) except NULL:
- *     cdef _memoryviewslice obj
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1063
- * 
- * @cname('__pyx_memoryview_slice_copy')
- * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst):             # <<<<<<<<<<<<<<
- *     cdef int dim
- *     cdef (Py_ssize_t*) shape, strides, suboffsets
- */
-
-static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
-  int __pyx_v_dim;
-  Py_ssize_t *__pyx_v_shape;
-  Py_ssize_t *__pyx_v_strides;
-  Py_ssize_t *__pyx_v_suboffsets;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t *__pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  __Pyx_RefNannySetupContext("slice_copy", 0);
-
-  /* "View.MemoryView":1067
- *     cdef (Py_ssize_t*) shape, strides, suboffsets
- * 
- *     shape = memview.view.shape             # <<<<<<<<<<<<<<
- *     strides = memview.view.strides
- *     suboffsets = memview.view.suboffsets
- */
-  __pyx_t_1 = __pyx_v_memview->view.shape;
-  __pyx_v_shape = __pyx_t_1;
-
-  /* "View.MemoryView":1068
- * 
- *     shape = memview.view.shape
- *     strides = memview.view.strides             # <<<<<<<<<<<<<<
- *     suboffsets = memview.view.suboffsets
- * 
- */
-  __pyx_t_1 = __pyx_v_memview->view.strides;
-  __pyx_v_strides = __pyx_t_1;
-
-  /* "View.MemoryView":1069
- *     shape = memview.view.shape
- *     strides = memview.view.strides
- *     suboffsets = memview.view.suboffsets             # <<<<<<<<<<<<<<
- * 
- *     dst.memview = <__pyx_memoryview *> memview
- */
-  __pyx_t_1 = __pyx_v_memview->view.suboffsets;
-  __pyx_v_suboffsets = __pyx_t_1;
-
-  /* "View.MemoryView":1071
- *     suboffsets = memview.view.suboffsets
- * 
- *     dst.memview = <__pyx_memoryview *> memview             # <<<<<<<<<<<<<<
- *     dst.data = <char *> memview.view.buf
- * 
- */
-  __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
-
-  /* "View.MemoryView":1072
- * 
- *     dst.memview = <__pyx_memoryview *> memview
- *     dst.data = <char *> memview.view.buf             # <<<<<<<<<<<<<<
- * 
- *     for dim in range(memview.view.ndim):
- */
-  __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
-
-  /* "View.MemoryView":1074
- *     dst.data = <char *> memview.view.buf
- * 
- *     for dim in range(memview.view.ndim):             # <<<<<<<<<<<<<<
- *         dst.shape[dim] = shape[dim]
- *         dst.strides[dim] = strides[dim]
- */
-  __pyx_t_2 = __pyx_v_memview->view.ndim;
-  __pyx_t_3 = __pyx_t_2;
-  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-    __pyx_v_dim = __pyx_t_4;
-
-    /* "View.MemoryView":1075
- * 
- *     for dim in range(memview.view.ndim):
- *         dst.shape[dim] = shape[dim]             # <<<<<<<<<<<<<<
- *         dst.strides[dim] = strides[dim]
- *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
- */
-    (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
-
-    /* "View.MemoryView":1076
- *     for dim in range(memview.view.ndim):
- *         dst.shape[dim] = shape[dim]
- *         dst.strides[dim] = strides[dim]             # <<<<<<<<<<<<<<
- *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
- * 
- */
-    (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
-
-    /* "View.MemoryView":1077
- *         dst.shape[dim] = shape[dim]
- *         dst.strides[dim] = strides[dim]
- *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_copy_object')
- */
-    if ((__pyx_v_suboffsets != 0)) {
-      __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
-    } else {
-      __pyx_t_5 = -1L;
-    }
-    (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
-  }
-
-  /* "View.MemoryView":1063
- * 
- * @cname('__pyx_memoryview_slice_copy')
- * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst):             # <<<<<<<<<<<<<<
- *     cdef int dim
- *     cdef (Py_ssize_t*) shape, strides, suboffsets
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":1080
- * 
- * @cname('__pyx_memoryview_copy_object')
- * cdef memoryview_copy(memoryview memview):             # <<<<<<<<<<<<<<
- *     "Create a new memoryview object"
- *     cdef __Pyx_memviewslice memviewslice
- */
-
-static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
-  __Pyx_memviewslice __pyx_v_memviewslice;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("memoryview_copy", 0);
-
-  /* "View.MemoryView":1083
- *     "Create a new memoryview object"
- *     cdef __Pyx_memviewslice memviewslice
- *     slice_copy(memview, &memviewslice)             # <<<<<<<<<<<<<<
- *     return memoryview_copy_from_slice(memview, &memviewslice)
- * 
- */
-  __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
-
-  /* "View.MemoryView":1084
- *     cdef __Pyx_memviewslice memviewslice
- *     slice_copy(memview, &memviewslice)
- *     return memoryview_copy_from_slice(memview, &memviewslice)             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_copy_object_from_slice')
- */
-  __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1084, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_r = __pyx_t_1;
-  __pyx_t_1 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1080
- * 
- * @cname('__pyx_memoryview_copy_object')
- * cdef memoryview_copy(memoryview memview):             # <<<<<<<<<<<<<<
- *     "Create a new memoryview object"
- *     cdef __Pyx_memviewslice memviewslice
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1087
- * 
- * @cname('__pyx_memoryview_copy_object_from_slice')
- * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice):             # <<<<<<<<<<<<<<
- *     """
- *     Create a new memoryview object from a given memoryview object and slice.
- */
-
-static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
-  PyObject *(*__pyx_v_to_object_func)(char *);
-  int (*__pyx_v_to_dtype_func)(char *, PyObject *);
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  int __pyx_t_2;
-  PyObject *(*__pyx_t_3)(char *);
-  int (*__pyx_t_4)(char *, PyObject *);
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
-
-  /* "View.MemoryView":1094
- *     cdef int (*to_dtype_func)(char *, object) except 0
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         to_object_func = (<_memoryviewslice> memview).to_object_func
- *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
- */
-  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); 
-  __pyx_t_2 = (__pyx_t_1 != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1095
- * 
- *     if isinstance(memview, _memoryviewslice):
- *         to_object_func = (<_memoryviewslice> memview).to_object_func             # <<<<<<<<<<<<<<
- *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
- *     else:
- */
-    __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
-    __pyx_v_to_object_func = __pyx_t_3;
-
-    /* "View.MemoryView":1096
- *     if isinstance(memview, _memoryviewslice):
- *         to_object_func = (<_memoryviewslice> memview).to_object_func
- *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func             # <<<<<<<<<<<<<<
- *     else:
- *         to_object_func = NULL
- */
-    __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
-    __pyx_v_to_dtype_func = __pyx_t_4;
-
-    /* "View.MemoryView":1094
- *     cdef int (*to_dtype_func)(char *, object) except 0
- * 
- *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
- *         to_object_func = (<_memoryviewslice> memview).to_object_func
- *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":1098
- *         to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
- *     else:
- *         to_object_func = NULL             # <<<<<<<<<<<<<<
- *         to_dtype_func = NULL
- * 
- */
-  /*else*/ {
-    __pyx_v_to_object_func = NULL;
-
-    /* "View.MemoryView":1099
- *     else:
- *         to_object_func = NULL
- *         to_dtype_func = NULL             # <<<<<<<<<<<<<<
- * 
- *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,
- */
-    __pyx_v_to_dtype_func = NULL;
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":1101
- *         to_dtype_func = NULL
- * 
- *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,             # <<<<<<<<<<<<<<
- *                                 to_object_func, to_dtype_func,
- *                                 memview.dtype_is_object)
- */
-  __Pyx_XDECREF(__pyx_r);
-
-  /* "View.MemoryView":1103
- *     return memoryview_fromslice(memviewslice[0], memview.view.ndim,
- *                                 to_object_func, to_dtype_func,
- *                                 memview.dtype_is_object)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1101, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_5);
-  __pyx_r = __pyx_t_5;
-  __pyx_t_5 = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1087
- * 
- * @cname('__pyx_memoryview_copy_object_from_slice')
- * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice):             # <<<<<<<<<<<<<<
- *     """
- *     Create a new memoryview object from a given memoryview object and slice.
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1109
- * 
- * 
- * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:             # <<<<<<<<<<<<<<
- *     if arg < 0:
- *         return -arg
- */
-
-static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
-  Py_ssize_t __pyx_r;
-  int __pyx_t_1;
-
-  /* "View.MemoryView":1110
- * 
- * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
- *     if arg < 0:             # <<<<<<<<<<<<<<
- *         return -arg
- *     else:
- */
-  __pyx_t_1 = ((__pyx_v_arg < 0) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1111
- * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
- *     if arg < 0:
- *         return -arg             # <<<<<<<<<<<<<<
- *     else:
- *         return arg
- */
-    __pyx_r = (-__pyx_v_arg);
-    goto __pyx_L0;
-
-    /* "View.MemoryView":1110
- * 
- * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
- *     if arg < 0:             # <<<<<<<<<<<<<<
- *         return -arg
- *     else:
- */
-  }
-
-  /* "View.MemoryView":1113
- *         return -arg
- *     else:
- *         return arg             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_get_best_slice_order')
- */
-  /*else*/ {
-    __pyx_r = __pyx_v_arg;
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":1109
- * 
- * 
- * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:             # <<<<<<<<<<<<<<
- *     if arg < 0:
- *         return -arg
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1116
- * 
- * @cname('__pyx_get_best_slice_order')
- * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     Figure out the best memory access order for a given slice.
- */
-
-static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
-  int __pyx_v_i;
-  Py_ssize_t __pyx_v_c_stride;
-  Py_ssize_t __pyx_v_f_stride;
-  char __pyx_r;
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-
-  /* "View.MemoryView":1121
- *     """
- *     cdef int i
- *     cdef Py_ssize_t c_stride = 0             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t f_stride = 0
- * 
- */
-  __pyx_v_c_stride = 0;
-
-  /* "View.MemoryView":1122
- *     cdef int i
- *     cdef Py_ssize_t c_stride = 0
- *     cdef Py_ssize_t f_stride = 0             # <<<<<<<<<<<<<<
- * 
- *     for i in range(ndim - 1, -1, -1):
- */
-  __pyx_v_f_stride = 0;
-
-  /* "View.MemoryView":1124
- *     cdef Py_ssize_t f_stride = 0
- * 
- *     for i in range(ndim - 1, -1, -1):             # <<<<<<<<<<<<<<
- *         if mslice.shape[i] > 1:
- *             c_stride = mslice.strides[i]
- */
-  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
-    __pyx_v_i = __pyx_t_1;
-
-    /* "View.MemoryView":1125
- * 
- *     for i in range(ndim - 1, -1, -1):
- *         if mslice.shape[i] > 1:             # <<<<<<<<<<<<<<
- *             c_stride = mslice.strides[i]
- *             break
- */
-    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1126
- *     for i in range(ndim - 1, -1, -1):
- *         if mslice.shape[i] > 1:
- *             c_stride = mslice.strides[i]             # <<<<<<<<<<<<<<
- *             break
- * 
- */
-      __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
-
-      /* "View.MemoryView":1127
- *         if mslice.shape[i] > 1:
- *             c_stride = mslice.strides[i]
- *             break             # <<<<<<<<<<<<<<
- * 
- *     for i in range(ndim):
- */
-      goto __pyx_L4_break;
-
-      /* "View.MemoryView":1125
- * 
- *     for i in range(ndim - 1, -1, -1):
- *         if mslice.shape[i] > 1:             # <<<<<<<<<<<<<<
- *             c_stride = mslice.strides[i]
- *             break
- */
-    }
-  }
-  __pyx_L4_break:;
-
-  /* "View.MemoryView":1129
- *             break
- * 
- *     for i in range(ndim):             # <<<<<<<<<<<<<<
- *         if mslice.shape[i] > 1:
- *             f_stride = mslice.strides[i]
- */
-  __pyx_t_1 = __pyx_v_ndim;
-  __pyx_t_3 = __pyx_t_1;
-  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-    __pyx_v_i = __pyx_t_4;
-
-    /* "View.MemoryView":1130
- * 
- *     for i in range(ndim):
- *         if mslice.shape[i] > 1:             # <<<<<<<<<<<<<<
- *             f_stride = mslice.strides[i]
- *             break
- */
-    __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1131
- *     for i in range(ndim):
- *         if mslice.shape[i] > 1:
- *             f_stride = mslice.strides[i]             # <<<<<<<<<<<<<<
- *             break
- * 
- */
-      __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
-
-      /* "View.MemoryView":1132
- *         if mslice.shape[i] > 1:
- *             f_stride = mslice.strides[i]
- *             break             # <<<<<<<<<<<<<<
- * 
- *     if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
- */
-      goto __pyx_L7_break;
-
-      /* "View.MemoryView":1130
- * 
- *     for i in range(ndim):
- *         if mslice.shape[i] > 1:             # <<<<<<<<<<<<<<
- *             f_stride = mslice.strides[i]
- *             break
- */
-    }
-  }
-  __pyx_L7_break:;
-
-  /* "View.MemoryView":1134
- *             break
- * 
- *     if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):             # <<<<<<<<<<<<<<
- *         return 'C'
- *     else:
- */
-  __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1135
- * 
- *     if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
- *         return 'C'             # <<<<<<<<<<<<<<
- *     else:
- *         return 'F'
- */
-    __pyx_r = 'C';
-    goto __pyx_L0;
-
-    /* "View.MemoryView":1134
- *             break
- * 
- *     if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):             # <<<<<<<<<<<<<<
- *         return 'C'
- *     else:
- */
-  }
-
-  /* "View.MemoryView":1137
- *         return 'C'
- *     else:
- *         return 'F'             # <<<<<<<<<<<<<<
- * 
- * @cython.cdivision(True)
- */
-  /*else*/ {
-    __pyx_r = 'F';
-    goto __pyx_L0;
-  }
-
-  /* "View.MemoryView":1116
- * 
- * @cname('__pyx_get_best_slice_order')
- * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil:             # <<<<<<<<<<<<<<
- *     """
- *     Figure out the best memory access order for a given slice.
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
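
The function above implements the `get_best_order` heuristic from the embedded Cython source: take the stride of the innermost dimension with extent > 1 and the stride of the outermost such dimension, and report 'C' when the innermost stride is the smaller of the two in absolute value (a row-major layout keeps its smallest stride innermost, a column-major one outermost). A minimal sketch of the same comparison over plain C arrays, with strides in elements rather than bytes purely for readability:

```c
#include <stdio.h>
#include <stdlib.h>

/* Pick 'C' or 'F' by comparing the innermost significant stride against
 * the outermost one, as the generated get_best_order does. */
static char best_order_sketch(const long *shape, const long *strides, int ndim) {
    long c_stride = 0, f_stride = 0;
    for (int i = ndim - 1; i >= 0; i--)       /* innermost dim with extent > 1 */
        if (shape[i] > 1) { c_stride = strides[i]; break; }
    for (int i = 0; i < ndim; i++)            /* outermost dim with extent > 1 */
        if (shape[i] > 1) { f_stride = strides[i]; break; }
    return labs(c_stride) <= labs(f_stride) ? 'C' : 'F';
}

int main(void) {
    long shape[]  = {3, 4};
    long c_like[] = {4, 1};   /* row-major strides */
    long f_like[] = {1, 3};   /* column-major strides */
    printf("%c %c\n", best_order_sketch(shape, c_like, 2),
                      best_order_sketch(shape, f_like, 2));  /* prints: C F */
    return 0;
}
```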
-
-/* "View.MemoryView":1140
- * 
- * @cython.cdivision(True)
- * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,             # <<<<<<<<<<<<<<
- *                                    char *dst_data, Py_ssize_t *dst_strides,
- *                                    Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
- */
-
-static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
-  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
-  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
-  Py_ssize_t __pyx_v_dst_extent;
-  Py_ssize_t __pyx_v_src_stride;
-  Py_ssize_t __pyx_v_dst_stride;
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  Py_ssize_t __pyx_t_4;
-  Py_ssize_t __pyx_t_5;
-  Py_ssize_t __pyx_t_6;
-
-  /* "View.MemoryView":1147
- * 
- *     cdef Py_ssize_t i
- *     cdef Py_ssize_t src_extent = src_shape[0]             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t dst_extent = dst_shape[0]
- *     cdef Py_ssize_t src_stride = src_strides[0]
- */
-  __pyx_v_src_extent = (__pyx_v_src_shape[0]);
-
-  /* "View.MemoryView":1148
- *     cdef Py_ssize_t i
- *     cdef Py_ssize_t src_extent = src_shape[0]
- *     cdef Py_ssize_t dst_extent = dst_shape[0]             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t src_stride = src_strides[0]
- *     cdef Py_ssize_t dst_stride = dst_strides[0]
- */
-  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
-
-  /* "View.MemoryView":1149
- *     cdef Py_ssize_t src_extent = src_shape[0]
- *     cdef Py_ssize_t dst_extent = dst_shape[0]
- *     cdef Py_ssize_t src_stride = src_strides[0]             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t dst_stride = dst_strides[0]
- * 
- */
-  __pyx_v_src_stride = (__pyx_v_src_strides[0]);
-
-  /* "View.MemoryView":1150
- *     cdef Py_ssize_t dst_extent = dst_shape[0]
- *     cdef Py_ssize_t src_stride = src_strides[0]
- *     cdef Py_ssize_t dst_stride = dst_strides[0]             # <<<<<<<<<<<<<<
- * 
- *     if ndim == 1:
- */
-  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
-
-  /* "View.MemoryView":1152
- *     cdef Py_ssize_t dst_stride = dst_strides[0]
- * 
- *     if ndim == 1:             # <<<<<<<<<<<<<<
- *        if (src_stride > 0 and dst_stride > 0 and
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- */
-  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1153
- * 
- *     if ndim == 1:
- *        if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- *            memcpy(dst_data, src_data, itemsize * dst_extent)
- */
-    __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
-    if (__pyx_t_2) {
-    } else {
-      __pyx_t_1 = __pyx_t_2;
-      goto __pyx_L5_bool_binop_done;
-    }
-    __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
-    if (__pyx_t_2) {
-    } else {
-      __pyx_t_1 = __pyx_t_2;
-      goto __pyx_L5_bool_binop_done;
-    }
-
-    /* "View.MemoryView":1154
- *     if ndim == 1:
- *        if (src_stride > 0 and dst_stride > 0 and
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):             # <<<<<<<<<<<<<<
- *            memcpy(dst_data, src_data, itemsize * dst_extent)
- *        else:
- */
-    __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
-    if (__pyx_t_2) {
-      __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
-    }
-    __pyx_t_3 = (__pyx_t_2 != 0);
-    __pyx_t_1 = __pyx_t_3;
-    __pyx_L5_bool_binop_done:;
-
-    /* "View.MemoryView":1153
- * 
- *     if ndim == 1:
- *        if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- *            memcpy(dst_data, src_data, itemsize * dst_extent)
- */
-    if (__pyx_t_1) {
-
-      /* "View.MemoryView":1155
- *        if (src_stride > 0 and dst_stride > 0 and
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- *            memcpy(dst_data, src_data, itemsize * dst_extent)             # <<<<<<<<<<<<<<
- *        else:
- *            for i in range(dst_extent):
- */
-      (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
-
-      /* "View.MemoryView":1153
- * 
- *     if ndim == 1:
- *        if (src_stride > 0 and dst_stride > 0 and             # <<<<<<<<<<<<<<
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- *            memcpy(dst_data, src_data, itemsize * dst_extent)
- */
-      goto __pyx_L4;
-    }
-
-    /* "View.MemoryView":1157
- *            memcpy(dst_data, src_data, itemsize * dst_extent)
- *        else:
- *            for i in range(dst_extent):             # <<<<<<<<<<<<<<
- *                memcpy(dst_data, src_data, itemsize)
- *                src_data += src_stride
- */
-    /*else*/ {
-      __pyx_t_4 = __pyx_v_dst_extent;
-      __pyx_t_5 = __pyx_t_4;
-      for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-        __pyx_v_i = __pyx_t_6;
-
-        /* "View.MemoryView":1158
- *        else:
- *            for i in range(dst_extent):
- *                memcpy(dst_data, src_data, itemsize)             # <<<<<<<<<<<<<<
- *                src_data += src_stride
- *                dst_data += dst_stride
- */
-        (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
-
-        /* "View.MemoryView":1159
- *            for i in range(dst_extent):
- *                memcpy(dst_data, src_data, itemsize)
- *                src_data += src_stride             # <<<<<<<<<<<<<<
- *                dst_data += dst_stride
- *     else:
- */
-        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
-
-        /* "View.MemoryView":1160
- *                memcpy(dst_data, src_data, itemsize)
- *                src_data += src_stride
- *                dst_data += dst_stride             # <<<<<<<<<<<<<<
- *     else:
- *         for i in range(dst_extent):
- */
-        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
-      }
-    }
-    __pyx_L4:;
-
-    /* "View.MemoryView":1152
- *     cdef Py_ssize_t dst_stride = dst_strides[0]
- * 
- *     if ndim == 1:             # <<<<<<<<<<<<<<
- *        if (src_stride > 0 and dst_stride > 0 and
- *            <size_t> src_stride == itemsize == <size_t> dst_stride):
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":1162
- *                dst_data += dst_stride
- *     else:
- *         for i in range(dst_extent):             # <<<<<<<<<<<<<<
- *             _copy_strided_to_strided(src_data, src_strides + 1,
- *                                      dst_data, dst_strides + 1,
- */
-  /*else*/ {
-    __pyx_t_4 = __pyx_v_dst_extent;
-    __pyx_t_5 = __pyx_t_4;
-    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-      __pyx_v_i = __pyx_t_6;
-
-      /* "View.MemoryView":1163
- *     else:
- *         for i in range(dst_extent):
- *             _copy_strided_to_strided(src_data, src_strides + 1,             # <<<<<<<<<<<<<<
- *                                      dst_data, dst_strides + 1,
- *                                      src_shape + 1, dst_shape + 1,
- */
-      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
-
-      /* "View.MemoryView":1167
- *                                      src_shape + 1, dst_shape + 1,
- *                                      ndim - 1, itemsize)
- *             src_data += src_stride             # <<<<<<<<<<<<<<
- *             dst_data += dst_stride
- * 
- */
-      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
-
-      /* "View.MemoryView":1168
- *                                      ndim - 1, itemsize)
- *             src_data += src_stride
- *             dst_data += dst_stride             # <<<<<<<<<<<<<<
- * 
- * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
- */
-      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
-    }
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":1140
- * 
- * @cython.cdivision(True)
- * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,             # <<<<<<<<<<<<<<
- *                                    char *dst_data, Py_ssize_t *dst_strides,
- *                                    Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
- */
-
-  /* function exit code */
-}
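
The recursion above copies one strided block into another: in the 1-D base case it collapses to a single `memcpy` when both strides are positive and equal to the itemsize, falls back to an element-wise `memcpy` loop otherwise, and for higher ranks peels off the leading dimension and recurses. A compact standalone sketch of the same shape, assuming matching source and destination extents (the generated code carries both shape arrays, with byte strides throughout):

```c
#include <stdio.h>
#include <string.h>

/* Recursive strided copy: contiguous fast path in 1-D, otherwise walk
 * the leading dimension and recurse on the remaining ones. */
static void strided_copy_sketch(const char *src, const long *src_strides,
                                char *dst, const long *dst_strides,
                                const long *shape, int ndim, size_t itemsize) {
    long extent = shape[0];
    long ss = src_strides[0], ds = dst_strides[0];
    if (ndim == 1) {
        if (ss > 0 && ds > 0 &&
            (size_t)ss == itemsize && (size_t)ds == itemsize) {
            memcpy(dst, src, itemsize * (size_t)extent);  /* both sides packed */
        } else {
            for (long i = 0; i < extent; i++) {
                memcpy(dst, src, itemsize);               /* one element at a time */
                src += ss;
                dst += ds;
            }
        }
    } else {
        for (long i = 0; i < extent; i++) {
            strided_copy_sketch(src, src_strides + 1, dst, dst_strides + 1,
                                shape + 1, ndim - 1, itemsize);
            src += ss;
            dst += ds;
        }
    }
}

int main(void) {
    int src[2][3] = {{1, 2, 3}, {4, 5, 6}}, dst[2][3] = {{0}};
    long shape[] = {2, 3};
    long strides[] = {3 * sizeof(int), sizeof(int)};
    strided_copy_sketch((const char *)src, strides, (char *)dst, strides,
                        shape, 2, sizeof(int));
    printf("%d %d\n", dst[0][0], dst[1][2]);  /* prints: 1 6 */
    return 0;
}
```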
-
-/* "View.MemoryView":1170
- *             dst_data += dst_stride
- * 
- * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,             # <<<<<<<<<<<<<<
- *                                   __Pyx_memviewslice *dst,
- *                                   int ndim, size_t itemsize) nogil:
- */
-
-static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
-
-  /* "View.MemoryView":1173
- *                                   __Pyx_memviewslice *dst,
- *                                   int ndim, size_t itemsize) nogil:
- *     _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides,             # <<<<<<<<<<<<<<
- *                              src.shape, dst.shape, ndim, itemsize)
- * 
- */
-  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
-
-  /* "View.MemoryView":1170
- *             dst_data += dst_stride
- * 
- * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,             # <<<<<<<<<<<<<<
- *                                   __Pyx_memviewslice *dst,
- *                                   int ndim, size_t itemsize) nogil:
- */
-
-  /* function exit code */
-}
-
-/* "View.MemoryView":1177
- * 
- * @cname('__pyx_memoryview_slice_get_size')
- * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:             # <<<<<<<<<<<<<<
- *     "Return the size of the memory occupied by the slice in number of bytes"
- *     cdef Py_ssize_t shape, size = src.memview.view.itemsize
- */
-
-static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
-  Py_ssize_t __pyx_v_shape;
-  Py_ssize_t __pyx_v_size;
-  Py_ssize_t __pyx_r;
-  Py_ssize_t __pyx_t_1;
-  Py_ssize_t *__pyx_t_2;
-  Py_ssize_t *__pyx_t_3;
-  Py_ssize_t *__pyx_t_4;
-
-  /* "View.MemoryView":1179
- * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
- *     "Return the size of the memory occupied by the slice in number of bytes"
- *     cdef Py_ssize_t shape, size = src.memview.view.itemsize             # <<<<<<<<<<<<<<
- * 
- *     for shape in src.shape[:ndim]:
- */
-  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
-  __pyx_v_size = __pyx_t_1;
-
-  /* "View.MemoryView":1181
- *     cdef Py_ssize_t shape, size = src.memview.view.itemsize
- * 
- *     for shape in src.shape[:ndim]:             # <<<<<<<<<<<<<<
- *         size *= shape
- * 
- */
-  __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
-  for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
-    __pyx_t_2 = __pyx_t_4;
-    __pyx_v_shape = (__pyx_t_2[0]);
-
-    /* "View.MemoryView":1182
- * 
- *     for shape in src.shape[:ndim]:
- *         size *= shape             # <<<<<<<<<<<<<<
- * 
- *     return size
- */
-    __pyx_v_size = (__pyx_v_size * __pyx_v_shape);
-  }
-
-  /* "View.MemoryView":1184
- *         size *= shape
- * 
- *     return size             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_fill_contig_strides_array')
- */
-  __pyx_r = __pyx_v_size;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1177
- * 
- * @cname('__pyx_memoryview_slice_get_size')
- * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:             # <<<<<<<<<<<<<<
- *     "Return the size of the memory occupied by the slice in number of bytes"
- *     cdef Py_ssize_t shape, size = src.memview.view.itemsize
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
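
`slice_get_size` above is simply the itemsize multiplied by every extent of the slice; a minimal equivalent:

```c
#include <stdio.h>

/* Size in bytes of a dense slice: itemsize times the product of extents. */
static long slice_size_sketch(const long *shape, int ndim, long itemsize) {
    long size = itemsize;
    for (int i = 0; i < ndim; i++)
        size *= shape[i];
    return size;
}

int main(void) {
    long shape[] = {2, 3, 4};
    printf("%ld\n", slice_size_sketch(shape, 3, 8));  /* 192 bytes */
    return 0;
}
```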
-
-/* "View.MemoryView":1187
- * 
- * @cname('__pyx_fill_contig_strides_array')
- * cdef Py_ssize_t fill_contig_strides_array(             # <<<<<<<<<<<<<<
- *                 Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
- *                 int ndim, char order) nogil:
- */
-
-static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
-  int __pyx_v_idx;
-  Py_ssize_t __pyx_r;
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-
-  /* "View.MemoryView":1196
- *     cdef int idx
- * 
- *     if order == 'F':             # <<<<<<<<<<<<<<
- *         for idx in range(ndim):
- *             strides[idx] = stride
- */
-  __pyx_t_1 = ((__pyx_v_order == 'F') != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1197
- * 
- *     if order == 'F':
- *         for idx in range(ndim):             # <<<<<<<<<<<<<<
- *             strides[idx] = stride
- *             stride *= shape[idx]
- */
-    __pyx_t_2 = __pyx_v_ndim;
-    __pyx_t_3 = __pyx_t_2;
-    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-      __pyx_v_idx = __pyx_t_4;
-
-      /* "View.MemoryView":1198
- *     if order == 'F':
- *         for idx in range(ndim):
- *             strides[idx] = stride             # <<<<<<<<<<<<<<
- *             stride *= shape[idx]
- *     else:
- */
-      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
-
-      /* "View.MemoryView":1199
- *         for idx in range(ndim):
- *             strides[idx] = stride
- *             stride *= shape[idx]             # <<<<<<<<<<<<<<
- *     else:
- *         for idx in range(ndim - 1, -1, -1):
- */
-      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
-    }
-
-    /* "View.MemoryView":1196
- *     cdef int idx
- * 
- *     if order == 'F':             # <<<<<<<<<<<<<<
- *         for idx in range(ndim):
- *             strides[idx] = stride
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":1201
- *             stride *= shape[idx]
- *     else:
- *         for idx in range(ndim - 1, -1, -1):             # <<<<<<<<<<<<<<
- *             strides[idx] = stride
- *             stride *= shape[idx]
- */
-  /*else*/ {
-    for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
-      __pyx_v_idx = __pyx_t_2;
-
-      /* "View.MemoryView":1202
- *     else:
- *         for idx in range(ndim - 1, -1, -1):
- *             strides[idx] = stride             # <<<<<<<<<<<<<<
- *             stride *= shape[idx]
- * 
- */
-      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
-
-      /* "View.MemoryView":1203
- *         for idx in range(ndim - 1, -1, -1):
- *             strides[idx] = stride
- *             stride *= shape[idx]             # <<<<<<<<<<<<<<
- * 
- *     return stride
- */
-      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
-    }
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":1205
- *             stride *= shape[idx]
- * 
- *     return stride             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_copy_data_to_temp')
- */
-  __pyx_r = __pyx_v_stride;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1187
- * 
- * @cname('__pyx_fill_contig_strides_array')
- * cdef Py_ssize_t fill_contig_strides_array(             # <<<<<<<<<<<<<<
- *                 Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
- *                 int ndim, char order) nogil:
- */
-
-  /* function exit code */
-  __pyx_L0:;
-  return __pyx_r;
-}
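
The function above lays out contiguous strides for a requested order: 'F' accumulates the running stride from the first dimension outward, anything else from the last dimension inward, and the final running value is the total size in bytes. A standalone sketch:

```c
#include <stdio.h>

/* Fill strides for a contiguous layout of the given order, starting from
 * the itemsize; returns the accumulated stride (the total size). */
static long fill_strides_sketch(const long *shape, long *strides,
                                long stride, int ndim, char order) {
    if (order == 'F') {
        for (int idx = 0; idx < ndim; idx++) {   /* column-major: grow outward */
            strides[idx] = stride;
            stride *= shape[idx];
        }
    } else {
        for (int idx = ndim - 1; idx >= 0; idx--) {  /* row-major: grow inward */
            strides[idx] = stride;
            stride *= shape[idx];
        }
    }
    return stride;
}

int main(void) {
    long shape[] = {2, 3}, strides[2];
    fill_strides_sketch(shape, strides, 8, 2, 'C');
    printf("C: %ld %ld\n", strides[0], strides[1]);  /* C: 24 8 */
    fill_strides_sketch(shape, strides, 8, 2, 'F');
    printf("F: %ld %ld\n", strides[0], strides[1]);  /* F: 8 16 */
    return 0;
}
```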
-
-/* "View.MemoryView":1208
- * 
- * @cname('__pyx_memoryview_copy_data_to_temp')
- * cdef void *copy_data_to_temp(__Pyx_memviewslice *src,             # <<<<<<<<<<<<<<
- *                              __Pyx_memviewslice *tmpslice,
- *                              char order,
- */
-
-static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
-  int __pyx_v_i;
-  void *__pyx_v_result;
-  size_t __pyx_v_itemsize;
-  size_t __pyx_v_size;
-  void *__pyx_r;
-  Py_ssize_t __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  struct __pyx_memoryview_obj *__pyx_t_4;
-  int __pyx_t_5;
-  int __pyx_t_6;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-
-  /* "View.MemoryView":1219
- *     cdef void *result
- * 
- *     cdef size_t itemsize = src.memview.view.itemsize             # <<<<<<<<<<<<<<
- *     cdef size_t size = slice_get_size(src, ndim)
- * 
- */
-  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
-  __pyx_v_itemsize = __pyx_t_1;
-
-  /* "View.MemoryView":1220
- * 
- *     cdef size_t itemsize = src.memview.view.itemsize
- *     cdef size_t size = slice_get_size(src, ndim)             # <<<<<<<<<<<<<<
- * 
- *     result = malloc(size)
- */
-  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
-
-  /* "View.MemoryView":1222
- *     cdef size_t size = slice_get_size(src, ndim)
- * 
- *     result = malloc(size)             # <<<<<<<<<<<<<<
- *     if not result:
- *         _err(MemoryError, NULL)
- */
-  __pyx_v_result = malloc(__pyx_v_size);
-
-  /* "View.MemoryView":1223
- * 
- *     result = malloc(size)
- *     if not result:             # <<<<<<<<<<<<<<
- *         _err(MemoryError, NULL)
- * 
- */
-  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1224
- *     result = malloc(size)
- *     if not result:
- *         _err(MemoryError, NULL)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1224, __pyx_L1_error)
-
-    /* "View.MemoryView":1223
- * 
- *     result = malloc(size)
- *     if not result:             # <<<<<<<<<<<<<<
- *         _err(MemoryError, NULL)
- * 
- */
-  }
-
-  /* "View.MemoryView":1227
- * 
- * 
- *     tmpslice.data = <char *> result             # <<<<<<<<<<<<<<
- *     tmpslice.memview = src.memview
- *     for i in range(ndim):
- */
-  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
-
-  /* "View.MemoryView":1228
- * 
- *     tmpslice.data = <char *> result
- *     tmpslice.memview = src.memview             # <<<<<<<<<<<<<<
- *     for i in range(ndim):
- *         tmpslice.shape[i] = src.shape[i]
- */
-  __pyx_t_4 = __pyx_v_src->memview;
-  __pyx_v_tmpslice->memview = __pyx_t_4;
-
-  /* "View.MemoryView":1229
- *     tmpslice.data = <char *> result
- *     tmpslice.memview = src.memview
- *     for i in range(ndim):             # <<<<<<<<<<<<<<
- *         tmpslice.shape[i] = src.shape[i]
- *         tmpslice.suboffsets[i] = -1
- */
-  __pyx_t_3 = __pyx_v_ndim;
-  __pyx_t_5 = __pyx_t_3;
-  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-    __pyx_v_i = __pyx_t_6;
-
-    /* "View.MemoryView":1230
- *     tmpslice.memview = src.memview
- *     for i in range(ndim):
- *         tmpslice.shape[i] = src.shape[i]             # <<<<<<<<<<<<<<
- *         tmpslice.suboffsets[i] = -1
- * 
- */
-    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
-
-    /* "View.MemoryView":1231
- *     for i in range(ndim):
- *         tmpslice.shape[i] = src.shape[i]
- *         tmpslice.suboffsets[i] = -1             # <<<<<<<<<<<<<<
- * 
- *     fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
- */
-    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
-  }
-
-  /* "View.MemoryView":1233
- *         tmpslice.suboffsets[i] = -1
- * 
- *     fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,             # <<<<<<<<<<<<<<
- *                               ndim, order)
- * 
- */
-  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
-
-  /* "View.MemoryView":1237
- * 
- * 
- *     for i in range(ndim):             # <<<<<<<<<<<<<<
- *         if tmpslice.shape[i] == 1:
- *             tmpslice.strides[i] = 0
- */
-  __pyx_t_3 = __pyx_v_ndim;
-  __pyx_t_5 = __pyx_t_3;
-  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
-    __pyx_v_i = __pyx_t_6;
-
-    /* "View.MemoryView":1238
- * 
- *     for i in range(ndim):
- *         if tmpslice.shape[i] == 1:             # <<<<<<<<<<<<<<
- *             tmpslice.strides[i] = 0
- * 
- */
-    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1239
- *     for i in range(ndim):
- *         if tmpslice.shape[i] == 1:
- *             tmpslice.strides[i] = 0             # <<<<<<<<<<<<<<
- * 
- *     if slice_is_contig(src[0], order, ndim):
- */
-      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
-
-      /* "View.MemoryView":1238
- * 
- *     for i in range(ndim):
- *         if tmpslice.shape[i] == 1:             # <<<<<<<<<<<<<<
- *             tmpslice.strides[i] = 0
- * 
- */
-    }
-  }
-
-  /* "View.MemoryView":1241
- *             tmpslice.strides[i] = 0
- * 
- *     if slice_is_contig(src[0], order, ndim):             # <<<<<<<<<<<<<<
- *         memcpy(result, src.data, size)
- *     else:
- */
-  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1242
- * 
- *     if slice_is_contig(src[0], order, ndim):
- *         memcpy(result, src.data, size)             # <<<<<<<<<<<<<<
- *     else:
- *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)
- */
-    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
-
-    /* "View.MemoryView":1241
- *             tmpslice.strides[i] = 0
- * 
- *     if slice_is_contig(src[0], order, ndim):             # <<<<<<<<<<<<<<
- *         memcpy(result, src.data, size)
- *     else:
- */
-    goto __pyx_L9;
-  }
-
-  /* "View.MemoryView":1244
- *         memcpy(result, src.data, size)
- *     else:
- *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)             # <<<<<<<<<<<<<<
- * 
- *     return result
- */
-  /*else*/ {
-    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
-  }
-  __pyx_L9:;
-
-  /* "View.MemoryView":1246
- *         copy_strided_to_strided(src, tmpslice, ndim, itemsize)
- * 
- *     return result             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_r = __pyx_v_result;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1208
- * 
- * @cname('__pyx_memoryview_copy_data_to_temp')
- * cdef void *copy_data_to_temp(__Pyx_memviewslice *src,             # <<<<<<<<<<<<<<
- *                              __Pyx_memviewslice *tmpslice,
- *                              char order,
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  {
-    #ifdef WITH_THREAD
-    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-    #endif
-    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
-    #ifdef WITH_THREAD
-    __Pyx_PyGILState_Release(__pyx_gilstate_save);
-    #endif
-  }
-  __pyx_r = NULL;
-  __pyx_L0:;
-  return __pyx_r;
-}
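
`copy_data_to_temp` above allocates a packed scratch buffer, builds a temporary slice with the source's shape, -1 suboffsets, and freshly computed contiguous strides (zeroing strides of length-1 dimensions), then fills it with either one `memcpy`, when the source is already contiguous in the chosen order, or the strided copy. A condensed 1-D sketch of that gather, with a hypothetical stand-in struct for `__Pyx_memviewslice`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical 1-D slice: data pointer, extent, and byte stride. */
typedef struct { char *data; long shape; long stride; } slice1d;

/* Gather a possibly strided source into a freshly allocated packed
 * buffer; the caller owns (and must free) the returned pointer. */
static void *copy_to_temp_sketch(const slice1d *src, slice1d *tmp, size_t itemsize) {
    size_t size = itemsize * (size_t)src->shape;
    char *buf = malloc(size);
    if (!buf) return NULL;                     /* generated code raises MemoryError here */
    tmp->data = buf;
    tmp->shape = src->shape;
    tmp->stride = (long)itemsize;              /* scratch copy is contiguous */
    if (src->stride == (long)itemsize) {
        memcpy(buf, src->data, size);          /* source already contiguous */
    } else {
        for (long i = 0; i < src->shape; i++)  /* element-wise gather */
            memcpy(buf + i * (long)itemsize, src->data + i * src->stride, itemsize);
    }
    return buf;
}

int main(void) {
    int backing[] = {1, 0, 2, 0, 3, 0};        /* every other int: stride of 2 ints */
    slice1d src = {(char *)backing, 3, 2 * (long)sizeof(int)}, tmp;
    int *packed = copy_to_temp_sketch(&src, &tmp, sizeof(int));
    printf("%d %d %d\n", packed[0], packed[1], packed[2]);  /* prints: 1 2 3 */
    free(packed);
    return 0;
}
```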
-
-/* "View.MemoryView":1251
- * 
- * @cname('__pyx_memoryview_err_extents')
- * cdef int _err_extents(int i, Py_ssize_t extent1,             # <<<<<<<<<<<<<<
- *                              Py_ssize_t extent2) except -1 with gil:
- *     raise ValueError("got differing extents in dimension %d (got %d and %d)" %
- */
-
-static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  #ifdef WITH_THREAD
-  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-  #endif
-  __Pyx_RefNannySetupContext("_err_extents", 0);
-
-  /* "View.MemoryView":1254
- *                              Py_ssize_t extent2) except -1 with gil:
- *     raise ValueError("got differing extents in dimension %d (got %d and %d)" %
- *                                                         (i, extent1, extent2))             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_err_dim')
- */
-  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1254, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1254, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1254, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1254, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_2);
-  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_3);
-  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
-  __pyx_t_1 = 0;
-  __pyx_t_2 = 0;
-  __pyx_t_3 = 0;
-
-  /* "View.MemoryView":1253
- * cdef int _err_extents(int i, Py_ssize_t extent1,
- *                              Py_ssize_t extent2) except -1 with gil:
- *     raise ValueError("got differing extents in dimension %d (got %d and %d)" %             # <<<<<<<<<<<<<<
- *                                                         (i, extent1, extent2))
- * 
- */
-  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1253, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1253, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __PYX_ERR(2, 1253, __pyx_L1_error)
-
-  /* "View.MemoryView":1251
- * 
- * @cname('__pyx_memoryview_err_extents')
- * cdef int _err_extents(int i, Py_ssize_t extent1,             # <<<<<<<<<<<<<<
- *                              Py_ssize_t extent2) except -1 with gil:
- *     raise ValueError("got differing extents in dimension %d (got %d and %d)" %
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __Pyx_RefNannyFinishContext();
-  #ifdef WITH_THREAD
-  __Pyx_PyGILState_Release(__pyx_gilstate_save);
-  #endif
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1257
- * 
- * @cname('__pyx_memoryview_err_dim')
- * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:             # <<<<<<<<<<<<<<
- *     raise error(msg.decode('ascii') % dim)
- * 
- */
-
-static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  #ifdef WITH_THREAD
-  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-  #endif
-  __Pyx_RefNannySetupContext("_err_dim", 0);
-  __Pyx_INCREF(__pyx_v_error);
-
-  /* "View.MemoryView":1258
- * @cname('__pyx_memoryview_err_dim')
- * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
- *     raise error(msg.decode('ascii') % dim)             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_err')
- */
-  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_INCREF(__pyx_v_error);
-  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
-  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
-    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
-    if (likely(__pyx_t_2)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
-      __Pyx_INCREF(__pyx_t_2);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_3, function);
-    }
-  }
-  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __PYX_ERR(2, 1258, __pyx_L1_error)
-
-  /* "View.MemoryView":1257
- * 
- * @cname('__pyx_memoryview_err_dim')
- * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:             # <<<<<<<<<<<<<<
- *     raise error(msg.decode('ascii') % dim)
- * 
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __Pyx_XDECREF(__pyx_v_error);
-  __Pyx_RefNannyFinishContext();
-  #ifdef WITH_THREAD
-  __Pyx_PyGILState_Release(__pyx_gilstate_save);
-  #endif
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1261
- * 
- * @cname('__pyx_memoryview_err')
- * cdef int _err(object error, char *msg) except -1 with gil:             # <<<<<<<<<<<<<<
- *     if msg != NULL:
- *         raise error(msg.decode('ascii'))
- */
-
-static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
-  int __pyx_r;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  #ifdef WITH_THREAD
-  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-  #endif
-  __Pyx_RefNannySetupContext("_err", 0);
-  __Pyx_INCREF(__pyx_v_error);
-
-  /* "View.MemoryView":1262
- * @cname('__pyx_memoryview_err')
- * cdef int _err(object error, char *msg) except -1 with gil:
- *     if msg != NULL:             # <<<<<<<<<<<<<<
- *         raise error(msg.decode('ascii'))
- *     else:
- */
-  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
-  if (unlikely(__pyx_t_1)) {
-
-    /* "View.MemoryView":1263
- * cdef int _err(object error, char *msg) except -1 with gil:
- *     if msg != NULL:
- *         raise error(msg.decode('ascii'))             # <<<<<<<<<<<<<<
- *     else:
- *         raise error
- */
-    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1263, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_INCREF(__pyx_v_error);
-    __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
-    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
-      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
-      if (likely(__pyx_t_5)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
-        __Pyx_INCREF(__pyx_t_5);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_4, function);
-      }
-    }
-    __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
-    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1263, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __PYX_ERR(2, 1263, __pyx_L1_error)
-
-    /* "View.MemoryView":1262
- * @cname('__pyx_memoryview_err')
- * cdef int _err(object error, char *msg) except -1 with gil:
- *     if msg != NULL:             # <<<<<<<<<<<<<<
- *         raise error(msg.decode('ascii'))
- *     else:
- */
-  }
-
-  /* "View.MemoryView":1265
- *         raise error(msg.decode('ascii'))
- *     else:
- *         raise error             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_copy_contents')
- */
-  /*else*/ {
-    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
-    __PYX_ERR(2, 1265, __pyx_L1_error)
-  }
-
-  /* "View.MemoryView":1261
- * 
- * @cname('__pyx_memoryview_err')
- * cdef int _err(object error, char *msg) except -1 with gil:             # <<<<<<<<<<<<<<
- *     if msg != NULL:
- *         raise error(msg.decode('ascii'))
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = -1;
-  __Pyx_XDECREF(__pyx_v_error);
-  __Pyx_RefNannyFinishContext();
-  #ifdef WITH_THREAD
-  __Pyx_PyGILState_Release(__pyx_gilstate_save);
-  #endif
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1268
- * 
- * @cname('__pyx_memoryview_copy_contents')
- * cdef int memoryview_copy_contents(__Pyx_memviewslice src,             # <<<<<<<<<<<<<<
- *                                   __Pyx_memviewslice dst,
- *                                   int src_ndim, int dst_ndim,
- */
-
-static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
-  void *__pyx_v_tmpdata;
-  size_t __pyx_v_itemsize;
-  int __pyx_v_i;
-  char __pyx_v_order;
-  int __pyx_v_broadcasting;
-  int __pyx_v_direct_copy;
-  __Pyx_memviewslice __pyx_v_tmp;
-  int __pyx_v_ndim;
-  int __pyx_r;
-  Py_ssize_t __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-  int __pyx_t_4;
-  int __pyx_t_5;
-  int __pyx_t_6;
-  void *__pyx_t_7;
-  int __pyx_t_8;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-
-  /* "View.MemoryView":1276
- *     Check for overlapping memory and verify the shapes.
- *     """
- *     cdef void *tmpdata = NULL             # <<<<<<<<<<<<<<
- *     cdef size_t itemsize = src.memview.view.itemsize
- *     cdef int i
- */
-  __pyx_v_tmpdata = NULL;
-
-  /* "View.MemoryView":1277
- *     """
- *     cdef void *tmpdata = NULL
- *     cdef size_t itemsize = src.memview.view.itemsize             # <<<<<<<<<<<<<<
- *     cdef int i
- *     cdef char order = get_best_order(&src, src_ndim)
- */
-  __pyx_t_1 = __pyx_v_src.memview->view.itemsize;
-  __pyx_v_itemsize = __pyx_t_1;
-
-  /* "View.MemoryView":1279
- *     cdef size_t itemsize = src.memview.view.itemsize
- *     cdef int i
- *     cdef char order = get_best_order(&src, src_ndim)             # <<<<<<<<<<<<<<
- *     cdef bint broadcasting = False
- *     cdef bint direct_copy = False
- */
-  __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
-
-  /* "View.MemoryView":1280
- *     cdef int i
- *     cdef char order = get_best_order(&src, src_ndim)
- *     cdef bint broadcasting = False             # <<<<<<<<<<<<<<
- *     cdef bint direct_copy = False
- *     cdef __Pyx_memviewslice tmp
- */
-  __pyx_v_broadcasting = 0;
-
-  /* "View.MemoryView":1281
- *     cdef char order = get_best_order(&src, src_ndim)
- *     cdef bint broadcasting = False
- *     cdef bint direct_copy = False             # <<<<<<<<<<<<<<
- *     cdef __Pyx_memviewslice tmp
- * 
- */
-  __pyx_v_direct_copy = 0;
-
-  /* "View.MemoryView":1284
- *     cdef __Pyx_memviewslice tmp
- * 
- *     if src_ndim < dst_ndim:             # <<<<<<<<<<<<<<
- *         broadcast_leading(&src, src_ndim, dst_ndim)
- *     elif dst_ndim < src_ndim:
- */
-  __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1285
- * 
- *     if src_ndim < dst_ndim:
- *         broadcast_leading(&src, src_ndim, dst_ndim)             # <<<<<<<<<<<<<<
- *     elif dst_ndim < src_ndim:
- *         broadcast_leading(&dst, dst_ndim, src_ndim)
- */
-    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
-
-    /* "View.MemoryView":1284
- *     cdef __Pyx_memviewslice tmp
- * 
- *     if src_ndim < dst_ndim:             # <<<<<<<<<<<<<<
- *         broadcast_leading(&src, src_ndim, dst_ndim)
- *     elif dst_ndim < src_ndim:
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":1286
- *     if src_ndim < dst_ndim:
- *         broadcast_leading(&src, src_ndim, dst_ndim)
- *     elif dst_ndim < src_ndim:             # <<<<<<<<<<<<<<
- *         broadcast_leading(&dst, dst_ndim, src_ndim)
- * 
- */
-  __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1287
- *         broadcast_leading(&src, src_ndim, dst_ndim)
- *     elif dst_ndim < src_ndim:
- *         broadcast_leading(&dst, dst_ndim, src_ndim)             # <<<<<<<<<<<<<<
- * 
- *     cdef int ndim = max(src_ndim, dst_ndim)
- */
-    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
-
-    /* "View.MemoryView":1286
- *     if src_ndim < dst_ndim:
- *         broadcast_leading(&src, src_ndim, dst_ndim)
- *     elif dst_ndim < src_ndim:             # <<<<<<<<<<<<<<
- *         broadcast_leading(&dst, dst_ndim, src_ndim)
- * 
- */
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":1289
- *         broadcast_leading(&dst, dst_ndim, src_ndim)
- * 
- *     cdef int ndim = max(src_ndim, dst_ndim)             # <<<<<<<<<<<<<<
- * 
- *     for i in range(ndim):
- */
-  __pyx_t_3 = __pyx_v_dst_ndim;
-  __pyx_t_4 = __pyx_v_src_ndim;
-  if (((__pyx_t_3 > __pyx_t_4) != 0)) {
-    __pyx_t_5 = __pyx_t_3;
-  } else {
-    __pyx_t_5 = __pyx_t_4;
-  }
-  __pyx_v_ndim = __pyx_t_5;
-
-  /* "View.MemoryView":1291
- *     cdef int ndim = max(src_ndim, dst_ndim)
- * 
- *     for i in range(ndim):             # <<<<<<<<<<<<<<
- *         if src.shape[i] != dst.shape[i]:
- *             if src.shape[i] == 1:
- */
-  __pyx_t_5 = __pyx_v_ndim;
-  __pyx_t_3 = __pyx_t_5;
-  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-    __pyx_v_i = __pyx_t_4;
-
-    /* "View.MemoryView":1292
- * 
- *     for i in range(ndim):
- *         if src.shape[i] != dst.shape[i]:             # <<<<<<<<<<<<<<
- *             if src.shape[i] == 1:
- *                 broadcasting = True
- */
-    __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1293
- *     for i in range(ndim):
- *         if src.shape[i] != dst.shape[i]:
- *             if src.shape[i] == 1:             # <<<<<<<<<<<<<<
- *                 broadcasting = True
- *                 src.strides[i] = 0
- */
-      __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
-      if (__pyx_t_2) {
-
-        /* "View.MemoryView":1294
- *         if src.shape[i] != dst.shape[i]:
- *             if src.shape[i] == 1:
- *                 broadcasting = True             # <<<<<<<<<<<<<<
- *                 src.strides[i] = 0
- *             else:
- */
-        __pyx_v_broadcasting = 1;
-
-        /* "View.MemoryView":1295
- *             if src.shape[i] == 1:
- *                 broadcasting = True
- *                 src.strides[i] = 0             # <<<<<<<<<<<<<<
- *             else:
- *                 _err_extents(i, dst.shape[i], src.shape[i])
- */
-        (__pyx_v_src.strides[__pyx_v_i]) = 0;
-
-        /* "View.MemoryView":1293
- *     for i in range(ndim):
- *         if src.shape[i] != dst.shape[i]:
- *             if src.shape[i] == 1:             # <<<<<<<<<<<<<<
- *                 broadcasting = True
- *                 src.strides[i] = 0
- */
-        goto __pyx_L7;
-      }
-
-      /* "View.MemoryView":1297
- *                 src.strides[i] = 0
- *             else:
- *                 _err_extents(i, dst.shape[i], src.shape[i])             # <<<<<<<<<<<<<<
- * 
- *         if src.suboffsets[i] >= 0:
- */
-      /*else*/ {
-        __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error)
-      }
-      __pyx_L7:;
-
-      /* "View.MemoryView":1292
- * 
- *     for i in range(ndim):
- *         if src.shape[i] != dst.shape[i]:             # <<<<<<<<<<<<<<
- *             if src.shape[i] == 1:
- *                 broadcasting = True
- */
-    }
-
-    /* "View.MemoryView":1299
- *                 _err_extents(i, dst.shape[i], src.shape[i])
- * 
- *         if src.suboffsets[i] >= 0:             # <<<<<<<<<<<<<<
- *             _err_dim(ValueError, "Dimension %d is not direct", i)
- * 
- */
-    __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1300
- * 
- *         if src.suboffsets[i] >= 0:
- *             _err_dim(ValueError, "Dimension %d is not direct", i)             # <<<<<<<<<<<<<<
- * 
- *     if slices_overlap(&src, &dst, ndim, itemsize):
- */
-      __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1300, __pyx_L1_error)
-
-      /* "View.MemoryView":1299
- *                 _err_extents(i, dst.shape[i], src.shape[i])
- * 
- *         if src.suboffsets[i] >= 0:             # <<<<<<<<<<<<<<
- *             _err_dim(ValueError, "Dimension %d is not direct", i)
- * 
- */
-    }
-  }
-
-  /* "View.MemoryView":1302
- *             _err_dim(ValueError, "Dimension %d is not direct", i)
- * 
- *     if slices_overlap(&src, &dst, ndim, itemsize):             # <<<<<<<<<<<<<<
- * 
- *         if not slice_is_contig(src, order, ndim):
- */
-  __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1304
- *     if slices_overlap(&src, &dst, ndim, itemsize):
- * 
- *         if not slice_is_contig(src, order, ndim):             # <<<<<<<<<<<<<<
- *             order = get_best_order(&dst, ndim)
- * 
- */
-    __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1305
- * 
- *         if not slice_is_contig(src, order, ndim):
- *             order = get_best_order(&dst, ndim)             # <<<<<<<<<<<<<<
- * 
- *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
- */
-      __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
-
-      /* "View.MemoryView":1304
- *     if slices_overlap(&src, &dst, ndim, itemsize):
- * 
- *         if not slice_is_contig(src, order, ndim):             # <<<<<<<<<<<<<<
- *             order = get_best_order(&dst, ndim)
- * 
- */
-    }
-
-    /* "View.MemoryView":1307
- *             order = get_best_order(&dst, ndim)
- * 
- *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)             # <<<<<<<<<<<<<<
- *         src = tmp
- * 
- */
-    __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1307, __pyx_L1_error)
-    __pyx_v_tmpdata = __pyx_t_7;
-
-    /* "View.MemoryView":1308
- * 
- *         tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
- *         src = tmp             # <<<<<<<<<<<<<<
- * 
- *     if not broadcasting:
- */
-    __pyx_v_src = __pyx_v_tmp;
-
-    /* "View.MemoryView":1302
- *             _err_dim(ValueError, "Dimension %d is not direct", i)
- * 
- *     if slices_overlap(&src, &dst, ndim, itemsize):             # <<<<<<<<<<<<<<
- * 
- *         if not slice_is_contig(src, order, ndim):
- */
-  }
-
-  /* "View.MemoryView":1310
- *         src = tmp
- * 
- *     if not broadcasting:             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
-  if (__pyx_t_2) {
-
-    /* "View.MemoryView":1313
- * 
- * 
- *         if slice_is_contig(src, 'C', ndim):             # <<<<<<<<<<<<<<
- *             direct_copy = slice_is_contig(dst, 'C', ndim)
- *         elif slice_is_contig(src, 'F', ndim):
- */
-    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1314
- * 
- *         if slice_is_contig(src, 'C', ndim):
- *             direct_copy = slice_is_contig(dst, 'C', ndim)             # <<<<<<<<<<<<<<
- *         elif slice_is_contig(src, 'F', ndim):
- *             direct_copy = slice_is_contig(dst, 'F', ndim)
- */
-      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
-
-      /* "View.MemoryView":1313
- * 
- * 
- *         if slice_is_contig(src, 'C', ndim):             # <<<<<<<<<<<<<<
- *             direct_copy = slice_is_contig(dst, 'C', ndim)
- *         elif slice_is_contig(src, 'F', ndim):
- */
-      goto __pyx_L12;
-    }
-
-    /* "View.MemoryView":1315
- *         if slice_is_contig(src, 'C', ndim):
- *             direct_copy = slice_is_contig(dst, 'C', ndim)
- *         elif slice_is_contig(src, 'F', ndim):             # <<<<<<<<<<<<<<
- *             direct_copy = slice_is_contig(dst, 'F', ndim)
- * 
- */
-    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1316
- *             direct_copy = slice_is_contig(dst, 'C', ndim)
- *         elif slice_is_contig(src, 'F', ndim):
- *             direct_copy = slice_is_contig(dst, 'F', ndim)             # <<<<<<<<<<<<<<
- * 
- *         if direct_copy:
- */
-      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
-
-      /* "View.MemoryView":1315
- *         if slice_is_contig(src, 'C', ndim):
- *             direct_copy = slice_is_contig(dst, 'C', ndim)
- *         elif slice_is_contig(src, 'F', ndim):             # <<<<<<<<<<<<<<
- *             direct_copy = slice_is_contig(dst, 'F', ndim)
- * 
- */
-    }
-    __pyx_L12:;
-
-    /* "View.MemoryView":1318
- *             direct_copy = slice_is_contig(dst, 'F', ndim)
- * 
- *         if direct_copy:             # <<<<<<<<<<<<<<
- * 
- *             refcount_copying(&dst, dtype_is_object, ndim, False)
- */
-    __pyx_t_2 = (__pyx_v_direct_copy != 0);
-    if (__pyx_t_2) {
-
-      /* "View.MemoryView":1320
- *         if direct_copy:
- * 
- *             refcount_copying(&dst, dtype_is_object, ndim, False)             # <<<<<<<<<<<<<<
- *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
- *             refcount_copying(&dst, dtype_is_object, ndim, True)
- */
-      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
-
-      /* "View.MemoryView":1321
- * 
- *             refcount_copying(&dst, dtype_is_object, ndim, False)
- *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))             # <<<<<<<<<<<<<<
- *             refcount_copying(&dst, dtype_is_object, ndim, True)
- *             free(tmpdata)
- */
-      (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
-
-      /* "View.MemoryView":1322
- *             refcount_copying(&dst, dtype_is_object, ndim, False)
- *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
- *             refcount_copying(&dst, dtype_is_object, ndim, True)             # <<<<<<<<<<<<<<
- *             free(tmpdata)
- *             return 0
- */
-      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
-
-      /* "View.MemoryView":1323
- *             memcpy(dst.data, src.data, slice_get_size(&src, ndim))
- *             refcount_copying(&dst, dtype_is_object, ndim, True)
- *             free(tmpdata)             # <<<<<<<<<<<<<<
- *             return 0
- * 
- */
-      free(__pyx_v_tmpdata);
-
-      /* "View.MemoryView":1324
- *             refcount_copying(&dst, dtype_is_object, ndim, True)
- *             free(tmpdata)
- *             return 0             # <<<<<<<<<<<<<<
- * 
- *     if order == 'F' == get_best_order(&dst, ndim):
- */
-      __pyx_r = 0;
-      goto __pyx_L0;
-
-      /* "View.MemoryView":1318
- *             direct_copy = slice_is_contig(dst, 'F', ndim)
- * 
- *         if direct_copy:             # <<<<<<<<<<<<<<
- * 
- *             refcount_copying(&dst, dtype_is_object, ndim, False)
- */
-    }
-
-    /* "View.MemoryView":1310
- *         src = tmp
- * 
- *     if not broadcasting:             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  }
-
-  /* "View.MemoryView":1326
- *             return 0
- * 
- *     if order == 'F' == get_best_order(&dst, ndim):             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = (__pyx_v_order == 'F');
-  if (__pyx_t_2) {
-    __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
-  }
-  __pyx_t_8 = (__pyx_t_2 != 0);
-  if (__pyx_t_8) {
-
-    /* "View.MemoryView":1329
- * 
- * 
- *         transpose_memslice(&src)             # <<<<<<<<<<<<<<
- *         transpose_memslice(&dst)
- * 
- */
-    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1329, __pyx_L1_error)
-
-    /* "View.MemoryView":1330
- * 
- *         transpose_memslice(&src)
- *         transpose_memslice(&dst)             # <<<<<<<<<<<<<<
- * 
- *     refcount_copying(&dst, dtype_is_object, ndim, False)
- */
-    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1330, __pyx_L1_error)
-
-    /* "View.MemoryView":1326
- *             return 0
- * 
- *     if order == 'F' == get_best_order(&dst, ndim):             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  }
-
-  /* "View.MemoryView":1332
- *         transpose_memslice(&dst)
- * 
- *     refcount_copying(&dst, dtype_is_object, ndim, False)             # <<<<<<<<<<<<<<
- *     copy_strided_to_strided(&src, &dst, ndim, itemsize)
- *     refcount_copying(&dst, dtype_is_object, ndim, True)
- */
-  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
-
-  /* "View.MemoryView":1333
- * 
- *     refcount_copying(&dst, dtype_is_object, ndim, False)
- *     copy_strided_to_strided(&src, &dst, ndim, itemsize)             # <<<<<<<<<<<<<<
- *     refcount_copying(&dst, dtype_is_object, ndim, True)
- * 
- */
-  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
-
-  /* "View.MemoryView":1334
- *     refcount_copying(&dst, dtype_is_object, ndim, False)
- *     copy_strided_to_strided(&src, &dst, ndim, itemsize)
- *     refcount_copying(&dst, dtype_is_object, ndim, True)             # <<<<<<<<<<<<<<
- * 
- *     free(tmpdata)
- */
-  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
-
-  /* "View.MemoryView":1336
- *     refcount_copying(&dst, dtype_is_object, ndim, True)
- * 
- *     free(tmpdata)             # <<<<<<<<<<<<<<
- *     return 0
- * 
- */
-  free(__pyx_v_tmpdata);
-
-  /* "View.MemoryView":1337
- * 
- *     free(tmpdata)
- *     return 0             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_broadcast_leading')
- */
-  __pyx_r = 0;
-  goto __pyx_L0;
-
-  /* "View.MemoryView":1268
- * 
- * @cname('__pyx_memoryview_copy_contents')
- * cdef int memoryview_copy_contents(__Pyx_memviewslice src,             # <<<<<<<<<<<<<<
- *                                   __Pyx_memviewslice dst,
- *                                   int src_ndim, int dst_ndim,
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  {
-    #ifdef WITH_THREAD
-    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-    #endif
-    __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
-    #ifdef WITH_THREAD
-    __Pyx_PyGILState_Release(__pyx_gilstate_save);
-    #endif
-  }
-  __pyx_r = -1;
-  __pyx_L0:;
-  return __pyx_r;
-}
-
-/* "View.MemoryView":1340
- * 
- * @cname('__pyx_memoryview_broadcast_leading')
- * cdef void broadcast_leading(__Pyx_memviewslice *mslice,             # <<<<<<<<<<<<<<
- *                             int ndim,
- *                             int ndim_other) nogil:
- */
-
-static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
-  int __pyx_v_i;
-  int __pyx_v_offset;
-  int __pyx_t_1;
-  int __pyx_t_2;
-  int __pyx_t_3;
-
-  /* "View.MemoryView":1344
- *                             int ndim_other) nogil:
- *     cdef int i
- *     cdef int offset = ndim_other - ndim             # <<<<<<<<<<<<<<
- * 
- *     for i in range(ndim - 1, -1, -1):
- */
-  __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
-
-  /* "View.MemoryView":1346
- *     cdef int offset = ndim_other - ndim
- * 
- *     for i in range(ndim - 1, -1, -1):             # <<<<<<<<<<<<<<
- *         mslice.shape[i + offset] = mslice.shape[i]
- *         mslice.strides[i + offset] = mslice.strides[i]
- */
-  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
-    __pyx_v_i = __pyx_t_1;
-
-    /* "View.MemoryView":1347
- * 
- *     for i in range(ndim - 1, -1, -1):
- *         mslice.shape[i + offset] = mslice.shape[i]             # <<<<<<<<<<<<<<
- *         mslice.strides[i + offset] = mslice.strides[i]
- *         mslice.suboffsets[i + offset] = mslice.suboffsets[i]
- */
-    (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
-
-    /* "View.MemoryView":1348
- *     for i in range(ndim - 1, -1, -1):
- *         mslice.shape[i + offset] = mslice.shape[i]
- *         mslice.strides[i + offset] = mslice.strides[i]             # <<<<<<<<<<<<<<
- *         mslice.suboffsets[i + offset] = mslice.suboffsets[i]
- * 
- */
-    (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
-
-    /* "View.MemoryView":1349
- *         mslice.shape[i + offset] = mslice.shape[i]
- *         mslice.strides[i + offset] = mslice.strides[i]
- *         mslice.suboffsets[i + offset] = mslice.suboffsets[i]             # <<<<<<<<<<<<<<
- * 
- *     for i in range(offset):
- */
-    (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
-  }
-
-  /* "View.MemoryView":1351
- *         mslice.suboffsets[i + offset] = mslice.suboffsets[i]
- * 
- *     for i in range(offset):             # <<<<<<<<<<<<<<
- *         mslice.shape[i] = 1
- *         mslice.strides[i] = mslice.strides[0]
- */
-  __pyx_t_1 = __pyx_v_offset;
-  __pyx_t_2 = __pyx_t_1;
-  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
-    __pyx_v_i = __pyx_t_3;
-
-    /* "View.MemoryView":1352
- * 
- *     for i in range(offset):
- *         mslice.shape[i] = 1             # <<<<<<<<<<<<<<
- *         mslice.strides[i] = mslice.strides[0]
- *         mslice.suboffsets[i] = -1
- */
-    (__pyx_v_mslice->shape[__pyx_v_i]) = 1;
-
-    /* "View.MemoryView":1353
- *     for i in range(offset):
- *         mslice.shape[i] = 1
- *         mslice.strides[i] = mslice.strides[0]             # <<<<<<<<<<<<<<
- *         mslice.suboffsets[i] = -1
- * 
- */
-    (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
-
-    /* "View.MemoryView":1354
- *         mslice.shape[i] = 1
- *         mslice.strides[i] = mslice.strides[0]
- *         mslice.suboffsets[i] = -1             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
-  }
-
-  /* "View.MemoryView":1340
- * 
- * @cname('__pyx_memoryview_broadcast_leading')
- * cdef void broadcast_leading(__Pyx_memviewslice *mslice,             # <<<<<<<<<<<<<<
- *                             int ndim,
- *                             int ndim_other) nogil:
- */
-
-  /* function exit code */
-}
-
-/* "View.MemoryView":1362
- * 
- * @cname('__pyx_memoryview_refcount_copying')
- * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object,             # <<<<<<<<<<<<<<
- *                            int ndim, bint inc) nogil:
- * 
- */
-
-static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
-  int __pyx_t_1;
-
-  /* "View.MemoryView":1366
- * 
- * 
- *     if dtype_is_object:             # <<<<<<<<<<<<<<
- *         refcount_objects_in_slice_with_gil(dst.data, dst.shape,
- *                                            dst.strides, ndim, inc)
- */
-  __pyx_t_1 = (__pyx_v_dtype_is_object != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1367
- * 
- *     if dtype_is_object:
- *         refcount_objects_in_slice_with_gil(dst.data, dst.shape,             # <<<<<<<<<<<<<<
- *                                            dst.strides, ndim, inc)
- * 
- */
-    __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
-
-    /* "View.MemoryView":1366
- * 
- * 
- *     if dtype_is_object:             # <<<<<<<<<<<<<<
- *         refcount_objects_in_slice_with_gil(dst.data, dst.shape,
- *                                            dst.strides, ndim, inc)
- */
-  }
-
-  /* "View.MemoryView":1362
- * 
- * @cname('__pyx_memoryview_refcount_copying')
- * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object,             # <<<<<<<<<<<<<<
- *                            int ndim, bint inc) nogil:
- * 
- */
-
-  /* function exit code */
-}
-
-/* "View.MemoryView":1371
- * 
- * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
- * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                                              Py_ssize_t *strides, int ndim,
- *                                              bint inc) with gil:
- */
-
-static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
-  __Pyx_RefNannyDeclarations
-  #ifdef WITH_THREAD
-  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
-  #endif
-  __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
-
-  /* "View.MemoryView":1374
- *                                              Py_ssize_t *strides, int ndim,
- *                                              bint inc) with gil:
- *     refcount_objects_in_slice(data, shape, strides, ndim, inc)             # <<<<<<<<<<<<<<
- * 
- * @cname('__pyx_memoryview_refcount_objects_in_slice')
- */
-  __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
-
-  /* "View.MemoryView":1371
- * 
- * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
- * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                                              Py_ssize_t *strides, int ndim,
- *                                              bint inc) with gil:
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  #ifdef WITH_THREAD
-  __Pyx_PyGILState_Release(__pyx_gilstate_save);
-  #endif
-}
-
-/* "View.MemoryView":1377
- * 
- * @cname('__pyx_memoryview_refcount_objects_in_slice')
- * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                                     Py_ssize_t *strides, int ndim, bint inc):
- *     cdef Py_ssize_t i
- */
-
-static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
-  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
-  __Pyx_RefNannyDeclarations
-  Py_ssize_t __pyx_t_1;
-  Py_ssize_t __pyx_t_2;
-  Py_ssize_t __pyx_t_3;
-  int __pyx_t_4;
-  __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
-
-  /* "View.MemoryView":1381
- *     cdef Py_ssize_t i
- * 
- *     for i in range(shape[0]):             # <<<<<<<<<<<<<<
- *         if ndim == 1:
- *             if inc:
- */
-  __pyx_t_1 = (__pyx_v_shape[0]);
-  __pyx_t_2 = __pyx_t_1;
-  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
-    __pyx_v_i = __pyx_t_3;
-
-    /* "View.MemoryView":1382
- * 
- *     for i in range(shape[0]):
- *         if ndim == 1:             # <<<<<<<<<<<<<<
- *             if inc:
- *                 Py_INCREF((<PyObject **> data)[0])
- */
-    __pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
-    if (__pyx_t_4) {
-
-      /* "View.MemoryView":1383
- *     for i in range(shape[0]):
- *         if ndim == 1:
- *             if inc:             # <<<<<<<<<<<<<<
- *                 Py_INCREF((<PyObject **> data)[0])
- *             else:
- */
-      __pyx_t_4 = (__pyx_v_inc != 0);
-      if (__pyx_t_4) {
-
-        /* "View.MemoryView":1384
- *         if ndim == 1:
- *             if inc:
- *                 Py_INCREF((<PyObject **> data)[0])             # <<<<<<<<<<<<<<
- *             else:
- *                 Py_DECREF((<PyObject **> data)[0])
- */
-        Py_INCREF((((PyObject **)__pyx_v_data)[0]));
-
-        /* "View.MemoryView":1383
- *     for i in range(shape[0]):
- *         if ndim == 1:
- *             if inc:             # <<<<<<<<<<<<<<
- *                 Py_INCREF((<PyObject **> data)[0])
- *             else:
- */
-        goto __pyx_L6;
-      }
-
-      /* "View.MemoryView":1386
- *                 Py_INCREF((<PyObject **> data)[0])
- *             else:
- *                 Py_DECREF((<PyObject **> data)[0])             # <<<<<<<<<<<<<<
- *         else:
- *             refcount_objects_in_slice(data, shape + 1, strides + 1,
- */
-      /*else*/ {
-        Py_DECREF((((PyObject **)__pyx_v_data)[0]));
-      }
-      __pyx_L6:;
-
-      /* "View.MemoryView":1382
- * 
- *     for i in range(shape[0]):
- *         if ndim == 1:             # <<<<<<<<<<<<<<
- *             if inc:
- *                 Py_INCREF((<PyObject **> data)[0])
- */
-      goto __pyx_L5;
-    }
-
-    /* "View.MemoryView":1388
- *                 Py_DECREF((<PyObject **> data)[0])
- *         else:
- *             refcount_objects_in_slice(data, shape + 1, strides + 1,             # <<<<<<<<<<<<<<
- *                                       ndim - 1, inc)
- * 
- */
-    /*else*/ {
-
-      /* "View.MemoryView":1389
- *         else:
- *             refcount_objects_in_slice(data, shape + 1, strides + 1,
- *                                       ndim - 1, inc)             # <<<<<<<<<<<<<<
- * 
- *         data += strides[0]
- */
-      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
-    }
-    __pyx_L5:;
-
-    /* "View.MemoryView":1391
- *                                       ndim - 1, inc)
- * 
- *         data += strides[0]             # <<<<<<<<<<<<<<
- * 
- * 
- */
-    __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
-  }
-
-  /* "View.MemoryView":1377
- * 
- * @cname('__pyx_memoryview_refcount_objects_in_slice')
- * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                                     Py_ssize_t *strides, int ndim, bint inc):
- *     cdef Py_ssize_t i
- */
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":1397
- * 
- * @cname('__pyx_memoryview_slice_assign_scalar')
- * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,             # <<<<<<<<<<<<<<
- *                               size_t itemsize, void *item,
- *                               bint dtype_is_object) nogil:
- */
-
-static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
-
-  /* "View.MemoryView":1400
- *                               size_t itemsize, void *item,
- *                               bint dtype_is_object) nogil:
- *     refcount_copying(dst, dtype_is_object, ndim, False)             # <<<<<<<<<<<<<<
- *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
- *                          itemsize, item)
- */
-  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
-
-  /* "View.MemoryView":1401
- *                               bint dtype_is_object) nogil:
- *     refcount_copying(dst, dtype_is_object, ndim, False)
- *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,             # <<<<<<<<<<<<<<
- *                          itemsize, item)
- *     refcount_copying(dst, dtype_is_object, ndim, True)
- */
-  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
-
-  /* "View.MemoryView":1403
- *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
- *                          itemsize, item)
- *     refcount_copying(dst, dtype_is_object, ndim, True)             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
-
-  /* "View.MemoryView":1397
- * 
- * @cname('__pyx_memoryview_slice_assign_scalar')
- * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,             # <<<<<<<<<<<<<<
- *                               size_t itemsize, void *item,
- *                               bint dtype_is_object) nogil:
- */
-
-  /* function exit code */
-}
-
-/* "View.MemoryView":1407
- * 
- * @cname('__pyx_memoryview__slice_assign_scalar')
- * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                               Py_ssize_t *strides, int ndim,
- *                               size_t itemsize, void *item) nogil:
- */
-
-static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
-  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
-  Py_ssize_t __pyx_v_stride;
-  Py_ssize_t __pyx_v_extent;
-  int __pyx_t_1;
-  Py_ssize_t __pyx_t_2;
-  Py_ssize_t __pyx_t_3;
-  Py_ssize_t __pyx_t_4;
-
-  /* "View.MemoryView":1411
- *                               size_t itemsize, void *item) nogil:
- *     cdef Py_ssize_t i
- *     cdef Py_ssize_t stride = strides[0]             # <<<<<<<<<<<<<<
- *     cdef Py_ssize_t extent = shape[0]
- * 
- */
-  __pyx_v_stride = (__pyx_v_strides[0]);
-
-  /* "View.MemoryView":1412
- *     cdef Py_ssize_t i
- *     cdef Py_ssize_t stride = strides[0]
- *     cdef Py_ssize_t extent = shape[0]             # <<<<<<<<<<<<<<
- * 
- *     if ndim == 1:
- */
-  __pyx_v_extent = (__pyx_v_shape[0]);
-
-  /* "View.MemoryView":1414
- *     cdef Py_ssize_t extent = shape[0]
- * 
- *     if ndim == 1:             # <<<<<<<<<<<<<<
- *         for i in range(extent):
- *             memcpy(data, item, itemsize)
- */
-  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
-  if (__pyx_t_1) {
-
-    /* "View.MemoryView":1415
- * 
- *     if ndim == 1:
- *         for i in range(extent):             # <<<<<<<<<<<<<<
- *             memcpy(data, item, itemsize)
- *             data += stride
- */
-    __pyx_t_2 = __pyx_v_extent;
-    __pyx_t_3 = __pyx_t_2;
-    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-      __pyx_v_i = __pyx_t_4;
-
-      /* "View.MemoryView":1416
- *     if ndim == 1:
- *         for i in range(extent):
- *             memcpy(data, item, itemsize)             # <<<<<<<<<<<<<<
- *             data += stride
- *     else:
- */
-      (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
-
-      /* "View.MemoryView":1417
- *         for i in range(extent):
- *             memcpy(data, item, itemsize)
- *             data += stride             # <<<<<<<<<<<<<<
- *     else:
- *         for i in range(extent):
- */
-      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
-    }
-
-    /* "View.MemoryView":1414
- *     cdef Py_ssize_t extent = shape[0]
- * 
- *     if ndim == 1:             # <<<<<<<<<<<<<<
- *         for i in range(extent):
- *             memcpy(data, item, itemsize)
- */
-    goto __pyx_L3;
-  }
-
-  /* "View.MemoryView":1419
- *             data += stride
- *     else:
- *         for i in range(extent):             # <<<<<<<<<<<<<<
- *             _slice_assign_scalar(data, shape + 1, strides + 1,
- *                                 ndim - 1, itemsize, item)
- */
-  /*else*/ {
-    __pyx_t_2 = __pyx_v_extent;
-    __pyx_t_3 = __pyx_t_2;
-    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
-      __pyx_v_i = __pyx_t_4;
-
-      /* "View.MemoryView":1420
- *     else:
- *         for i in range(extent):
- *             _slice_assign_scalar(data, shape + 1, strides + 1,             # <<<<<<<<<<<<<<
- *                                 ndim - 1, itemsize, item)
- *             data += stride
- */
-      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
-
-      /* "View.MemoryView":1422
- *             _slice_assign_scalar(data, shape + 1, strides + 1,
- *                                 ndim - 1, itemsize, item)
- *             data += stride             # <<<<<<<<<<<<<<
- * 
- * 
- */
-      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
-    }
-  }
-  __pyx_L3:;
-
-  /* "View.MemoryView":1407
- * 
- * @cname('__pyx_memoryview__slice_assign_scalar')
- * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
- *                               Py_ssize_t *strides, int ndim,
- *                               size_t itemsize, void *item) nogil:
- */
-
-  /* function exit code */
-}
-
-/* "(tree fragment)":1
- * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
-static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
-  PyObject *__pyx_v___pyx_type = 0;
-  long __pyx_v___pyx_checksum;
-  PyObject *__pyx_v___pyx_state = 0;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  PyObject *__pyx_r = 0;
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
-  {
-    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
-    PyObject* values[3] = {0,0,0};
-    if (unlikely(__pyx_kwds)) {
-      Py_ssize_t kw_args;
-      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
-      switch (pos_args) {
-        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-        CYTHON_FALLTHROUGH;
-        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-        CYTHON_FALLTHROUGH;
-        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-        CYTHON_FALLTHROUGH;
-        case  0: break;
-        default: goto __pyx_L5_argtuple_error;
-      }
-      kw_args = PyDict_Size(__pyx_kwds);
-      switch (pos_args) {
-        case  0:
-        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
-        else goto __pyx_L5_argtuple_error;
-        CYTHON_FALLTHROUGH;
-        case  1:
-        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error)
-        }
-        CYTHON_FALLTHROUGH;
-        case  2:
-        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
-        else {
-          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error)
-        }
-      }
-      if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(2, 1, __pyx_L3_error)
-      }
-    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
-      goto __pyx_L5_argtuple_error;
-    } else {
-      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
-      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
-      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
-    }
-    __pyx_v___pyx_type = values[0];
-    __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error)
-    __pyx_v___pyx_state = values[2];
-  }
-  goto __pyx_L4_argument_unpacking_done;
-  __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error)
-  __pyx_L3_error:;
-  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __Pyx_RefNannyFinishContext();
-  return NULL;
-  __pyx_L4_argument_unpacking_done:;
-  __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
-
-  /* function exit code */
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_v___pyx_PickleError = 0;
-  PyObject *__pyx_v___pyx_result = 0;
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  int __pyx_t_1;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  PyObject *__pyx_t_5 = NULL;
-  int __pyx_t_6;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
-
-  /* "(tree fragment)":4
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- *     if __pyx_checksum != 0xb068931:             # <<<<<<<<<<<<<<
- *         from pickle import PickleError as __pyx_PickleError
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- */
-  __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
-  if (__pyx_t_1) {
-
-    /* "(tree fragment)":5
- *     cdef object __pyx_result
- *     if __pyx_checksum != 0xb068931:
- *         from pickle import PickleError as __pyx_PickleError             # <<<<<<<<<<<<<<
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- *     __pyx_result = Enum.__new__(__pyx_type)
- */
-    __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_INCREF(__pyx_n_s_PickleError);
-    __Pyx_GIVEREF(__pyx_n_s_PickleError);
-    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
-    __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __Pyx_INCREF(__pyx_t_2);
-    __pyx_v___pyx_PickleError = __pyx_t_2;
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-    /* "(tree fragment)":6
- *     if __pyx_checksum != 0xb068931:
- *         from pickle import PickleError as __pyx_PickleError
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)             # <<<<<<<<<<<<<<
- *     __pyx_result = Enum.__new__(__pyx_type)
- *     if __pyx_state is not None:
- */
-    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_2);
-    __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_4);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_INCREF(__pyx_v___pyx_PickleError);
-    __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
-    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
-      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
-      if (likely(__pyx_t_5)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-        __Pyx_INCREF(__pyx_t_5);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_2, function);
-      }
-    }
-    __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
-    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
-    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-    if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-    __PYX_ERR(2, 6, __pyx_L1_error)
-
-    /* "(tree fragment)":4
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- *     if __pyx_checksum != 0xb068931:             # <<<<<<<<<<<<<<
- *         from pickle import PickleError as __pyx_PickleError
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- */
-  }
-
-  /* "(tree fragment)":7
- *         from pickle import PickleError as __pyx_PickleError
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- *     __pyx_result = Enum.__new__(__pyx_type)             # <<<<<<<<<<<<<<
- *     if __pyx_state is not None:
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- */
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_4 = NULL;
-  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
-    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
-    if (likely(__pyx_t_4)) {
-      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
-      __Pyx_INCREF(__pyx_t_4);
-      __Pyx_INCREF(function);
-      __Pyx_DECREF_SET(__pyx_t_2, function);
-    }
-  }
-  __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
-  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __pyx_v___pyx_result = __pyx_t_3;
-  __pyx_t_3 = 0;
-
-  /* "(tree fragment)":8
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- *     __pyx_result = Enum.__new__(__pyx_type)
- *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result
- */
-  __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
-  __pyx_t_6 = (__pyx_t_1 != 0);
-  if (__pyx_t_6) {
-
-    /* "(tree fragment)":9
- *     __pyx_result = Enum.__new__(__pyx_type)
- *     if __pyx_state is not None:
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
- *     return __pyx_result
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
- */
-    if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error)
-    __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_3);
-    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-    /* "(tree fragment)":8
- *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
- *     __pyx_result = Enum.__new__(__pyx_type)
- *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result
- */
-  }
-
-  /* "(tree fragment)":10
- *     if __pyx_state is not None:
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result             # <<<<<<<<<<<<<<
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
- *     __pyx_result.name = __pyx_state[0]
- */
-  __Pyx_XDECREF(__pyx_r);
-  __Pyx_INCREF(__pyx_v___pyx_result);
-  __pyx_r = __pyx_v___pyx_result;
-  goto __pyx_L0;
-
-  /* "(tree fragment)":1
- * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- */
-
-  /* function exit code */
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  __Pyx_XDECREF(__pyx_t_5);
-  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = NULL;
-  __pyx_L0:;
-  __Pyx_XDECREF(__pyx_v___pyx_PickleError);
-  __Pyx_XDECREF(__pyx_v___pyx_result);
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-/* "(tree fragment)":11
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
- */
-
-static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
-  PyObject *__pyx_r = NULL;
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_t_2;
-  Py_ssize_t __pyx_t_3;
-  int __pyx_t_4;
-  int __pyx_t_5;
-  PyObject *__pyx_t_6 = NULL;
-  PyObject *__pyx_t_7 = NULL;
-  PyObject *__pyx_t_8 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
-
-  /* "(tree fragment)":12
- *     return __pyx_result
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
- *     __pyx_result.name = __pyx_state[0]             # <<<<<<<<<<<<<<
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
- *         __pyx_result.__dict__.update(__pyx_state[1])
- */
-  if (unlikely(__pyx_v___pyx_state == Py_None)) {
-    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
-    __PYX_ERR(2, 12, __pyx_L1_error)
-  }
-  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __Pyx_GIVEREF(__pyx_t_1);
-  __Pyx_GOTREF(__pyx_v___pyx_result->name);
-  __Pyx_DECREF(__pyx_v___pyx_result->name);
-  __pyx_v___pyx_result->name = __pyx_t_1;
-  __pyx_t_1 = 0;
-
-  /* "(tree fragment)":13
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):             # <<<<<<<<<<<<<<
- *         __pyx_result.__dict__.update(__pyx_state[1])
- */
-  if (unlikely(__pyx_v___pyx_state == Py_None)) {
-    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
-    __PYX_ERR(2, 13, __pyx_L1_error)
-  }
-  __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
-  __pyx_t_4 = ((__pyx_t_3 > 1) != 0);
-  if (__pyx_t_4) {
-  } else {
-    __pyx_t_2 = __pyx_t_4;
-    goto __pyx_L4_bool_binop_done;
-  }
-  __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
-  __pyx_t_5 = (__pyx_t_4 != 0);
-  __pyx_t_2 = __pyx_t_5;
-  __pyx_L4_bool_binop_done:;
-  if (__pyx_t_2) {
-
-    /* "(tree fragment)":14
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
- *         __pyx_result.__dict__.update(__pyx_state[1])             # <<<<<<<<<<<<<<
- */
-    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_7);
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (unlikely(__pyx_v___pyx_state == Py_None)) {
-      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
-      __PYX_ERR(2, 14, __pyx_L1_error)
-    }
-    __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_6);
-    __pyx_t_8 = NULL;
-    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
-      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
-      if (likely(__pyx_t_8)) {
-        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
-        __Pyx_INCREF(__pyx_t_8);
-        __Pyx_INCREF(function);
-        __Pyx_DECREF_SET(__pyx_t_7, function);
-      }
-    }
-    __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
-    __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
-    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
-    if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error)
-    __Pyx_GOTREF(__pyx_t_1);
-    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-    /* "(tree fragment)":13
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):             # <<<<<<<<<<<<<<
- *         __pyx_result.__dict__.update(__pyx_state[1])
- */
-  }
-
-  /* "(tree fragment)":11
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
- */
-
-  /* function exit code */
-  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_6);
-  __Pyx_XDECREF(__pyx_t_7);
-  __Pyx_XDECREF(__pyx_t_8);
-  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
-  __pyx_r = 0;
-  __pyx_L0:;
-  __Pyx_XGIVEREF(__pyx_r);
-  __Pyx_RefNannyFinishContext();
-  return __pyx_r;
-}
-
-static struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *__pyx_freelist_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator[8];
-static int __pyx_freecount_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator = 0;
-
-static PyObject *__pyx_tp_new_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
-  struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *p;
-  PyObject *o;
-  if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator)))) {
-    o = (PyObject*)__pyx_freelist_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator[--__pyx_freecount_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator];
-    memset(o, 0, sizeof(struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator));
-    (void) PyObject_INIT(o, t);
-    PyObject_GC_Track(o);
-  } else {
-    o = (*t->tp_alloc)(t, 0);
-    if (unlikely(!o)) return 0;
-  }
-  p = ((struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)o);
-  p->__pyx_v_fwd_cur.data = NULL;
-  p->__pyx_v_fwd_cur.memview = NULL;
-  p->__pyx_v_fwd_prev.data = NULL;
-  p->__pyx_v_fwd_prev.memview = NULL;
-  p->__pyx_v_om_densities.data = NULL;
-  p->__pyx_v_om_densities.memview = NULL;
-  p->__pyx_v_om_pointers.data = NULL;
-  p->__pyx_v_om_pointers.memview = NULL;
-  p->__pyx_v_tm_probabilities.data = NULL;
-  p->__pyx_v_tm_probabilities.memview = NULL;
-  p->__pyx_v_tm_ptrs.data = NULL;
-  p->__pyx_v_tm_ptrs.memview = NULL;
-  p->__pyx_v_tm_states.data = NULL;
-  p->__pyx_v_tm_states.memview = NULL;
-  return o;
-}
-
-static void __pyx_tp_dealloc_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator(PyObject *o) {
-  struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *p = (struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)o;
-  PyObject_GC_UnTrack(o);
-  Py_CLEAR(p->__pyx_v_block_size);
-  Py_CLEAR(p->__pyx_v_observations);
-  Py_CLEAR(p->__pyx_v_om);
-  Py_CLEAR(p->__pyx_v_self);
-  Py_CLEAR(p->__pyx_v_tm);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_fwd_cur, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_fwd_prev, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_om_densities, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_om_pointers, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_tm_probabilities, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_tm_ptrs, 1);
-  __PYX_XDEC_MEMVIEW(&p->__pyx_v_tm_states, 1);
-  if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator)))) {
-    __pyx_freelist_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator[__pyx_freecount_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator++] = ((struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)o);
-  } else {
-    (*Py_TYPE(o)->tp_free)(o);
-  }
-}
-
-static int __pyx_tp_traverse_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator(PyObject *o, visitproc v, void *a) {
-  int e;
-  struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *p = (struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator *)o;
-  if (p->__pyx_v_block_size) {
-    e = (*v)(p->__pyx_v_block_size, a); if (e) return e;
-  }
-  if (p->__pyx_v_observations) {
-    e = (*v)(p->__pyx_v_observations, a); if (e) return e;
-  }
-  if (p->__pyx_v_om) {
-    e = (*v)(p->__pyx_v_om, a); if (e) return e;
-  }
-  if (p->__pyx_v_self) {
-    e = (*v)(p->__pyx_v_self, a); if (e) return e;
-  }
-  if (p->__pyx_v_tm) {
-    e = (*v)(p->__pyx_v_tm, a); if (e) return e;
-  }
-  return 0;
-}
-
-static PyTypeObject __pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator = {
-  PyVarObject_HEAD_INIT(0, 0)
-  "madmom.ml.hmm.__pyx_scope_struct__forward_generator", /*tp_name*/
-  sizeof(struct __pyx_obj_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator), /*tp_basicsize*/
-  0, /*tp_itemsize*/
-  __pyx_tp_dealloc_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator, /*tp_dealloc*/
-  #if PY_VERSION_HEX < 0x030800b4
-  0, /*tp_print*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4
-  0, /*tp_vectorcall_offset*/
-  #endif
-  0, /*tp_getattr*/
-  0, /*tp_setattr*/
-  #if PY_MAJOR_VERSION < 3
-  0, /*tp_compare*/
-  #endif
-  #if PY_MAJOR_VERSION >= 3
-  0, /*tp_as_async*/
-  #endif
-  0, /*tp_repr*/
-  0, /*tp_as_number*/
-  0, /*tp_as_sequence*/
-  0, /*tp_as_mapping*/
-  0, /*tp_hash*/
-  0, /*tp_call*/
-  0, /*tp_str*/
-  0, /*tp_getattro*/
-  0, /*tp_setattro*/
-  0, /*tp_as_buffer*/
-  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-  0, /*tp_doc*/
-  __pyx_tp_traverse_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator, /*tp_traverse*/
-  0, /*tp_clear*/
-  0, /*tp_richcompare*/
-  0, /*tp_weaklistoffset*/
-  0, /*tp_iter*/
-  0, /*tp_iternext*/
-  0, /*tp_methods*/
-  0, /*tp_members*/
-  0, /*tp_getset*/
-  0, /*tp_base*/
-  0, /*tp_dict*/
-  0, /*tp_descr_get*/
-  0, /*tp_descr_set*/
-  0, /*tp_dictoffset*/
-  0, /*tp_init*/
-  0, /*tp_alloc*/
-  __pyx_tp_new_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator, /*tp_new*/
-  0, /*tp_free*/
-  0, /*tp_is_gc*/
-  0, /*tp_bases*/
-  0, /*tp_mro*/
-  0, /*tp_cache*/
-  0, /*tp_subclasses*/
-  0, /*tp_weaklist*/
-  0, /*tp_del*/
-  0, /*tp_version_tag*/
-  #if PY_VERSION_HEX >= 0x030400a1
-  0, /*tp_finalize*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b1
-  0, /*tp_vectorcall*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-  0, /*tp_print*/
-  #endif
-};
-static struct __pyx_vtabstruct_array __pyx_vtable_array;
-
-static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
-  struct __pyx_array_obj *p;
-  PyObject *o;
-  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
-    o = (*t->tp_alloc)(t, 0);
-  } else {
-    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
-  }
-  if (unlikely(!o)) return 0;
-  p = ((struct __pyx_array_obj *)o);
-  p->__pyx_vtab = __pyx_vtabptr_array;
-  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
-  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
-  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
-  return o;
-  bad:
-  Py_DECREF(o); o = 0;
-  return NULL;
-}
-
-static void __pyx_tp_dealloc_array(PyObject *o) {
-  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
-  #if CYTHON_USE_TP_FINALIZE
-  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
-    if (PyObject_CallFinalizerFromDealloc(o)) return;
-  }
-  #endif
-  {
-    PyObject *etype, *eval, *etb;
-    PyErr_Fetch(&etype, &eval, &etb);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
-    __pyx_array___dealloc__(o);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
-    PyErr_Restore(etype, eval, etb);
-  }
-  Py_CLEAR(p->mode);
-  Py_CLEAR(p->_format);
-  (*Py_TYPE(o)->tp_free)(o);
-}
-static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
-  PyObject *r;
-  PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
-  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
-  Py_DECREF(x);
-  return r;
-}
-
-static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
-  if (v) {
-    return __pyx_array___setitem__(o, i, v);
-  }
-  else {
-    PyErr_Format(PyExc_NotImplementedError,
-      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
-    return -1;
-  }
-}
-
-static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
-  PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
-  if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
-    PyErr_Clear();
-    v = __pyx_array___getattr__(o, n);
-  }
-  return v;
-}
-
-static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
-}
-
-static PyMethodDef __pyx_methods_array[] = {
-  {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
-  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
-  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
-  {0, 0, 0, 0}
-};
-
-static struct PyGetSetDef __pyx_getsets_array[] = {
-  {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
-  {0, 0, 0, 0, 0}
-};
-
-static PySequenceMethods __pyx_tp_as_sequence_array = {
-  __pyx_array___len__, /*sq_length*/
-  0, /*sq_concat*/
-  0, /*sq_repeat*/
-  __pyx_sq_item_array, /*sq_item*/
-  0, /*sq_slice*/
-  0, /*sq_ass_item*/
-  0, /*sq_ass_slice*/
-  0, /*sq_contains*/
-  0, /*sq_inplace_concat*/
-  0, /*sq_inplace_repeat*/
-};
-
-static PyMappingMethods __pyx_tp_as_mapping_array = {
-  __pyx_array___len__, /*mp_length*/
-  __pyx_array___getitem__, /*mp_subscript*/
-  __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
-};
-
-static PyBufferProcs __pyx_tp_as_buffer_array = {
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getreadbuffer*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getwritebuffer*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getsegcount*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getcharbuffer*/
-  #endif
-  __pyx_array_getbuffer, /*bf_getbuffer*/
-  0, /*bf_releasebuffer*/
-};
-
-static PyTypeObject __pyx_type___pyx_array = {
-  PyVarObject_HEAD_INIT(0, 0)
-  "madmom.ml.hmm.array", /*tp_name*/
-  sizeof(struct __pyx_array_obj), /*tp_basicsize*/
-  0, /*tp_itemsize*/
-  __pyx_tp_dealloc_array, /*tp_dealloc*/
-  #if PY_VERSION_HEX < 0x030800b4
-  0, /*tp_print*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4
-  0, /*tp_vectorcall_offset*/
-  #endif
-  0, /*tp_getattr*/
-  0, /*tp_setattr*/
-  #if PY_MAJOR_VERSION < 3
-  0, /*tp_compare*/
-  #endif
-  #if PY_MAJOR_VERSION >= 3
-  0, /*tp_as_async*/
-  #endif
-  0, /*tp_repr*/
-  0, /*tp_as_number*/
-  &__pyx_tp_as_sequence_array, /*tp_as_sequence*/
-  &__pyx_tp_as_mapping_array, /*tp_as_mapping*/
-  0, /*tp_hash*/
-  0, /*tp_call*/
-  0, /*tp_str*/
-  __pyx_tp_getattro_array, /*tp_getattro*/
-  0, /*tp_setattro*/
-  &__pyx_tp_as_buffer_array, /*tp_as_buffer*/
-  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
-  0, /*tp_doc*/
-  0, /*tp_traverse*/
-  0, /*tp_clear*/
-  0, /*tp_richcompare*/
-  0, /*tp_weaklistoffset*/
-  0, /*tp_iter*/
-  0, /*tp_iternext*/
-  __pyx_methods_array, /*tp_methods*/
-  0, /*tp_members*/
-  __pyx_getsets_array, /*tp_getset*/
-  0, /*tp_base*/
-  0, /*tp_dict*/
-  0, /*tp_descr_get*/
-  0, /*tp_descr_set*/
-  0, /*tp_dictoffset*/
-  0, /*tp_init*/
-  0, /*tp_alloc*/
-  __pyx_tp_new_array, /*tp_new*/
-  0, /*tp_free*/
-  0, /*tp_is_gc*/
-  0, /*tp_bases*/
-  0, /*tp_mro*/
-  0, /*tp_cache*/
-  0, /*tp_subclasses*/
-  0, /*tp_weaklist*/
-  0, /*tp_del*/
-  0, /*tp_version_tag*/
-  #if PY_VERSION_HEX >= 0x030400a1
-  0, /*tp_finalize*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b1
-  0, /*tp_vectorcall*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-  0, /*tp_print*/
-  #endif
-};
-
-static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
-  struct __pyx_MemviewEnum_obj *p;
-  PyObject *o;
-  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
-    o = (*t->tp_alloc)(t, 0);
-  } else {
-    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
-  }
-  if (unlikely(!o)) return 0;
-  p = ((struct __pyx_MemviewEnum_obj *)o);
-  p->name = Py_None; Py_INCREF(Py_None);
-  return o;
-}
-
-static void __pyx_tp_dealloc_Enum(PyObject *o) {
-  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
-  #if CYTHON_USE_TP_FINALIZE
-  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
-    if (PyObject_CallFinalizerFromDealloc(o)) return;
-  }
-  #endif
-  PyObject_GC_UnTrack(o);
-  Py_CLEAR(p->name);
-  (*Py_TYPE(o)->tp_free)(o);
-}
-
-static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
-  int e;
-  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
-  if (p->name) {
-    e = (*v)(p->name, a); if (e) return e;
-  }
-  return 0;
-}
-
-static int __pyx_tp_clear_Enum(PyObject *o) {
-  PyObject* tmp;
-  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
-  tmp = ((PyObject*)p->name);
-  p->name = Py_None; Py_INCREF(Py_None);
-  Py_XDECREF(tmp);
-  return 0;
-}
-
-static PyMethodDef __pyx_methods_Enum[] = {
-  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
-  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
-  {0, 0, 0, 0}
-};
-
-static PyTypeObject __pyx_type___pyx_MemviewEnum = {
-  PyVarObject_HEAD_INIT(0, 0)
-  "madmom.ml.hmm.Enum", /*tp_name*/
-  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
-  0, /*tp_itemsize*/
-  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
-  #if PY_VERSION_HEX < 0x030800b4
-  0, /*tp_print*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4
-  0, /*tp_vectorcall_offset*/
-  #endif
-  0, /*tp_getattr*/
-  0, /*tp_setattr*/
-  #if PY_MAJOR_VERSION < 3
-  0, /*tp_compare*/
-  #endif
-  #if PY_MAJOR_VERSION >= 3
-  0, /*tp_as_async*/
-  #endif
-  __pyx_MemviewEnum___repr__, /*tp_repr*/
-  0, /*tp_as_number*/
-  0, /*tp_as_sequence*/
-  0, /*tp_as_mapping*/
-  0, /*tp_hash*/
-  0, /*tp_call*/
-  0, /*tp_str*/
-  0, /*tp_getattro*/
-  0, /*tp_setattro*/
-  0, /*tp_as_buffer*/
-  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-  0, /*tp_doc*/
-  __pyx_tp_traverse_Enum, /*tp_traverse*/
-  __pyx_tp_clear_Enum, /*tp_clear*/
-  0, /*tp_richcompare*/
-  0, /*tp_weaklistoffset*/
-  0, /*tp_iter*/
-  0, /*tp_iternext*/
-  __pyx_methods_Enum, /*tp_methods*/
-  0, /*tp_members*/
-  0, /*tp_getset*/
-  0, /*tp_base*/
-  0, /*tp_dict*/
-  0, /*tp_descr_get*/
-  0, /*tp_descr_set*/
-  0, /*tp_dictoffset*/
-  __pyx_MemviewEnum___init__, /*tp_init*/
-  0, /*tp_alloc*/
-  __pyx_tp_new_Enum, /*tp_new*/
-  0, /*tp_free*/
-  0, /*tp_is_gc*/
-  0, /*tp_bases*/
-  0, /*tp_mro*/
-  0, /*tp_cache*/
-  0, /*tp_subclasses*/
-  0, /*tp_weaklist*/
-  0, /*tp_del*/
-  0, /*tp_version_tag*/
-  #if PY_VERSION_HEX >= 0x030400a1
-  0, /*tp_finalize*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b1
-  0, /*tp_vectorcall*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-  0, /*tp_print*/
-  #endif
-};
-static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
-
-static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
-  struct __pyx_memoryview_obj *p;
-  PyObject *o;
-  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
-    o = (*t->tp_alloc)(t, 0);
-  } else {
-    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
-  }
-  if (unlikely(!o)) return 0;
-  p = ((struct __pyx_memoryview_obj *)o);
-  p->__pyx_vtab = __pyx_vtabptr_memoryview;
-  p->obj = Py_None; Py_INCREF(Py_None);
-  p->_size = Py_None; Py_INCREF(Py_None);
-  p->_array_interface = Py_None; Py_INCREF(Py_None);
-  p->view.obj = NULL;
-  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
-  return o;
-  bad:
-  Py_DECREF(o); o = 0;
-  return NULL;
-}
-
-static void __pyx_tp_dealloc_memoryview(PyObject *o) {
-  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
-  #if CYTHON_USE_TP_FINALIZE
-  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
-    if (PyObject_CallFinalizerFromDealloc(o)) return;
-  }
-  #endif
-  PyObject_GC_UnTrack(o);
-  {
-    PyObject *etype, *eval, *etb;
-    PyErr_Fetch(&etype, &eval, &etb);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
-    __pyx_memoryview___dealloc__(o);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
-    PyErr_Restore(etype, eval, etb);
-  }
-  Py_CLEAR(p->obj);
-  Py_CLEAR(p->_size);
-  Py_CLEAR(p->_array_interface);
-  (*Py_TYPE(o)->tp_free)(o);
-}
-
-static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
-  int e;
-  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
-  if (p->obj) {
-    e = (*v)(p->obj, a); if (e) return e;
-  }
-  if (p->_size) {
-    e = (*v)(p->_size, a); if (e) return e;
-  }
-  if (p->_array_interface) {
-    e = (*v)(p->_array_interface, a); if (e) return e;
-  }
-  if (p->view.obj) {
-    e = (*v)(p->view.obj, a); if (e) return e;
-  }
-  return 0;
-}
-
-static int __pyx_tp_clear_memoryview(PyObject *o) {
-  PyObject* tmp;
-  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
-  tmp = ((PyObject*)p->obj);
-  p->obj = Py_None; Py_INCREF(Py_None);
-  Py_XDECREF(tmp);
-  tmp = ((PyObject*)p->_size);
-  p->_size = Py_None; Py_INCREF(Py_None);
-  Py_XDECREF(tmp);
-  tmp = ((PyObject*)p->_array_interface);
-  p->_array_interface = Py_None; Py_INCREF(Py_None);
-  Py_XDECREF(tmp);
-  Py_CLEAR(p->view.obj);
-  return 0;
-}
-static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
-  PyObject *r;
-  PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
-  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
-  Py_DECREF(x);
-  return r;
-}
-
-static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
-  if (v) {
-    return __pyx_memoryview___setitem__(o, i, v);
-  }
-  else {
-    PyErr_Format(PyExc_NotImplementedError,
-      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
-    return -1;
-  }
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
-}
-
-static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
-}
-
-static PyMethodDef __pyx_methods_memoryview[] = {
-  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
-  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
-  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
-  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
-  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
-  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
-  {0, 0, 0, 0}
-};
-
-static struct PyGetSetDef __pyx_getsets_memoryview[] = {
-  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
-  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
-  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
-  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
-  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
-  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
-  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
-  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
-  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
-  {0, 0, 0, 0, 0}
-};
-
-static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
-  __pyx_memoryview___len__, /*sq_length*/
-  0, /*sq_concat*/
-  0, /*sq_repeat*/
-  __pyx_sq_item_memoryview, /*sq_item*/
-  0, /*sq_slice*/
-  0, /*sq_ass_item*/
-  0, /*sq_ass_slice*/
-  0, /*sq_contains*/
-  0, /*sq_inplace_concat*/
-  0, /*sq_inplace_repeat*/
-};
-
-static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
-  __pyx_memoryview___len__, /*mp_length*/
-  __pyx_memoryview___getitem__, /*mp_subscript*/
-  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
-};
-
-static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getreadbuffer*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getwritebuffer*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getsegcount*/
-  #endif
-  #if PY_MAJOR_VERSION < 3
-  0, /*bf_getcharbuffer*/
-  #endif
-  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
-  0, /*bf_releasebuffer*/
-};
-
-static PyTypeObject __pyx_type___pyx_memoryview = {
-  PyVarObject_HEAD_INIT(0, 0)
-  "madmom.ml.hmm.memoryview", /*tp_name*/
-  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
-  0, /*tp_itemsize*/
-  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
-  #if PY_VERSION_HEX < 0x030800b4
-  0, /*tp_print*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4
-  0, /*tp_vectorcall_offset*/
-  #endif
-  0, /*tp_getattr*/
-  0, /*tp_setattr*/
-  #if PY_MAJOR_VERSION < 3
-  0, /*tp_compare*/
-  #endif
-  #if PY_MAJOR_VERSION >= 3
-  0, /*tp_as_async*/
-  #endif
-  __pyx_memoryview___repr__, /*tp_repr*/
-  0, /*tp_as_number*/
-  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
-  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
-  0, /*tp_hash*/
-  0, /*tp_call*/
-  __pyx_memoryview___str__, /*tp_str*/
-  0, /*tp_getattro*/
-  0, /*tp_setattro*/
-  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
-  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-  0, /*tp_doc*/
-  __pyx_tp_traverse_memoryview, /*tp_traverse*/
-  __pyx_tp_clear_memoryview, /*tp_clear*/
-  0, /*tp_richcompare*/
-  0, /*tp_weaklistoffset*/
-  0, /*tp_iter*/
-  0, /*tp_iternext*/
-  __pyx_methods_memoryview, /*tp_methods*/
-  0, /*tp_members*/
-  __pyx_getsets_memoryview, /*tp_getset*/
-  0, /*tp_base*/
-  0, /*tp_dict*/
-  0, /*tp_descr_get*/
-  0, /*tp_descr_set*/
-  0, /*tp_dictoffset*/
-  0, /*tp_init*/
-  0, /*tp_alloc*/
-  __pyx_tp_new_memoryview, /*tp_new*/
-  0, /*tp_free*/
-  0, /*tp_is_gc*/
-  0, /*tp_bases*/
-  0, /*tp_mro*/
-  0, /*tp_cache*/
-  0, /*tp_subclasses*/
-  0, /*tp_weaklist*/
-  0, /*tp_del*/
-  0, /*tp_version_tag*/
-  #if PY_VERSION_HEX >= 0x030400a1
-  0, /*tp_finalize*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b1
-  0, /*tp_vectorcall*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-  0, /*tp_print*/
-  #endif
-};
-static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
-
-static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
-  struct __pyx_memoryviewslice_obj *p;
-  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
-  if (unlikely(!o)) return 0;
-  p = ((struct __pyx_memoryviewslice_obj *)o);
-  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
-  p->from_object = Py_None; Py_INCREF(Py_None);
-  p->from_slice.memview = NULL;
-  return o;
-}
-
-static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
-  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
-  #if CYTHON_USE_TP_FINALIZE
-  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
-    if (PyObject_CallFinalizerFromDealloc(o)) return;
-  }
-  #endif
-  PyObject_GC_UnTrack(o);
-  {
-    PyObject *etype, *eval, *etb;
-    PyErr_Fetch(&etype, &eval, &etb);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
-    __pyx_memoryviewslice___dealloc__(o);
-    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
-    PyErr_Restore(etype, eval, etb);
-  }
-  Py_CLEAR(p->from_object);
-  PyObject_GC_Track(o);
-  __pyx_tp_dealloc_memoryview(o);
-}
-
-static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
-  int e;
-  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
-  e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
-  if (p->from_object) {
-    e = (*v)(p->from_object, a); if (e) return e;
-  }
-  return 0;
-}
-
-static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
-  PyObject* tmp;
-  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
-  __pyx_tp_clear_memoryview(o);
-  tmp = ((PyObject*)p->from_object);
-  p->from_object = Py_None; Py_INCREF(Py_None);
-  Py_XDECREF(tmp);
-  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
-  return 0;
-}
-
-static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
-  return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
-}
-
-static PyMethodDef __pyx_methods__memoryviewslice[] = {
-  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
-  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
-  {0, 0, 0, 0}
-};
-
-static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
-  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
-  {0, 0, 0, 0, 0}
-};
-
-static PyTypeObject __pyx_type___pyx_memoryviewslice = {
-  PyVarObject_HEAD_INIT(0, 0)
-  "madmom.ml.hmm._memoryviewslice", /*tp_name*/
-  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
-  0, /*tp_itemsize*/
-  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
-  #if PY_VERSION_HEX < 0x030800b4
-  0, /*tp_print*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4
-  0, /*tp_vectorcall_offset*/
-  #endif
-  0, /*tp_getattr*/
-  0, /*tp_setattr*/
-  #if PY_MAJOR_VERSION < 3
-  0, /*tp_compare*/
-  #endif
-  #if PY_MAJOR_VERSION >= 3
-  0, /*tp_as_async*/
-  #endif
-  #if CYTHON_COMPILING_IN_PYPY
-  __pyx_memoryview___repr__, /*tp_repr*/
-  #else
-  0, /*tp_repr*/
-  #endif
-  0, /*tp_as_number*/
-  0, /*tp_as_sequence*/
-  0, /*tp_as_mapping*/
-  0, /*tp_hash*/
-  0, /*tp_call*/
-  #if CYTHON_COMPILING_IN_PYPY
-  __pyx_memoryview___str__, /*tp_str*/
-  #else
-  0, /*tp_str*/
-  #endif
-  0, /*tp_getattro*/
-  0, /*tp_setattro*/
-  0, /*tp_as_buffer*/
-  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-  "Internal class for passing memoryview slices to Python", /*tp_doc*/
-  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
-  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
-  0, /*tp_richcompare*/
-  0, /*tp_weaklistoffset*/
-  0, /*tp_iter*/
-  0, /*tp_iternext*/
-  __pyx_methods__memoryviewslice, /*tp_methods*/
-  0, /*tp_members*/
-  __pyx_getsets__memoryviewslice, /*tp_getset*/
-  0, /*tp_base*/
-  0, /*tp_dict*/
-  0, /*tp_descr_get*/
-  0, /*tp_descr_set*/
-  0, /*tp_dictoffset*/
-  0, /*tp_init*/
-  0, /*tp_alloc*/
-  __pyx_tp_new__memoryviewslice, /*tp_new*/
-  0, /*tp_free*/
-  0, /*tp_is_gc*/
-  0, /*tp_bases*/
-  0, /*tp_mro*/
-  0, /*tp_cache*/
-  0, /*tp_subclasses*/
-  0, /*tp_weaklist*/
-  0, /*tp_del*/
-  0, /*tp_version_tag*/
-  #if PY_VERSION_HEX >= 0x030400a1
-  0, /*tp_finalize*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b1
-  0, /*tp_vectorcall*/
-  #endif
-  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-  0, /*tp_print*/
-  #endif
-};
-
-static PyMethodDef __pyx_methods[] = {
-  {0, 0, 0, 0}
-};
-
-#if PY_MAJOR_VERSION >= 3
-#if CYTHON_PEP489_MULTI_PHASE_INIT
-static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
-static int __pyx_pymod_exec_hmm(PyObject* module); /*proto*/
-static PyModuleDef_Slot __pyx_moduledef_slots[] = {
-  {Py_mod_create, (void*)__pyx_pymod_create},
-  {Py_mod_exec, (void*)__pyx_pymod_exec_hmm},
-  {0, NULL}
-};
-#endif
-
-static struct PyModuleDef __pyx_moduledef = {
-    PyModuleDef_HEAD_INIT,
-    "hmm",
-    __pyx_k_This_module_contains_Hidden_Mar, /* m_doc */
-  #if CYTHON_PEP489_MULTI_PHASE_INIT
-    0, /* m_size */
-  #else
-    -1, /* m_size */
-  #endif
-    __pyx_methods /* m_methods */,
-  #if CYTHON_PEP489_MULTI_PHASE_INIT
-    __pyx_moduledef_slots, /* m_slots */
-  #else
-    NULL, /* m_reload */
-  #endif
-    NULL, /* m_traverse */
-    NULL, /* m_clear */
-    NULL /* m_free */
-};
-#endif
-#ifndef CYTHON_SMALL_CODE
-#if defined(__clang__)
-    #define CYTHON_SMALL_CODE
-#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
-    #define CYTHON_SMALL_CODE __attribute__((cold))
-#else
-    #define CYTHON_SMALL_CODE
-#endif
-#endif
-
-static __Pyx_StringTabEntry __pyx_string_tab[] = {
-  {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
-  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
-  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
-  {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
-  {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
-  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
-  {&__pyx_n_s_DiscreteObservationModel, __pyx_k_DiscreteObservationModel, sizeof(__pyx_k_DiscreteObservationModel), 0, 0, 1, 1},
-  {&__pyx_n_s_DiscreteObservationModel___init, __pyx_k_DiscreteObservationModel___init, sizeof(__pyx_k_DiscreteObservationModel___init), 0, 0, 1, 1},
-  {&__pyx_n_s_DiscreteObservationModel_densiti, __pyx_k_DiscreteObservationModel_densiti, sizeof(__pyx_k_DiscreteObservationModel_densiti), 0, 0, 1, 1},
-  {&__pyx_n_s_DiscreteObservationModel_log_den, __pyx_k_DiscreteObservationModel_log_den, sizeof(__pyx_k_DiscreteObservationModel_log_den), 0, 0, 1, 1},
-  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
-  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
-  {&__pyx_n_s_HMM, __pyx_k_HMM, sizeof(__pyx_k_HMM), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel, __pyx_k_HiddenMarkovModel, sizeof(__pyx_k_HiddenMarkovModel), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel___getstate, __pyx_k_HiddenMarkovModel___getstate, sizeof(__pyx_k_HiddenMarkovModel___getstate), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel___init, __pyx_k_HiddenMarkovModel___init, sizeof(__pyx_k_HiddenMarkovModel___init), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel___setstate, __pyx_k_HiddenMarkovModel___setstate, sizeof(__pyx_k_HiddenMarkovModel___setstate), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel_forward, __pyx_k_HiddenMarkovModel_forward, sizeof(__pyx_k_HiddenMarkovModel_forward), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel_forward_genera, __pyx_k_HiddenMarkovModel_forward_genera, sizeof(__pyx_k_HiddenMarkovModel_forward_genera), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel_reset, __pyx_k_HiddenMarkovModel_reset, sizeof(__pyx_k_HiddenMarkovModel_reset), 0, 0, 1, 1},
-  {&__pyx_n_s_HiddenMarkovModel_viterbi, __pyx_k_HiddenMarkovModel_viterbi, sizeof(__pyx_k_HiddenMarkovModel_viterbi), 0, 0, 1, 1},
-  {&__pyx_kp_s_Hidden_Markov_Model_To_search_f, __pyx_k_Hidden_Markov_Model_To_search_f, sizeof(__pyx_k_Hidden_Markov_Model_To_search_f), 0, 0, 1, 0},
-  {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
-  {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
-  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
-  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
-  {&__pyx_kp_s_Initial_distribution_is_not_a_pr, __pyx_k_Initial_distribution_is_not_a_pr, sizeof(__pyx_k_Initial_distribution_is_not_a_pr), 0, 0, 1, 0},
-  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
-  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
-  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
-  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
-  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
-  {&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1},
-  {&__pyx_kp_s_Not_a_probability_distribution, __pyx_k_Not_a_probability_distribution, sizeof(__pyx_k_Not_a_probability_distribution), 0, 0, 1, 0},
-  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
-  {&__pyx_n_s_ObservationModel, __pyx_k_ObservationModel, sizeof(__pyx_k_ObservationModel), 0, 0, 1, 1},
-  {&__pyx_n_s_ObservationModel___init, __pyx_k_ObservationModel___init, sizeof(__pyx_k_ObservationModel___init), 0, 0, 1, 1},
-  {&__pyx_n_s_ObservationModel_densities, __pyx_k_ObservationModel_densities, sizeof(__pyx_k_ObservationModel_densities), 0, 0, 1, 1},
-  {&__pyx_n_s_ObservationModel_log_densities, __pyx_k_ObservationModel_log_densities, sizeof(__pyx_k_ObservationModel_log_densities), 0, 0, 1, 1},
-  {&__pyx_kp_s_Observation_model_class_for_a_H, __pyx_k_Observation_model_class_for_a_H, sizeof(__pyx_k_Observation_model_class_for_a_H), 0, 0, 1, 0},
-  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
-  {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
-  {&__pyx_n_s_RuntimeWarning, __pyx_k_RuntimeWarning, sizeof(__pyx_k_RuntimeWarning), 0, 0, 1, 1},
-  {&__pyx_kp_s_Simple_discrete_observation_mod, __pyx_k_Simple_discrete_observation_mod, sizeof(__pyx_k_Simple_discrete_observation_mod), 0, 0, 1, 0},
-  {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel, __pyx_k_TransitionModel, sizeof(__pyx_k_TransitionModel), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel___init, __pyx_k_TransitionModel___init, sizeof(__pyx_k_TransitionModel___init), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_from_dense, __pyx_k_TransitionModel_from_dense, sizeof(__pyx_k_TransitionModel_from_dense), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_log_probabilitie, __pyx_k_TransitionModel_log_probabilitie, sizeof(__pyx_k_TransitionModel_log_probabilitie), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_make_dense, __pyx_k_TransitionModel_make_dense, sizeof(__pyx_k_TransitionModel_make_dense), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_make_sparse, __pyx_k_TransitionModel_make_sparse, sizeof(__pyx_k_TransitionModel_make_sparse), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_num_states, __pyx_k_TransitionModel_num_states, sizeof(__pyx_k_TransitionModel_num_states), 0, 0, 1, 1},
-  {&__pyx_n_s_TransitionModel_num_transitions, __pyx_k_TransitionModel_num_transitions, sizeof(__pyx_k_TransitionModel_num_transitions), 0, 0, 1, 1},
-  {&__pyx_kp_s_Transition_model_class_for_a_HM, __pyx_k_Transition_model_class_for_a_HM, sizeof(__pyx_k_Transition_model_class_for_a_HM), 0, 0, 1, 0},
-  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
-  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
-  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
-  {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
-  {&__pyx_n_s_allclose, __pyx_k_allclose, sizeof(__pyx_k_allclose), 0, 0, 1, 1},
-  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
-  {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1},
-  {&__pyx_n_s_argmax, __pyx_k_argmax, sizeof(__pyx_k_argmax), 0, 0, 1, 1},
-  {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
-  {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
-  {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1},
-  {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1},
-  {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1},
-  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
-  {&__pyx_n_s_bincount, __pyx_k_bincount, sizeof(__pyx_k_bincount), 0, 0, 1, 1},
-  {&__pyx_n_s_block_size, __pyx_k_block_size, sizeof(__pyx_k_block_size), 0, 0, 1, 1},
-  {&__pyx_n_s_block_sz, __pyx_k_block_sz, sizeof(__pyx_k_block_sz), 0, 0, 1, 1},
-  {&__pyx_n_s_bt_pointers, __pyx_k_bt_pointers, sizeof(__pyx_k_bt_pointers), 0, 0, 1, 1},
-  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
-  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
-  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
-  {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
-  {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1},
-  {&__pyx_n_s_cls, __pyx_k_cls, sizeof(__pyx_k_cls), 0, 0, 1, 1},
-  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
-  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
-  {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1},
-  {&__pyx_n_s_csr_matrix, __pyx_k_csr_matrix, sizeof(__pyx_k_csr_matrix), 0, 0, 1, 1},
-  {&__pyx_n_s_current_viterbi, __pyx_k_current_viterbi, sizeof(__pyx_k_current_viterbi), 0, 0, 1, 1},
-  {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1},
-  {&__pyx_n_s_densities, __pyx_k_densities, sizeof(__pyx_k_densities), 0, 0, 1, 1},
-  {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1},
-  {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
-  {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1},
-  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
-  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
-  {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
-  {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
-  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
-  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
-  {&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1},
-  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
-  {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
-  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
-  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
-  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
-  {&__pyx_n_s_forward, __pyx_k_forward, sizeof(__pyx_k_forward), 0, 0, 1, 1},
-  {&__pyx_n_s_forward_generator, __pyx_k_forward_generator, sizeof(__pyx_k_forward_generator), 0, 0, 1, 1},
-  {&__pyx_n_s_frame, __pyx_k_frame, sizeof(__pyx_k_frame), 0, 0, 1, 1},
-  {&__pyx_n_s_from_dense, __pyx_k_from_dense, sizeof(__pyx_k_from_dense), 0, 0, 1, 1},
-  {&__pyx_n_s_fwd, __pyx_k_fwd, sizeof(__pyx_k_fwd), 0, 0, 1, 1},
-  {&__pyx_n_s_fwd_cur, __pyx_k_fwd_cur, sizeof(__pyx_k_fwd_cur), 0, 0, 1, 1},
-  {&__pyx_n_s_fwd_prev, __pyx_k_fwd_prev, sizeof(__pyx_k_fwd_prev), 0, 0, 1, 1},
-  {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
-  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
-  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
-  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
-  {&__pyx_n_s_indices, __pyx_k_indices, sizeof(__pyx_k_indices), 0, 0, 1, 1},
-  {&__pyx_n_s_indptr, __pyx_k_indptr, sizeof(__pyx_k_indptr), 0, 0, 1, 1},
-  {&__pyx_kp_s_inf_log_probability_during_Vite, __pyx_k_inf_log_probability_during_Vite, sizeof(__pyx_k_inf_log_probability_during_Vite), 0, 0, 1, 0},
-  {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1},
-  {&__pyx_n_s_initial_distribution, __pyx_k_initial_distribution, sizeof(__pyx_k_initial_distribution), 0, 0, 1, 1},
-  {&__pyx_n_s_int, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1},
-  {&__pyx_n_s_isinf, __pyx_k_isinf, sizeof(__pyx_k_isinf), 0, 0, 1, 1},
-  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
-  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
-  {&__pyx_n_s_log, __pyx_k_log, sizeof(__pyx_k_log), 0, 0, 1, 1},
-  {&__pyx_n_s_log_densities, __pyx_k_log_densities, sizeof(__pyx_k_log_densities), 0, 0, 1, 1},
-  {&__pyx_n_s_log_probabilities, __pyx_k_log_probabilities, sizeof(__pyx_k_log_probabilities), 0, 0, 1, 1},
-  {&__pyx_n_s_log_probability, __pyx_k_log_probability, sizeof(__pyx_k_log_probability), 0, 0, 1, 1},
-  {&__pyx_n_s_madmom_ml_hmm, __pyx_k_madmom_ml_hmm, sizeof(__pyx_k_madmom_ml_hmm), 0, 0, 1, 1},
-  {&__pyx_kp_s_madmom_ml_hmm_pyx, __pyx_k_madmom_ml_hmm_pyx, sizeof(__pyx_k_madmom_ml_hmm_pyx), 0, 0, 1, 0},
-  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
-  {&__pyx_n_s_make_dense, __pyx_k_make_dense, sizeof(__pyx_k_make_dense), 0, 0, 1, 1},
-  {&__pyx_n_s_make_sparse, __pyx_k_make_sparse, sizeof(__pyx_k_make_sparse), 0, 0, 1, 1},
-  {&__pyx_n_s_max, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1},
-  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
-  {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1},
-  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
-  {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1},
-  {&__pyx_kp_s_must_be_implemented_by_subclass, __pyx_k_must_be_implemented_by_subclass, sizeof(__pyx_k_must_be_implemented_by_subclass), 0, 0, 1, 0},
-  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
-  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
-  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
-  {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
-  {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
-  {&__pyx_n_s_nonzero, __pyx_k_nonzero, sizeof(__pyx_k_nonzero), 0, 0, 1, 1},
-  {&__pyx_n_s_norm_factor, __pyx_k_norm_factor, sizeof(__pyx_k_norm_factor), 0, 0, 1, 1},
-  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
-  {&__pyx_n_s_num_observations, __pyx_k_num_observations, sizeof(__pyx_k_num_observations), 0, 0, 1, 1},
-  {&__pyx_n_s_num_states, __pyx_k_num_states, sizeof(__pyx_k_num_states), 0, 0, 1, 1},
-  {&__pyx_n_s_num_transitions, __pyx_k_num_transitions, sizeof(__pyx_k_num_transitions), 0, 0, 1, 1},
-  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
-  {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
-  {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
-  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
-  {&__pyx_n_s_object, __pyx_k_object, sizeof(__pyx_k_object), 0, 0, 1, 1},
-  {&__pyx_n_s_obs_end, __pyx_k_obs_end, sizeof(__pyx_k_obs_end), 0, 0, 1, 1},
-  {&__pyx_n_s_obs_start, __pyx_k_obs_start, sizeof(__pyx_k_obs_start), 0, 0, 1, 1},
-  {&__pyx_n_s_observation_model, __pyx_k_observation_model, sizeof(__pyx_k_observation_model), 0, 0, 1, 1},
-  {&__pyx_n_s_observation_probabilities, __pyx_k_observation_probabilities, sizeof(__pyx_k_observation_probabilities), 0, 0, 1, 1},
-  {&__pyx_n_s_observations, __pyx_k_observations, sizeof(__pyx_k_observations), 0, 0, 1, 1},
-  {&__pyx_n_s_om, __pyx_k_om, sizeof(__pyx_k_om), 0, 0, 1, 1},
-  {&__pyx_n_s_om_densities, __pyx_k_om_densities, sizeof(__pyx_k_om_densities), 0, 0, 1, 1},
-  {&__pyx_n_s_om_pointers, __pyx_k_om_pointers, sizeof(__pyx_k_om_pointers), 0, 0, 1, 1},
-  {&__pyx_n_s_ones, __pyx_k_ones, sizeof(__pyx_k_ones), 0, 0, 1, 1},
-  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
-  {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1},
-  {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
-  {&__pyx_n_s_pointer, __pyx_k_pointer, sizeof(__pyx_k_pointer), 0, 0, 1, 1},
-  {&__pyx_n_s_pointers, __pyx_k_pointers, sizeof(__pyx_k_pointers), 0, 0, 1, 1},
-  {&__pyx_n_s_pop, __pyx_k_pop, sizeof(__pyx_k_pop), 0, 0, 1, 1},
-  {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1},
-  {&__pyx_n_s_prev, __pyx_k_prev, sizeof(__pyx_k_prev), 0, 0, 1, 1},
-  {&__pyx_n_s_prev_pointer, __pyx_k_prev_pointer, sizeof(__pyx_k_prev_pointer), 0, 0, 1, 1},
-  {&__pyx_n_s_prev_state, __pyx_k_prev_state, sizeof(__pyx_k_prev_state), 0, 0, 1, 1},
-  {&__pyx_n_s_prev_states, __pyx_k_prev_states, sizeof(__pyx_k_prev_states), 0, 0, 1, 1},
-  {&__pyx_n_s_previous_viterbi, __pyx_k_previous_viterbi, sizeof(__pyx_k_previous_viterbi), 0, 0, 1, 1},
-  {&__pyx_n_s_prob_sum, __pyx_k_prob_sum, sizeof(__pyx_k_prob_sum), 0, 0, 1, 1},
-  {&__pyx_n_s_probabilities, __pyx_k_probabilities, sizeof(__pyx_k_probabilities), 0, 0, 1, 1},
-  {&__pyx_n_s_property, __pyx_k_property, sizeof(__pyx_k_property), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
-  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
-  {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1},
-  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
-  {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
-  {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
-  {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
-  {&__pyx_n_s_reset, __pyx_k_reset, sizeof(__pyx_k_reset), 0, 0, 1, 1},
-  {&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1},
-  {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1},
-  {&__pyx_n_s_send, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1},
-  {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
-  {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
-  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
-  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
-  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
-  {&__pyx_n_s_state, __pyx_k_state, sizeof(__pyx_k_state), 0, 0, 1, 1},
-  {&__pyx_n_s_states, __pyx_k_states, sizeof(__pyx_k_states), 0, 0, 1, 1},
-  {&__pyx_n_s_staticmethod, __pyx_k_staticmethod, sizeof(__pyx_k_staticmethod), 0, 0, 1, 1},
-  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
-  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
-  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
-  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
-  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
-  {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
-  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
-  {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1},
-  {&__pyx_n_s_super, __pyx_k_super, sizeof(__pyx_k_super), 0, 0, 1, 1},
-  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
-  {&__pyx_n_s_throw, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1},
-  {&__pyx_n_s_tm, __pyx_k_tm, sizeof(__pyx_k_tm), 0, 0, 1, 1},
-  {&__pyx_n_s_tm_pointers, __pyx_k_tm_pointers, sizeof(__pyx_k_tm_pointers), 0, 0, 1, 1},
-  {&__pyx_n_s_tm_probabilities, __pyx_k_tm_probabilities, sizeof(__pyx_k_tm_probabilities), 0, 0, 1, 1},
-  {&__pyx_n_s_tm_ptrs, __pyx_k_tm_ptrs, sizeof(__pyx_k_tm_ptrs), 0, 0, 1, 1},
-  {&__pyx_n_s_tm_states, __pyx_k_tm_states, sizeof(__pyx_k_tm_states), 0, 0, 1, 1},
-  {&__pyx_n_s_transition_model, __pyx_k_transition_model, sizeof(__pyx_k_transition_model), 0, 0, 1, 1},
-  {&__pyx_n_s_transition_prob, __pyx_k_transition_prob, sizeof(__pyx_k_transition_prob), 0, 0, 1, 1},
-  {&__pyx_n_s_transitions, __pyx_k_transitions, sizeof(__pyx_k_transitions), 0, 0, 1, 1},
-  {&__pyx_n_s_uint32, __pyx_k_uint32, sizeof(__pyx_k_uint32), 0, 0, 1, 1},
-  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
-  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
-  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
-  {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
-  {&__pyx_n_s_viterbi, __pyx_k_viterbi, sizeof(__pyx_k_viterbi), 0, 0, 1, 1},
-  {&__pyx_n_s_warn, __pyx_k_warn, sizeof(__pyx_k_warn), 0, 0, 1, 1},
-  {&__pyx_n_s_warnings, __pyx_k_warnings, sizeof(__pyx_k_warnings), 0, 0, 1, 1},
-  {&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1},
-  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
-  {0, 0, 0, 0, 0, 0, 0}
-};
-static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_object = __Pyx_GetBuiltinName(__pyx_n_s_object); if (!__pyx_builtin_object) __PYX_ERR(0, 31, __pyx_L1_error)
-  __pyx_builtin_property = __Pyx_GetBuiltinName(__pyx_n_s_property); if (!__pyx_builtin_property) __PYX_ERR(0, 86, __pyx_L1_error)
-  __pyx_builtin_staticmethod = __Pyx_GetBuiltinName(__pyx_n_s_staticmethod); if (!__pyx_builtin_staticmethod) __PYX_ERR(0, 101, __pyx_L1_error)
-  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 194, __pyx_L1_error)
-  __pyx_builtin_max = __Pyx_GetBuiltinName(__pyx_n_s_max); if (!__pyx_builtin_max) __PYX_ERR(0, 198, __pyx_L1_error)
-  __pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(0, 280, __pyx_L1_error)
-  __pyx_builtin_super = __Pyx_GetBuiltinName(__pyx_n_s_super); if (!__pyx_builtin_super) __PYX_ERR(0, 344, __pyx_L1_error)
-  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 526, __pyx_L1_error)
-  __pyx_builtin_RuntimeWarning = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeWarning); if (!__pyx_builtin_RuntimeWarning) __PYX_ERR(0, 568, __pyx_L1_error)
-  __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 947, __pyx_L1_error)
-  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error)
-  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error)
-  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error)
-  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 404, __pyx_L1_error)
-  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 613, __pyx_L1_error)
-  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 832, __pyx_L1_error)
-  return 0;
-  __pyx_L1_error:;
-  return -1;
-}
-
-static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
-
-  /* "madmom/ml/hmm.pyx":194
- *         probabilities = np.asarray(probabilities)
- *         if not np.allclose(np.bincount(prev_states, weights=probabilities), 1):
- *             raise ValueError('Not a probability distribution.')             # <<<<<<<<<<<<<<
- *         # convert everything into a sparse CSR matrix, make sure it is square.
- *         # looking through prev_states is enough, because there *must* be a
- */
-  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Not_a_probability_distribution); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 194, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple_);
-  __Pyx_GIVEREF(__pyx_tuple_);
-
-  /* "madmom/ml/hmm.pyx":280
- * 
- *         """
- *         raise NotImplementedError('must be implemented by subclass')             # <<<<<<<<<<<<<<
- * 
- *     def densities(self, observations):
- */
-  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_must_be_implemented_by_subclass); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 280, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__2);
-  __Pyx_GIVEREF(__pyx_tuple__2);
-
-  /* "madmom/ml/hmm.pyx":364
- * 
- *         """
- *         return self.observation_probabilities[:, observations].T             # <<<<<<<<<<<<<<
- * 
- *     def log_densities(self, observations):
- */
-  __pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 364, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_slice__3);
-  __Pyx_GIVEREF(__pyx_slice__3);
-
-  /* "madmom/ml/hmm.pyx":445
- *                                    transition_model.num_states
- *         if not np.allclose(initial_distribution.sum(), 1):
- *             raise ValueError('Initial distribution is not a probability '             # <<<<<<<<<<<<<<
- *                              'distribution.')
- *         self.initial_distribution = initial_distribution
- */
-  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Initial_distribution_is_not_a_pr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 445, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__4);
-  __Pyx_GIVEREF(__pyx_tuple__4);
-
-  /* "madmom/ml/hmm.pyx":455
- *         state = self.__dict__.copy()
- *         # do not pickle attributes needed for stateful processing
- *         state.pop('_prev', None)             # <<<<<<<<<<<<<<
- *         return state
- * 
- */
-  __pyx_tuple__5 = PyTuple_Pack(2, __pyx_n_s_prev, Py_None); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 455, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__5);
-  __Pyx_GIVEREF(__pyx_tuple__5);
-
-  /* "madmom/ml/hmm.pyx":567
- *         # raise warning if the sequence has -inf probability
- *         if np.isinf(log_probability):
- *             warnings.warn('-inf log probability during Viterbi decoding '             # <<<<<<<<<<<<<<
- *                           'cannot find a valid path', RuntimeWarning)
- *             # return empty path sequence
- */
-  __pyx_tuple__6 = PyTuple_Pack(2, __pyx_kp_s_inf_log_probability_during_Vite, __pyx_builtin_RuntimeWarning); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 567, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__6);
-  __Pyx_GIVEREF(__pyx_tuple__6);
-
-  /* "madmom/ml/hmm.pyx":570
- *                           'cannot find a valid path', RuntimeWarning)
- *             # return empty path sequence
- *             return np.empty(0, dtype=np.uint32), log_probability             # <<<<<<<<<<<<<<
- * 
- *         # back tracked path, a.k.a. path sequence
- */
-  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 570, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__7);
-  __Pyx_GIVEREF(__pyx_tuple__7);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947
- *         __pyx_import_array()
- *     except Exception:
- *         raise ImportError("numpy.core.multiarray failed to import")             # <<<<<<<<<<<<<<
- * 
- * cdef inline int import_umath() except -1:
- */
-  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 947, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__9);
-  __Pyx_GIVEREF(__pyx_tuple__9);
-
-  /* "../../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953
- *         _import_umath()
- *     except Exception:
- *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
- * 
- * cdef inline int import_ufunc() except -1:
- */
-  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 953, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__10);
-  __Pyx_GIVEREF(__pyx_tuple__10);
-
-  /* "View.MemoryView":133
- * 
- *         if not self.ndim:
- *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
- * 
- *         if itemsize <= 0:
- */
-  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 133, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__11);
-  __Pyx_GIVEREF(__pyx_tuple__11);
-
-  /* "View.MemoryView":136
- * 
- *         if itemsize <= 0:
- *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
- * 
- *         if not isinstance(format, bytes):
- */
-  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 136, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__12);
-  __Pyx_GIVEREF(__pyx_tuple__12);
-
-  /* "View.MemoryView":148
- * 
- *         if not self._shape:
- *             raise MemoryError("unable to allocate shape and strides.")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 148, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__13);
-  __Pyx_GIVEREF(__pyx_tuple__13);
-
-  /* "View.MemoryView":176
- *             self.data = <char *>malloc(self.len)
- *             if not self.data:
- *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
- * 
- *             if self.dtype_is_object:
- */
-  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 176, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__14);
-  __Pyx_GIVEREF(__pyx_tuple__14);
-
-  /* "View.MemoryView":192
- *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- *         if not (flags & bufmode):
- *             raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
- *         info.buf = self.data
- *         info.len = self.len
- */
-  __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 192, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__15);
-  __Pyx_GIVEREF(__pyx_tuple__15);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__16);
-  __Pyx_GIVEREF(__pyx_tuple__16);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__17);
-  __Pyx_GIVEREF(__pyx_tuple__17);
-
-  /* "View.MemoryView":418
- *     def __setitem__(memoryview self, object index, object value):
- *         if self.view.readonly:
- *             raise TypeError("Cannot assign to read-only memoryview")             # <<<<<<<<<<<<<<
- * 
- *         have_slices, index = _unellipsify(index, self.view.ndim)
- */
-  __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(2, 418, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__18);
-  __Pyx_GIVEREF(__pyx_tuple__18);
-
-  /* "View.MemoryView":495
- *             result = struct.unpack(self.view.format, bytesitem)
- *         except struct.error:
- *             raise ValueError("Unable to convert item to object")             # <<<<<<<<<<<<<<
- *         else:
- *             if len(self.view.format) == 1:
- */
-  __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 495, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__19);
-  __Pyx_GIVEREF(__pyx_tuple__19);
-
-  /* "View.MemoryView":520
- *     def __getbuffer__(self, Py_buffer *info, int flags):
- *         if flags & PyBUF_WRITABLE and self.view.readonly:
- *             raise ValueError("Cannot create writable memory view from read-only memoryview")             # <<<<<<<<<<<<<<
- * 
- *         if flags & PyBUF_ND:
- */
-  __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 520, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__20);
-  __Pyx_GIVEREF(__pyx_tuple__20);
-
-  /* "View.MemoryView":570
- *         if self.view.strides == NULL:
- * 
- *             raise ValueError("Buffer view does not expose strides")             # <<<<<<<<<<<<<<
- * 
- *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
- */
-  __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(2, 570, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__21);
-  __Pyx_GIVEREF(__pyx_tuple__21);
-
-  /* "View.MemoryView":577
- *     def suboffsets(self):
- *         if self.view.suboffsets == NULL:
- *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
- * 
- *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
- */
-  __pyx_tuple__22 = PyTuple_New(1); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(2, 577, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__22);
-  __Pyx_INCREF(__pyx_int_neg_1);
-  __Pyx_GIVEREF(__pyx_int_neg_1);
-  PyTuple_SET_ITEM(__pyx_tuple__22, 0, __pyx_int_neg_1);
-  __Pyx_GIVEREF(__pyx_tuple__22);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__23);
-  __Pyx_GIVEREF(__pyx_tuple__23);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__24);
-  __Pyx_GIVEREF(__pyx_tuple__24);
-
-  /* "View.MemoryView":703
- *     for suboffset in suboffsets[:ndim]:
- *         if suboffset >= 0:
- *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(2, 703, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__25);
-  __Pyx_GIVEREF(__pyx_tuple__25);
-
-  /* "(tree fragment)":2
- * def __reduce_cython__(self):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- */
-  __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(2, 2, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__26);
-  __Pyx_GIVEREF(__pyx_tuple__26);
-
-  /* "(tree fragment)":4
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
- * def __setstate_cython__(self, __pyx_state):
- *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
- */
-  __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(2, 4, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__27);
-  __Pyx_GIVEREF(__pyx_tuple__27);
-
-  /* "madmom/ml/hmm.pyx":31
- * 
- * 
- * class TransitionModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Transition model class for a HMM.
- */
-  __pyx_tuple__28 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 31, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__28);
-  __Pyx_GIVEREF(__pyx_tuple__28);
-
-  /* "madmom/ml/hmm.pyx":80
- *     """
- * 
- *     def __init__(self, states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         # save the parameters
- *         self.states = states
- */
-  __pyx_tuple__29 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_states, __pyx_n_s_pointers, __pyx_n_s_probabilities); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 80, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__29);
-  __Pyx_GIVEREF(__pyx_tuple__29);
-  __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_init, 80, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 80, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":87
- * 
- *     @property
- *     def num_states(self):             # <<<<<<<<<<<<<<
- *         """Number of states."""
- *         return len(self.pointers) - 1
- */
-  __pyx_tuple__31 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 87, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__31);
-  __Pyx_GIVEREF(__pyx_tuple__31);
-  __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_num_states, 87, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 87, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":92
- * 
- *     @property
- *     def num_transitions(self):             # <<<<<<<<<<<<<<
- *         """Number of transitions."""
- *         return len(self.probabilities)
- */
-  __pyx_tuple__33 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 92, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__33);
-  __Pyx_GIVEREF(__pyx_tuple__33);
-  __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_num_transitions, 92, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(0, 92, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":97
- * 
- *     @property
- *     def log_probabilities(self):             # <<<<<<<<<<<<<<
- *         """Transition log probabilities."""
- *         return np.log(self.probabilities)
- */
-  __pyx_tuple__35 = PyTuple_Pack(1, __pyx_n_s_self); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(0, 97, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__35);
-  __Pyx_GIVEREF(__pyx_tuple__35);
-  __pyx_codeobj__36 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_log_probabilities, 97, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__36)) __PYX_ERR(0, 97, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":102
- * 
- *     @staticmethod
- *     def make_dense(states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a dense representation of sparse transitions.
- */
-  __pyx_tuple__37 = PyTuple_Pack(6, __pyx_n_s_states, __pyx_n_s_pointers, __pyx_n_s_probabilities, __pyx_n_s_csr_matrix, __pyx_n_s_transitions, __pyx_n_s_prev_states); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(0, 102, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__37);
-  __Pyx_GIVEREF(__pyx_tuple__37);
-  __pyx_codeobj__38 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_make_dense, 102, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__38)) __PYX_ERR(0, 102, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":148
- * 
- *     @staticmethod
- *     def make_sparse(states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a sparse representation of dense transitions.
- */
-  __pyx_tuple__39 = PyTuple_Pack(7, __pyx_n_s_states, __pyx_n_s_prev_states, __pyx_n_s_probabilities, __pyx_n_s_csr_matrix, __pyx_n_s_num_states, __pyx_n_s_transitions, __pyx_n_s_pointers); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 148, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__39);
-  __Pyx_GIVEREF(__pyx_tuple__39);
-  __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_make_sparse, 148, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(0, 148, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":209
- * 
- *     @classmethod
- *     def from_dense(cls, states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Instantiate a TransitionModel from dense transitions.
- */
-  __pyx_tuple__41 = PyTuple_Pack(5, __pyx_n_s_cls, __pyx_n_s_states, __pyx_n_s_prev_states, __pyx_n_s_probabilities, __pyx_n_s_transitions); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 209, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__41);
-  __Pyx_GIVEREF(__pyx_tuple__41);
-  __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_from_dense, 209, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 209, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":234
- * 
- * 
- * class ObservationModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Observation model class for a HMM.
- */
-  __pyx_tuple__43 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__43);
-  __Pyx_GIVEREF(__pyx_tuple__43);
-
-  /* "madmom/ml/hmm.pyx":258
- *     """
- * 
- *     def __init__(self, pointers):             # <<<<<<<<<<<<<<
- *         # save parameters
- *         self.pointers = pointers
- */
-  __pyx_tuple__44 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_pointers); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__44);
-  __Pyx_GIVEREF(__pyx_tuple__44);
-  __pyx_codeobj__45 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__44, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_init, 258, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__45)) __PYX_ERR(0, 258, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":262
- *         self.pointers = pointers
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities (or probabilities) of the observations for each state.
- */
-  __pyx_tuple__46 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_observations); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 262, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__46);
-  __Pyx_GIVEREF(__pyx_tuple__46);
-  __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__46, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_log_densities, 262, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 262, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":282
- *         raise NotImplementedError('must be implemented by subclass')
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities (or probabilities) of the observations for each state.
- */
-  __pyx_tuple__48 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_observations); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 282, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__48);
-  __Pyx_GIVEREF(__pyx_tuple__48);
-  __pyx_codeobj__49 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__48, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_densities, 282, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__49)) __PYX_ERR(0, 282, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":339
- *     """
- * 
- *     def __init__(self, observation_probabilities):             # <<<<<<<<<<<<<<
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):
- */
-  __pyx_tuple__50 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_observation_probabilities); if (unlikely(!__pyx_tuple__50)) __PYX_ERR(0, 339, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__50);
-  __Pyx_GIVEREF(__pyx_tuple__50);
-  __pyx_codeobj__51 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__50, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_init, 339, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__51)) __PYX_ERR(0, 339, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":349
- *         self.observation_probabilities = observation_probabilities
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities of the observations.
- */
-  __pyx_tuple__52 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_observations); if (unlikely(!__pyx_tuple__52)) __PYX_ERR(0, 349, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__52);
-  __Pyx_GIVEREF(__pyx_tuple__52);
-  __pyx_codeobj__53 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__52, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_densities, 349, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__53)) __PYX_ERR(0, 349, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":366
- *         return self.observation_probabilities[:, observations].T
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities of the observations.
- */
-  __pyx_tuple__54 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_observations); if (unlikely(!__pyx_tuple__54)) __PYX_ERR(0, 366, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__54);
-  __Pyx_GIVEREF(__pyx_tuple__54);
-  __pyx_codeobj__55 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__54, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_log_densities, 366, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__55)) __PYX_ERR(0, 366, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":384
- * 
- * 
- * class HiddenMarkovModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Hidden Markov Model
- */
-  __pyx_tuple__56 = PyTuple_Pack(1, __pyx_builtin_object); if (unlikely(!__pyx_tuple__56)) __PYX_ERR(0, 384, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__56);
-  __Pyx_GIVEREF(__pyx_tuple__56);
-
-  /* "madmom/ml/hmm.pyx":435
- *     """
- * 
- *     def __init__(self, transition_model, observation_model,             # <<<<<<<<<<<<<<
- *                  initial_distribution=None):
- *         # save the parameters
- */
-  __pyx_tuple__57 = PyTuple_Pack(4, __pyx_n_s_self, __pyx_n_s_transition_model, __pyx_n_s_observation_model, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_tuple__57)) __PYX_ERR(0, 435, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__57);
-  __Pyx_GIVEREF(__pyx_tuple__57);
-  __pyx_codeobj__58 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__57, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_init, 435, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__58)) __PYX_ERR(0, 435, __pyx_L1_error)
-  __pyx_tuple__59 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__59)) __PYX_ERR(0, 435, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__59);
-  __Pyx_GIVEREF(__pyx_tuple__59);
-
-  /* "madmom/ml/hmm.pyx":451
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def __getstate__(self):             # <<<<<<<<<<<<<<
- *         # copy everything to a picklable object
- *         state = self.__dict__.copy()
- */
-  __pyx_tuple__60 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_state); if (unlikely(!__pyx_tuple__60)) __PYX_ERR(0, 451, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__60);
-  __Pyx_GIVEREF(__pyx_tuple__60);
-  __pyx_codeobj__61 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__60, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_getstate, 451, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__61)) __PYX_ERR(0, 451, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":458
- *         return state
- * 
- *     def __setstate__(self, state):             # <<<<<<<<<<<<<<
- *         # restore pickled instance attributes
- *         self.__dict__.update(state)
- */
-  __pyx_tuple__62 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_state); if (unlikely(!__pyx_tuple__62)) __PYX_ERR(0, 458, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__62);
-  __Pyx_GIVEREF(__pyx_tuple__62);
-  __pyx_codeobj__63 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__62, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_setstate, 458, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__63)) __PYX_ERR(0, 458, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":464
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def reset(self, initial_distribution=None):             # <<<<<<<<<<<<<<
- *         """
- *         Reset the HMM to its initial state.
- */
-  __pyx_tuple__64 = PyTuple_Pack(2, __pyx_n_s_self, __pyx_n_s_initial_distribution); if (unlikely(!__pyx_tuple__64)) __PYX_ERR(0, 464, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__64);
-  __Pyx_GIVEREF(__pyx_tuple__64);
-  __pyx_codeobj__65 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__64, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_reset, 464, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__65)) __PYX_ERR(0, 464, __pyx_L1_error)
-  __pyx_tuple__66 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__66)) __PYX_ERR(0, 464, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__66);
-  __Pyx_GIVEREF(__pyx_tuple__66);
-
-  /* "madmom/ml/hmm.pyx":480
- *     @cython.boundscheck(False)
- *     @cython.wraparound(False)
- *     def viterbi(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Determine the best path with the Viterbi algorithm.
- */
-  __pyx_tuple__67 = PyTuple_Pack(22, __pyx_n_s_self, __pyx_n_s_observations, __pyx_n_s_tm, __pyx_n_s_tm_states, __pyx_n_s_tm_pointers, __pyx_n_s_tm_probabilities, __pyx_n_s_num_states, __pyx_n_s_om, __pyx_n_s_num_observations, __pyx_n_s_om_pointers, __pyx_n_s_om_densities, __pyx_n_s_current_viterbi, __pyx_n_s_previous_viterbi, __pyx_n_s_bt_pointers, __pyx_n_s_state, __pyx_n_s_frame, __pyx_n_s_prev_state, __pyx_n_s_pointer, __pyx_n_s_density, __pyx_n_s_transition_prob, __pyx_n_s_log_probability, __pyx_n_s_path); if (unlikely(!__pyx_tuple__67)) __PYX_ERR(0, 480, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__67);
-  __Pyx_GIVEREF(__pyx_tuple__67);
-  __pyx_codeobj__68 = (PyObject*)__Pyx_PyCode_New(2, 0, 22, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__67, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_viterbi, 480, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__68)) __PYX_ERR(0, 480, __pyx_L1_error)
-
-  /* "madmom/ml/hmm.pyx":590
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward(self, observations, reset=True):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-  __pyx_tuple__69 = PyTuple_Pack(19, __pyx_n_s_self, __pyx_n_s_observations, __pyx_n_s_reset, __pyx_n_s_tm, __pyx_n_s_tm_states, __pyx_n_s_tm_pointers, __pyx_n_s_tm_probabilities, __pyx_n_s_num_states, __pyx_n_s_om, __pyx_n_s_om_pointers, __pyx_n_s_om_densities, __pyx_n_s_num_observations, __pyx_n_s_fwd_prev, __pyx_n_s_fwd, __pyx_n_s_prev_pointer, __pyx_n_s_frame, __pyx_n_s_state, __pyx_n_s_prob_sum, __pyx_n_s_norm_factor); if (unlikely(!__pyx_tuple__69)) __PYX_ERR(0, 590, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__69);
-  __Pyx_GIVEREF(__pyx_tuple__69);
-  __pyx_codeobj__70 = (PyObject*)__Pyx_PyCode_New(3, 0, 19, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__69, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_forward, 590, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__70)) __PYX_ERR(0, 590, __pyx_L1_error)
-  __pyx_tuple__71 = PyTuple_Pack(1, ((PyObject *)Py_True)); if (unlikely(!__pyx_tuple__71)) __PYX_ERR(0, 590, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__71);
-  __Pyx_GIVEREF(__pyx_tuple__71);
-
-  /* "madmom/ml/hmm.pyx":664
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward_generator(self, observations, block_size=None):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-  __pyx_tuple__72 = PyTuple_Pack(22, __pyx_n_s_self, __pyx_n_s_observations, __pyx_n_s_block_size, __pyx_n_s_tm, __pyx_n_s_tm_states, __pyx_n_s_tm_ptrs, __pyx_n_s_tm_probabilities, __pyx_n_s_num_states, __pyx_n_s_om, __pyx_n_s_num_observations, __pyx_n_s_om_pointers, __pyx_n_s_om_densities, __pyx_n_s_fwd_cur, __pyx_n_s_fwd_prev, __pyx_n_s_prev_pointer, __pyx_n_s_state, __pyx_n_s_obs_start, __pyx_n_s_obs_end, __pyx_n_s_frame, __pyx_n_s_block_sz, __pyx_n_s_prob_sum, __pyx_n_s_norm_factor); if (unlikely(!__pyx_tuple__72)) __PYX_ERR(0, 664, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__72);
-  __Pyx_GIVEREF(__pyx_tuple__72);
-  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 22, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__72, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_madmom_ml_hmm_pyx, __pyx_n_s_forward_generator, 664, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 664, __pyx_L1_error)
-  __pyx_tuple__73 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__73)) __PYX_ERR(0, 664, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__73);
-  __Pyx_GIVEREF(__pyx_tuple__73);
-
-  /* "View.MemoryView":286
- *         return self.name
- * 
- * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
- * cdef strided = Enum("<strided and direct>") # default
- * cdef indirect = Enum("<strided and indirect>")
- */
-  __pyx_tuple__74 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__74)) __PYX_ERR(2, 286, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__74);
-  __Pyx_GIVEREF(__pyx_tuple__74);
-
-  /* "View.MemoryView":287
- * 
- * cdef generic = Enum("<strided and direct or indirect>")
- * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
- * cdef indirect = Enum("<strided and indirect>")
- * 
- */
-  __pyx_tuple__75 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__75)) __PYX_ERR(2, 287, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__75);
-  __Pyx_GIVEREF(__pyx_tuple__75);
-
-  /* "View.MemoryView":288
- * cdef generic = Enum("<strided and direct or indirect>")
- * cdef strided = Enum("<strided and direct>") # default
- * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_tuple__76 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__76)) __PYX_ERR(2, 288, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__76);
-  __Pyx_GIVEREF(__pyx_tuple__76);
-
-  /* "View.MemoryView":291
- * 
- * 
- * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
- * cdef indirect_contiguous = Enum("<contiguous and indirect>")
- * 
- */
-  __pyx_tuple__77 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__77)) __PYX_ERR(2, 291, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__77);
-  __Pyx_GIVEREF(__pyx_tuple__77);
-
-  /* "View.MemoryView":292
- * 
- * cdef contiguous = Enum("<contiguous and direct>")
- * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_tuple__78 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__78)) __PYX_ERR(2, 292, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__78);
-  __Pyx_GIVEREF(__pyx_tuple__78);
-
-  /* "(tree fragment)":1
- * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- */
-  __pyx_tuple__79 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__79)) __PYX_ERR(2, 1, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_tuple__79);
-  __Pyx_GIVEREF(__pyx_tuple__79);
-  __pyx_codeobj__80 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__79, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__80)) __PYX_ERR(2, 1, __pyx_L1_error)
-  __Pyx_RefNannyFinishContext();
-  return 0;
-  __pyx_L1_error:;
-  __Pyx_RefNannyFinishContext();
-  return -1;
-}
-
-static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
-  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
-  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
-  return 0;
-  __pyx_L1_error:;
-  return -1;
-}
-
-static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
-static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
-
-static int __Pyx_modinit_global_init_code(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
-  /*--- Global init code ---*/
-  generic = Py_None; Py_INCREF(Py_None);
-  strided = Py_None; Py_INCREF(Py_None);
-  indirect = Py_None; Py_INCREF(Py_None);
-  contiguous = Py_None; Py_INCREF(Py_None);
-  indirect_contiguous = Py_None; Py_INCREF(Py_None);
-  __Pyx_RefNannyFinishContext();
-  return 0;
-}
-
-static int __Pyx_modinit_variable_export_code(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
-  /*--- Variable export code ---*/
-  __Pyx_RefNannyFinishContext();
-  return 0;
-}
-
-static int __Pyx_modinit_function_export_code(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
-  /*--- Function export code ---*/
-  __Pyx_RefNannyFinishContext();
-  return 0;
-}
-
-static int __Pyx_modinit_type_init_code(void) {
-  __Pyx_RefNannyDeclarations
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
-  /*--- Type init code ---*/
-  if (PyType_Ready(&__pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator) < 0) __PYX_ERR(0, 664, __pyx_L1_error)
-  #if PY_VERSION_HEX < 0x030800B1
-  __pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator.tp_print = 0;
-  #endif
-  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator.tp_dictoffset && __pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator.tp_getattro == PyObject_GenericGetAttr)) {
-    __pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
-  }
-  __pyx_ptype_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator = &__pyx_type_6madmom_2ml_3hmm___pyx_scope_struct__forward_generator;
-  __pyx_vtabptr_array = &__pyx_vtable_array;
-  __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
-  if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
-  #if PY_VERSION_HEX < 0x030800B1
-  __pyx_type___pyx_array.tp_print = 0;
-  #endif
-  if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
-  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
-  __pyx_array_type = &__pyx_type___pyx_array;
-  if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
-  #if PY_VERSION_HEX < 0x030800B1
-  __pyx_type___pyx_MemviewEnum.tp_print = 0;
-  #endif
-  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
-    __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
-  }
-  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
-  __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
-  __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
-  __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
-  __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
-  __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
-  __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
-  __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
-  __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
-  __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
-  if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
-  #if PY_VERSION_HEX < 0x030800B1
-  __pyx_type___pyx_memoryview.tp_print = 0;
-  #endif
-  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
-    __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
-  }
-  if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
-  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
-  __pyx_memoryview_type = &__pyx_type___pyx_memoryview;
-  __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
-  __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
-  __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
-  __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
-  __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
-  if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
-  #if PY_VERSION_HEX < 0x030800B1
-  __pyx_type___pyx_memoryviewslice.tp_print = 0;
-  #endif
-  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
-    __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
-  }
-  if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
-  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
-  __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
-  __Pyx_RefNannyFinishContext();
-  return 0;
-  __pyx_L1_error:;
-  __Pyx_RefNannyFinishContext();
-  return -1;
-}
-
-static int __Pyx_modinit_type_import_code(void) {
-  __Pyx_RefNannyDeclarations
-  PyObject *__pyx_t_1 = NULL;
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
-  /*--- Type import code ---*/
-  __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", 
-  #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
-  sizeof(PyTypeObject),
-  #else
-  sizeof(PyHeapTypeObject),
-  #endif
-  __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 200, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
-   if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 200, __pyx_L1_error)
-  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
-   if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 223, __pyx_L1_error)
-  __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
-   if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 227, __pyx_L1_error)
-  __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
-   if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 239, __pyx_L1_error)
-  __pyx_ptype_5numpy_generic = __Pyx_ImportType(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_generic) __PYX_ERR(1, 771, __pyx_L1_error)
-  __pyx_ptype_5numpy_number = __Pyx_ImportType(__pyx_t_1, "numpy", "number", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_number) __PYX_ERR(1, 773, __pyx_L1_error)
-  __pyx_ptype_5numpy_integer = __Pyx_ImportType(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_integer) __PYX_ERR(1, 775, __pyx_L1_error)
-  __pyx_ptype_5numpy_signedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(1, 777, __pyx_L1_error)
-  __pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(1, 779, __pyx_L1_error)
-  __pyx_ptype_5numpy_inexact = __Pyx_ImportType(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(1, 781, __pyx_L1_error)
-  __pyx_ptype_5numpy_floating = __Pyx_ImportType(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_floating) __PYX_ERR(1, 783, __pyx_L1_error)
-  __pyx_ptype_5numpy_complexfloating = __Pyx_ImportType(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(1, 785, __pyx_L1_error)
-  __pyx_ptype_5numpy_flexible = __Pyx_ImportType(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(1, 787, __pyx_L1_error)
-  __pyx_ptype_5numpy_character = __Pyx_ImportType(__pyx_t_1, "numpy", "character", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
-   if (!__pyx_ptype_5numpy_character) __PYX_ERR(1, 789, __pyx_L1_error)
-  __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
-   if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 827, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_RefNannyFinishContext();
-  return 0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_RefNannyFinishContext();
-  return -1;
-}
-
-static int __Pyx_modinit_variable_import_code(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
-  /*--- Variable import code ---*/
-  __Pyx_RefNannyFinishContext();
-  return 0;
-}
-
-static int __Pyx_modinit_function_import_code(void) {
-  __Pyx_RefNannyDeclarations
-  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
-  /*--- Function import code ---*/
-  __Pyx_RefNannyFinishContext();
-  return 0;
-}
-
-
-#ifndef CYTHON_NO_PYINIT_EXPORT
-#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
-#elif PY_MAJOR_VERSION < 3
-#ifdef __cplusplus
-#define __Pyx_PyMODINIT_FUNC extern "C" void
-#else
-#define __Pyx_PyMODINIT_FUNC void
-#endif
-#else
-#ifdef __cplusplus
-#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
-#else
-#define __Pyx_PyMODINIT_FUNC PyObject *
-#endif
-#endif
-
-
-#if PY_MAJOR_VERSION < 3
-__Pyx_PyMODINIT_FUNC inithmm(void) CYTHON_SMALL_CODE; /*proto*/
-__Pyx_PyMODINIT_FUNC inithmm(void)
-#else
-__Pyx_PyMODINIT_FUNC PyInit_hmm(void) CYTHON_SMALL_CODE; /*proto*/
-__Pyx_PyMODINIT_FUNC PyInit_hmm(void)
-#if CYTHON_PEP489_MULTI_PHASE_INIT
-{
-  return PyModuleDef_Init(&__pyx_moduledef);
-}
-static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
-    #if PY_VERSION_HEX >= 0x030700A1
-    static PY_INT64_T main_interpreter_id = -1;
-    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
-    if (main_interpreter_id == -1) {
-        main_interpreter_id = current_id;
-        return (unlikely(current_id == -1)) ? -1 : 0;
-    } else if (unlikely(main_interpreter_id != current_id))
-    #else
-    static PyInterpreterState *main_interpreter = NULL;
-    PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
-    if (!main_interpreter) {
-        main_interpreter = current_interpreter;
-    } else if (unlikely(main_interpreter != current_interpreter))
-    #endif
-    {
-        PyErr_SetString(
-            PyExc_ImportError,
-            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
-        return -1;
-    }
-    return 0;
-}
-static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
-    PyObject *value = PyObject_GetAttrString(spec, from_name);
-    int result = 0;
-    if (likely(value)) {
-        if (allow_none || value != Py_None) {
-            result = PyDict_SetItemString(moddict, to_name, value);
-        }
-        Py_DECREF(value);
-    } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
-        PyErr_Clear();
-    } else {
-        result = -1;
-    }
-    return result;
-}
-static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
-    PyObject *module = NULL, *moddict, *modname;
-    if (__Pyx_check_single_interpreter())
-        return NULL;
-    if (__pyx_m)
-        return __Pyx_NewRef(__pyx_m);
-    modname = PyObject_GetAttrString(spec, "name");
-    if (unlikely(!modname)) goto bad;
-    module = PyModule_NewObject(modname);
-    Py_DECREF(modname);
-    if (unlikely(!module)) goto bad;
-    moddict = PyModule_GetDict(module);
-    if (unlikely(!moddict)) goto bad;
-    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
-    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
-    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
-    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
-    return module;
-bad:
-    Py_XDECREF(module);
-    return NULL;
-}
-
-
-static CYTHON_SMALL_CODE int __pyx_pymod_exec_hmm(PyObject *__pyx_pyinit_module)
-#endif
-#endif
-{
-  PyObject *__pyx_t_1 = NULL;
-  PyObject *__pyx_t_2 = NULL;
-  PyObject *__pyx_t_3 = NULL;
-  PyObject *__pyx_t_4 = NULL;
-  static PyThread_type_lock __pyx_t_5[8];
-  int __pyx_lineno = 0;
-  const char *__pyx_filename = NULL;
-  int __pyx_clineno = 0;
-  __Pyx_RefNannyDeclarations
-  #if CYTHON_PEP489_MULTI_PHASE_INIT
-  if (__pyx_m) {
-    if (__pyx_m == __pyx_pyinit_module) return 0;
-    PyErr_SetString(PyExc_RuntimeError, "Module 'hmm' has already been imported. Re-initialisation is not supported.");
-    return -1;
-  }
-  #elif PY_MAJOR_VERSION >= 3
-  if (__pyx_m) return __Pyx_NewRef(__pyx_m);
-  #endif
-  #if CYTHON_REFNANNY
-__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
-if (!__Pyx_RefNanny) {
-  PyErr_Clear();
-  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
-  if (!__Pyx_RefNanny)
-      Py_FatalError("failed to import 'refnanny' module");
-}
-#endif
-  __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_hmm(void)", 0);
-  if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #ifdef __Pxy_PyFrame_Initialize_Offsets
-  __Pxy_PyFrame_Initialize_Offsets();
-  #endif
-  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
-  #ifdef __Pyx_CyFunction_USED
-  if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  #ifdef __Pyx_FusedFunction_USED
-  if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  #ifdef __Pyx_Coroutine_USED
-  if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  #ifdef __Pyx_Generator_USED
-  if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  #ifdef __Pyx_AsyncGen_USED
-  if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  #ifdef __Pyx_StopAsyncIteration_USED
-  if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  /*--- Library function declarations ---*/
-  /*--- Threads initialization code ---*/
-  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
-  #ifdef WITH_THREAD /* Python build with threading support? */
-  PyEval_InitThreads();
-  #endif
-  #endif
-  /*--- Module creation code ---*/
-  #if CYTHON_PEP489_MULTI_PHASE_INIT
-  __pyx_m = __pyx_pyinit_module;
-  Py_INCREF(__pyx_m);
-  #else
-  #if PY_MAJOR_VERSION < 3
-  __pyx_m = Py_InitModule4("hmm", __pyx_methods, __pyx_k_This_module_contains_Hidden_Mar, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
-  #else
-  __pyx_m = PyModule_Create(&__pyx_moduledef);
-  #endif
-  if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
-  Py_INCREF(__pyx_d);
-  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
-  Py_INCREF(__pyx_b);
-  __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
-  Py_INCREF(__pyx_cython_runtime);
-  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
-  /*--- Initialize various global constants etc. ---*/
-  if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
-  if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-  if (__pyx_module_is_main_madmom__ml__hmm) {
-    if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  }
-  #if PY_MAJOR_VERSION >= 3
-  {
-    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
-    if (!PyDict_GetItemString(modules, "madmom.ml.hmm")) {
-      if (unlikely(PyDict_SetItemString(modules, "madmom.ml.hmm", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
-    }
-  }
-  #endif
-  /*--- Builtin init code ---*/
-  if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  /*--- Constants init code ---*/
-  if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  /*--- Global type/function init code ---*/
-  (void)__Pyx_modinit_global_init_code();
-  (void)__Pyx_modinit_variable_export_code();
-  (void)__Pyx_modinit_function_export_code();
-  if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
-  if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
-  (void)__Pyx_modinit_variable_import_code();
-  (void)__Pyx_modinit_function_import_code();
-  /*--- Execution code ---*/
-  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
-  if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  #endif
-
-  /* "madmom/ml/hmm.pyx":19
- * from __future__ import absolute_import, division, print_function
- * 
- * import warnings             # <<<<<<<<<<<<<<
- * import numpy as np
- * 
- */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_warnings, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_warnings, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":20
- * 
- * import warnings
- * import numpy as np             # <<<<<<<<<<<<<<
- * 
- * cimport numpy as np
- */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 20, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":31
- * 
- * 
- * class TransitionModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Transition model class for a HMM.
- */
-  __pyx_t_1 = __Pyx_CalculateMetaclass(NULL, __pyx_tuple__28); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 31, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_Py3MetaclassPrepare(__pyx_t_1, __pyx_tuple__28, __pyx_n_s_TransitionModel, __pyx_n_s_TransitionModel, (PyObject *) NULL, __pyx_n_s_madmom_ml_hmm, __pyx_kp_s_Transition_model_class_for_a_HM); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-
-  /* "madmom/ml/hmm.pyx":80
- *     """
- * 
- *     def __init__(self, states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         # save the parameters
- *         self.states = states
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_1__init__, 0, __pyx_n_s_TransitionModel___init, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_3) < 0) __PYX_ERR(0, 80, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":87
- * 
- *     @property
- *     def num_states(self):             # <<<<<<<<<<<<<<
- *         """Number of states."""
- *         return len(self.pointers) - 1
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_3num_states, 0, __pyx_n_s_TransitionModel_num_states, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-
-  /* "madmom/ml/hmm.pyx":86
- *         self.probabilities = probabilities
- * 
- *     @property             # <<<<<<<<<<<<<<
- *     def num_states(self):
- *         """Number of states."""
- */
-  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_property, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_num_states, __pyx_t_4) < 0) __PYX_ERR(0, 87, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":92
- * 
- *     @property
- *     def num_transitions(self):             # <<<<<<<<<<<<<<
- *         """Number of transitions."""
- *         return len(self.probabilities)
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_5num_transitions, 0, __pyx_n_s_TransitionModel_num_transitions, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__34)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-
-  /* "madmom/ml/hmm.pyx":91
- *         return len(self.pointers) - 1
- * 
- *     @property             # <<<<<<<<<<<<<<
- *     def num_transitions(self):
- *         """Number of transitions."""
- */
-  __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_property, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_num_transitions, __pyx_t_3) < 0) __PYX_ERR(0, 92, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":97
- * 
- *     @property
- *     def log_probabilities(self):             # <<<<<<<<<<<<<<
- *         """Transition log probabilities."""
- *         return np.log(self.probabilities)
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_7log_probabilities, 0, __pyx_n_s_TransitionModel_log_probabilitie, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__36)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-
-  /* "madmom/ml/hmm.pyx":96
- *         return len(self.probabilities)
- * 
- *     @property             # <<<<<<<<<<<<<<
- *     def log_probabilities(self):
- *         """Transition log probabilities."""
- */
-  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_property, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_log_probabilities, __pyx_t_4) < 0) __PYX_ERR(0, 97, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":102
- * 
- *     @staticmethod
- *     def make_dense(states, pointers, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a dense representation of sparse transitions.
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_9make_dense, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_TransitionModel_make_dense, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__38)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-
-  /* "madmom/ml/hmm.pyx":101
- *         return np.log(self.probabilities)
- * 
- *     @staticmethod             # <<<<<<<<<<<<<<
- *     def make_dense(states, pointers, probabilities):
- *         """
- */
-  __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_make_dense, __pyx_t_3) < 0) __PYX_ERR(0, 102, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":148
- * 
- *     @staticmethod
- *     def make_sparse(states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Return a sparse representation of dense transitions.
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_11make_sparse, __Pyx_CYFUNCTION_STATICMETHOD, __pyx_n_s_TransitionModel_make_sparse, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__40)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-
-  /* "madmom/ml/hmm.pyx":147
- *         return states, prev_states, probabilities
- * 
- *     @staticmethod             # <<<<<<<<<<<<<<
- *     def make_sparse(states, prev_states, probabilities):
- *         """
- */
-  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_staticmethod, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_make_sparse, __pyx_t_4) < 0) __PYX_ERR(0, 148, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":209
- * 
- *     @classmethod
- *     def from_dense(cls, states, prev_states, probabilities):             # <<<<<<<<<<<<<<
- *         """
- *         Instantiate a TransitionModel from dense transitions.
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_15TransitionModel_13from_dense, __Pyx_CYFUNCTION_CLASSMETHOD, __pyx_n_s_TransitionModel_from_dense, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__42)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 209, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-
-  /* "madmom/ml/hmm.pyx":208
- *         return states, pointers, probabilities
- * 
- *     @classmethod             # <<<<<<<<<<<<<<
- *     def from_dense(cls, states, prev_states, probabilities):
- *         """
- */
-  __pyx_t_3 = __Pyx_Method_ClassMethod(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 208, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_from_dense, __pyx_t_3) < 0) __PYX_ERR(0, 209, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":31
- * 
- * 
- * class TransitionModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Transition model class for a HMM.
- */
-  __pyx_t_3 = __Pyx_Py3ClassCreate(__pyx_t_1, __pyx_n_s_TransitionModel, __pyx_tuple__28, __pyx_t_2, NULL, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_TransitionModel, __pyx_t_3) < 0) __PYX_ERR(0, 31, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":234
- * 
- * 
- * class ObservationModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Observation model class for a HMM.
- */
-  __pyx_t_1 = __Pyx_CalculateMetaclass(NULL, __pyx_tuple__43); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_Py3MetaclassPrepare(__pyx_t_1, __pyx_tuple__43, __pyx_n_s_ObservationModel, __pyx_n_s_ObservationModel, (PyObject *) NULL, __pyx_n_s_madmom_ml_hmm, __pyx_kp_s_Observation_model_class_for_a_H); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-
-  /* "madmom/ml/hmm.pyx":258
- *     """
- * 
- *     def __init__(self, pointers):             # <<<<<<<<<<<<<<
- *         # save parameters
- *         self.pointers = pointers
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_1__init__, 0, __pyx_n_s_ObservationModel___init, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__45)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 258, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_3) < 0) __PYX_ERR(0, 258, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":262
- *         self.pointers = pointers
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities (or probabilities) of the observations for each state.
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_3log_densities, 0, __pyx_n_s_ObservationModel_log_densities, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 262, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_log_densities, __pyx_t_3) < 0) __PYX_ERR(0, 262, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":282
- *         raise NotImplementedError('must be implemented by subclass')
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities (or probabilities) of the observations for each state.
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_16ObservationModel_5densities, 0, __pyx_n_s_ObservationModel_densities, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__49)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 282, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_densities, __pyx_t_3) < 0) __PYX_ERR(0, 282, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":234
- * 
- * 
- * class ObservationModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Observation model class for a HMM.
- */
-  __pyx_t_3 = __Pyx_Py3ClassCreate(__pyx_t_1, __pyx_n_s_ObservationModel, __pyx_tuple__43, __pyx_t_2, NULL, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 234, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ObservationModel, __pyx_t_3) < 0) __PYX_ERR(0, 234, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
-  /* "madmom/ml/hmm.pyx":306
- * 
- * 
- * class DiscreteObservationModel(ObservationModel):             # <<<<<<<<<<<<<<
- *     """
- *     Simple discrete observation model that takes an observation matrix of the
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_ObservationModel); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_1);
-  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
-  __pyx_t_1 = 0;
-  __pyx_t_1 = __Pyx_CalculateMetaclass(NULL, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = __Pyx_Py3MetaclassPrepare(__pyx_t_1, __pyx_t_2, __pyx_n_s_DiscreteObservationModel, __pyx_n_s_DiscreteObservationModel, (PyObject *) NULL, __pyx_n_s_madmom_ml_hmm, __pyx_kp_s_Simple_discrete_observation_mod); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-
-  /* "madmom/ml/hmm.pyx":339
- *     """
- * 
- *     def __init__(self, observation_probabilities):             # <<<<<<<<<<<<<<
- *         # check that it is a probability distribution
- *         if not np.allclose(observation_probabilities.sum(axis=1), 1):
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_1__init__, 0, __pyx_n_s_DiscreteObservationModel___init, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__51)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_init, __pyx_t_4) < 0) __PYX_ERR(0, 339, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":349
- *         self.observation_probabilities = observation_probabilities
- * 
- *     def densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Densities of the observations.
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_3densities, 0, __pyx_n_s_DiscreteObservationModel_densiti, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__53)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 349, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_densities, __pyx_t_4) < 0) __PYX_ERR(0, 349, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":366
- *         return self.observation_probabilities[:, observations].T
- * 
- *     def log_densities(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Log densities of the observations.
- */
-  __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_24DiscreteObservationModel_5log_densities, 0, __pyx_n_s_DiscreteObservationModel_log_den, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__55)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 366, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_log_densities, __pyx_t_4) < 0) __PYX_ERR(0, 366, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
-  /* "madmom/ml/hmm.pyx":306
- * 
- * 
- * class DiscreteObservationModel(ObservationModel):             # <<<<<<<<<<<<<<
- *     """
- *     Simple discrete observation model that takes an observation matrix of the
- */
-  __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_1, __pyx_n_s_DiscreteObservationModel, __pyx_t_2, __pyx_t_3, NULL, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_4);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_DiscreteObservationModel, __pyx_t_4) < 0) __PYX_ERR(0, 306, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":384
- * 
- * 
- * class HiddenMarkovModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Hidden Markov Model
- */
-  __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_tuple__56); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 384, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_1 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_tuple__56, __pyx_n_s_HiddenMarkovModel, __pyx_n_s_HiddenMarkovModel, (PyObject *) NULL, __pyx_n_s_madmom_ml_hmm, __pyx_kp_s_Hidden_Markov_Model_To_search_f); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 384, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_1);
-
-  /* "madmom/ml/hmm.pyx":435
- *     """
- * 
- *     def __init__(self, transition_model, observation_model,             # <<<<<<<<<<<<<<
- *                  initial_distribution=None):
- *         # save the parameters
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_1__init__, 0, __pyx_n_s_HiddenMarkovModel___init, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__58)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 435, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__59);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_init, __pyx_t_3) < 0) __PYX_ERR(0, 435, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":451
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def __getstate__(self):             # <<<<<<<<<<<<<<
- *         # copy everything to a picklable object
- *         state = self.__dict__.copy()
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_3__getstate__, 0, __pyx_n_s_HiddenMarkovModel___getstate, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__61)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 451, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_getstate, __pyx_t_3) < 0) __PYX_ERR(0, 451, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":458
- *         return state
- * 
- *     def __setstate__(self, state):             # <<<<<<<<<<<<<<
- *         # restore pickled instance attributes
- *         self.__dict__.update(state)
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_5__setstate__, 0, __pyx_n_s_HiddenMarkovModel___setstate, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__63)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 458, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_setstate, __pyx_t_3) < 0) __PYX_ERR(0, 458, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":464
- *         self._prev = self.initial_distribution.copy()
- * 
- *     def reset(self, initial_distribution=None):             # <<<<<<<<<<<<<<
- *         """
- *         Reset the HMM to its initial state.
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_7reset, 0, __pyx_n_s_HiddenMarkovModel_reset, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__65)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__66);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_reset, __pyx_t_3) < 0) __PYX_ERR(0, 464, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":480
- *     @cython.boundscheck(False)
- *     @cython.wraparound(False)
- *     def viterbi(self, observations):             # <<<<<<<<<<<<<<
- *         """
- *         Determine the best path with the Viterbi algorithm.
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_9viterbi, 0, __pyx_n_s_HiddenMarkovModel_viterbi, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__68)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 480, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_viterbi, __pyx_t_3) < 0) __PYX_ERR(0, 480, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":590
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward(self, observations, reset=True):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_11forward, 0, __pyx_n_s_HiddenMarkovModel_forward, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__70)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 590, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__71);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_forward, __pyx_t_3) < 0) __PYX_ERR(0, 590, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":664
- *     @cython.wraparound(False)
- *     @cython.initializedcheck(False)
- *     def forward_generator(self, observations, block_size=None):             # <<<<<<<<<<<<<<
- *         """
- *         Compute the forward variables at each time step. Instead of computing
- */
-  __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6madmom_2ml_3hmm_17HiddenMarkovModel_13forward_generator, 0, __pyx_n_s_HiddenMarkovModel_forward_genera, NULL, __pyx_n_s_madmom_ml_hmm, __pyx_d, ((PyObject *)__pyx_codeobj__8)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 664, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__73);
-  if (__Pyx_SetNameInClass(__pyx_t_1, __pyx_n_s_forward_generator, __pyx_t_3) < 0) __PYX_ERR(0, 664, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
-  /* "madmom/ml/hmm.pyx":384
- * 
- * 
- * class HiddenMarkovModel(object):             # <<<<<<<<<<<<<<
- *     """
- *     Hidden Markov Model
- */
-  __pyx_t_3 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_HiddenMarkovModel, __pyx_tuple__56, __pyx_t_1, NULL, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 384, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_HiddenMarkovModel, __pyx_t_3) < 0) __PYX_ERR(0, 384, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":752
- * 
- * # alias
- * HMM = HiddenMarkovModel             # <<<<<<<<<<<<<<
- */
-  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_HiddenMarkovModel); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 752, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_HMM, __pyx_t_2) < 0) __PYX_ERR(0, 752, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "madmom/ml/hmm.pyx":1
- * # encoding: utf-8             # <<<<<<<<<<<<<<
- * # cython: embedsignature=True
- * """
- */
-  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "View.MemoryView":209
- *         info.obj = self
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
- * 
- *     def __dealloc__(array self):
- */
-  __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 209, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(2, 209, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  PyType_Modified(__pyx_array_type);
-
-  /* "View.MemoryView":286
- *         return self.name
- * 
- * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
- * cdef strided = Enum("<strided and direct>") # default
- * cdef indirect = Enum("<strided and indirect>")
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__74, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 286, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_XGOTREF(generic);
-  __Pyx_DECREF_SET(generic, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":287
- * 
- * cdef generic = Enum("<strided and direct or indirect>")
- * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
- * cdef indirect = Enum("<strided and indirect>")
- * 
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__75, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 287, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_XGOTREF(strided);
-  __Pyx_DECREF_SET(strided, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":288
- * cdef generic = Enum("<strided and direct or indirect>")
- * cdef strided = Enum("<strided and direct>") # default
- * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__76, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 288, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_XGOTREF(indirect);
-  __Pyx_DECREF_SET(indirect, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":291
- * 
- * 
- * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
- * cdef indirect_contiguous = Enum("<contiguous and indirect>")
- * 
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__77, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 291, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_XGOTREF(contiguous);
-  __Pyx_DECREF_SET(contiguous, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":292
- * 
- * cdef contiguous = Enum("<contiguous and direct>")
- * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__78, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 292, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  __Pyx_XGOTREF(indirect_contiguous);
-  __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2);
-  __Pyx_GIVEREF(__pyx_t_2);
-  __pyx_t_2 = 0;
-
-  /* "View.MemoryView":316
- * 
- * DEF THREAD_LOCKS_PREALLOCATED = 8
- * cdef int __pyx_memoryview_thread_locks_used = 0             # <<<<<<<<<<<<<<
- * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
- *     PyThread_allocate_lock(),
- */
-  __pyx_memoryview_thread_locks_used = 0;
-
-  /* "View.MemoryView":317
- * DEF THREAD_LOCKS_PREALLOCATED = 8
- * cdef int __pyx_memoryview_thread_locks_used = 0
- * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [             # <<<<<<<<<<<<<<
- *     PyThread_allocate_lock(),
- *     PyThread_allocate_lock(),
- */
-  __pyx_t_5[0] = PyThread_allocate_lock();
-  __pyx_t_5[1] = PyThread_allocate_lock();
-  __pyx_t_5[2] = PyThread_allocate_lock();
-  __pyx_t_5[3] = PyThread_allocate_lock();
-  __pyx_t_5[4] = PyThread_allocate_lock();
-  __pyx_t_5[5] = PyThread_allocate_lock();
-  __pyx_t_5[6] = PyThread_allocate_lock();
-  __pyx_t_5[7] = PyThread_allocate_lock();
-  memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_5, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
-
-  /* "View.MemoryView":549
- *         info.obj = self
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 549, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(2, 549, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  PyType_Modified(__pyx_memoryview_type);
-
-  /* "View.MemoryView":995
- *         return self.from_object
- * 
- *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
- * 
- * 
- */
-  __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 995, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(2, 995, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  PyType_Modified(__pyx_memoryviewslice_type);
-
-  /* "(tree fragment)":1
- * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
- *     cdef object __pyx_PickleError
- *     cdef object __pyx_result
- */
-  __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1, __pyx_L1_error)
-  __Pyx_GOTREF(__pyx_t_2);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(2, 1, __pyx_L1_error)
-  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
-  /* "(tree fragment)":11
- *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
- *     return __pyx_result
- * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
- *     __pyx_result.name = __pyx_state[0]
- *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
- */
-
-  /*--- Wrapped vars code ---*/
-
-  goto __pyx_L0;
-  __pyx_L1_error:;
-  __Pyx_XDECREF(__pyx_t_1);
-  __Pyx_XDECREF(__pyx_t_2);
-  __Pyx_XDECREF(__pyx_t_3);
-  __Pyx_XDECREF(__pyx_t_4);
-  if (__pyx_m) {
-    if (__pyx_d) {
-      __Pyx_AddTraceback("init madmom.ml.hmm", __pyx_clineno, __pyx_lineno, __pyx_filename);
-    }
-    Py_CLEAR(__pyx_m);
-  } else if (!PyErr_Occurred()) {
-    PyErr_SetString(PyExc_ImportError, "init madmom.ml.hmm");
-  }
-  __pyx_L0:;
-  __Pyx_RefNannyFinishContext();
-  #if CYTHON_PEP489_MULTI_PHASE_INIT
-  return (__pyx_m != NULL) ? 0 : -1;
-  #elif PY_MAJOR_VERSION >= 3
-  return __pyx_m;
-  #else
-  return;
-  #endif
-}
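The init function ends with the standard Cython epilogue: on error it attaches a traceback to the partially initialized module (or raises a bare ImportError), clears the module, and returns failure; the return convention is an int under multi-phase init, the module pointer on Python 3 single-phase, and void on Python 2. For comparison, a hand-written Python 3 single-phase module follows the same contract; the module name and constant below are invented for the sketch:

#include <Python.h>

static struct PyModuleDef example_def = {
    PyModuleDef_HEAD_INIT, "example", NULL, -1, NULL
};

PyMODINIT_FUNC PyInit_example(void) {
    PyObject *m = PyModule_Create(&example_def);
    if (!m)
        return NULL;                    /* failure: NULL, exception set */
    if (PyModule_AddStringConstant(m, "GREETING", "hello") < 0) {
        Py_DECREF(m);                   /* mirrors Py_CLEAR(__pyx_m) above */
        return NULL;
    }
    return m;                           /* success: the module object */
}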
-
-/* --- Runtime support code --- */
-/* Refnanny */
-#if CYTHON_REFNANNY
-static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
-    PyObject *m = NULL, *p = NULL;
-    void *r = NULL;
-    m = PyImport_ImportModule(modname);
-    if (!m) goto end;
-    p = PyObject_GetAttrString(m, "RefNannyAPI");
-    if (!p) goto end;
-    r = PyLong_AsVoidPtr(p);
-end:
-    Py_XDECREF(p);
-    Py_XDECREF(m);
-    return (__Pyx_RefNannyAPIStruct *)r;
-}
-#endif
-
-/* PyObjectGetAttrStr */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
-    PyTypeObject* tp = Py_TYPE(obj);
-    if (likely(tp->tp_getattro))
-        return tp->tp_getattro(obj, attr_name);
-#if PY_MAJOR_VERSION < 3
-    if (likely(tp->tp_getattr))
-        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
-#endif
-    return PyObject_GetAttr(obj, attr_name);
-}
-#endif
-
-/* GetBuiltinName */
-static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
-    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
-    if (unlikely(!result)) {
-        PyErr_Format(PyExc_NameError,
-#if PY_MAJOR_VERSION >= 3
-            "name '%U' is not defined", name);
-#else
-            "name '%.200s' is not defined", PyString_AS_STRING(name));
-#endif
-    }
-    return result;
-}
-
-/* RaiseArgTupleInvalid */
-static void __Pyx_RaiseArgtupleInvalid(
-    const char* func_name,
-    int exact,
-    Py_ssize_t num_min,
-    Py_ssize_t num_max,
-    Py_ssize_t num_found)
-{
-    Py_ssize_t num_expected;
-    const char *more_or_less;
-    if (num_found < num_min) {
-        num_expected = num_min;
-        more_or_less = "at least";
-    } else {
-        num_expected = num_max;
-        more_or_less = "at most";
-    }
-    if (exact) {
-        more_or_less = "exactly";
-    }
-    PyErr_Format(PyExc_TypeError,
-                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
-                 func_name, more_or_less, num_expected,
-                 (num_expected == 1) ? "" : "s", num_found);
-}
-
-/* RaiseDoubleKeywords */
-static void __Pyx_RaiseDoubleKeywordsError(
-    const char* func_name,
-    PyObject* kw_name)
-{
-    PyErr_Format(PyExc_TypeError,
-        #if PY_MAJOR_VERSION >= 3
-        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
-        #else
-        "%s() got multiple values for keyword argument '%s'", func_name,
-        PyString_AsString(kw_name));
-        #endif
-}
-
-/* ParseKeywords */
-static int __Pyx_ParseOptionalKeywords(
-    PyObject *kwds,
-    PyObject **argnames[],
-    PyObject *kwds2,
-    PyObject *values[],
-    Py_ssize_t num_pos_args,
-    const char* function_name)
-{
-    PyObject *key = 0, *value = 0;
-    Py_ssize_t pos = 0;
-    PyObject*** name;
-    PyObject*** first_kw_arg = argnames + num_pos_args;
-    while (PyDict_Next(kwds, &pos, &key, &value)) {
-        name = first_kw_arg;
-        while (*name && (**name != key)) name++;
-        if (*name) {
-            values[name-argnames] = value;
-            continue;
-        }
-        name = first_kw_arg;
-        #if PY_MAJOR_VERSION < 3
-        if (likely(PyString_Check(key))) {
-            while (*name) {
-                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
-                        && _PyString_Eq(**name, key)) {
-                    values[name-argnames] = value;
-                    break;
-                }
-                name++;
-            }
-            if (*name) continue;
-            else {
-                PyObject*** argname = argnames;
-                while (argname != first_kw_arg) {
-                    if ((**argname == key) || (
-                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
-                             && _PyString_Eq(**argname, key))) {
-                        goto arg_passed_twice;
-                    }
-                    argname++;
-                }
-            }
-        } else
-        #endif
-        if (likely(PyUnicode_Check(key))) {
-            while (*name) {
-                int cmp = (**name == key) ? 0 :
-                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
-                    (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
-                #endif
-                    PyUnicode_Compare(**name, key);
-                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
-                if (cmp == 0) {
-                    values[name-argnames] = value;
-                    break;
-                }
-                name++;
-            }
-            if (*name) continue;
-            else {
-                PyObject*** argname = argnames;
-                while (argname != first_kw_arg) {
-                    int cmp = (**argname == key) ? 0 :
-                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
-                        (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
-                    #endif
-                        PyUnicode_Compare(**argname, key);
-                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
-                    if (cmp == 0) goto arg_passed_twice;
-                    argname++;
-                }
-            }
-        } else
-            goto invalid_keyword_type;
-        if (kwds2) {
-            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
-        } else {
-            goto invalid_keyword;
-        }
-    }
-    return 0;
-arg_passed_twice:
-    __Pyx_RaiseDoubleKeywordsError(function_name, key);
-    goto bad;
-invalid_keyword_type:
-    PyErr_Format(PyExc_TypeError,
-        "%.200s() keywords must be strings", function_name);
-    goto bad;
-invalid_keyword:
-    PyErr_Format(PyExc_TypeError,
-    #if PY_MAJOR_VERSION < 3
-        "%.200s() got an unexpected keyword argument '%.200s'",
-        function_name, PyString_AsString(key));
-    #else
-        "%s() got an unexpected keyword argument '%U'",
-        function_name, key);
-    #endif
-bad:
-    return -1;
-}
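__Pyx_ParseOptionalKeywords matches each keyword against the expected argument names by pointer identity first (Cython interns its name constants, so a hit is usually one pointer compare) and only then falls back to a length check plus content comparison. A reduced sketch of that two-tier test; kw_matches is a hypothetical helper and assumes interned_name has been interned, e.g. via PyUnicode_InternInPlace:

#include <Python.h>

/* Hypothetical helper: 1 on match, 0 on mismatch, -1 on error. */
static int kw_matches(PyObject *interned_name, PyObject *key) {
    if (interned_name == key)            /* fast path: interned identity */
        return 1;
    if (!PyUnicode_Check(key))
        return 0;
    int cmp = PyUnicode_Compare(interned_name, key);
    if (cmp < 0 && PyErr_Occurred())     /* the compare itself failed */
        return -1;
    return cmp == 0;                     /* slow path: content equality */
}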
-
-/* PyObjectSetAttrStr */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
-    PyTypeObject* tp = Py_TYPE(obj);
-    if (likely(tp->tp_setattro))
-        return tp->tp_setattro(obj, attr_name, value);
-#if PY_MAJOR_VERSION < 3
-    if (likely(tp->tp_setattr))
-        return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value);
-#endif
-    return PyObject_SetAttr(obj, attr_name, value);
-}
-#endif
-
-/* PyDictVersioning */
-#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
-    PyObject *dict = Py_TYPE(obj)->tp_dict;
-    return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
-}
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
-    PyObject **dictptr = NULL;
-    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
-    if (offset) {
-#if CYTHON_COMPILING_IN_CPYTHON
-        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
-#else
-        dictptr = _PyObject_GetDictPtr(obj);
-#endif
-    }
-    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
-}
-static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
-    PyObject *dict = Py_TYPE(obj)->tp_dict;
-    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
-        return 0;
-    return obj_dict_version == __Pyx_get_object_dict_version(obj);
-}
-#endif
-
-/* GetModuleGlobalName */
-#if CYTHON_USE_DICT_VERSIONS
-static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
-#else
-static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
-#endif
-{
-    PyObject *result;
-#if !CYTHON_AVOID_BORROWED_REFS
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
-    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
-    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
-    if (likely(result)) {
-        return __Pyx_NewRef(result);
-    } else if (unlikely(PyErr_Occurred())) {
-        return NULL;
-    }
-#else
-    result = PyDict_GetItem(__pyx_d, name);
-    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
-    if (likely(result)) {
-        return __Pyx_NewRef(result);
-    }
-#endif
-#else
-    result = PyObject_GetItem(__pyx_d, name);
-    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
-    if (likely(result)) {
-        return __Pyx_NewRef(result);
-    }
-    PyErr_Clear();
-#endif
-    return __Pyx_GetBuiltinName(name);
-}
-
-/* PyCFunctionFastCall */
-#if CYTHON_FAST_PYCCALL
-static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
-    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
-    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
-    PyObject *self = PyCFunction_GET_SELF(func);
-    int flags = PyCFunction_GET_FLAGS(func);
-    assert(PyCFunction_Check(func));
-    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
-    assert(nargs >= 0);
-    assert(nargs == 0 || args != NULL);
-    /* _PyCFunction_FastCallDict() must not be called with an exception set,
-       because it may clear it (directly or indirectly) and so the
-       caller loses its exception */
-    assert(!PyErr_Occurred());
-    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
-        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
-    } else {
-        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
-    }
-}
-#endif
-
-/* PyFunctionFastCall */
-#if CYTHON_FAST_PYCALL
-static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
-                                               PyObject *globals) {
-    PyFrameObject *f;
-    PyThreadState *tstate = __Pyx_PyThreadState_Current;
-    PyObject **fastlocals;
-    Py_ssize_t i;
-    PyObject *result;
-    assert(globals != NULL);
-    /* XXX Perhaps we should create a specialized
-       PyFrame_New() that doesn't take locals, but does
-       take builtins without sanity checking them.
-       */
-    assert(tstate != NULL);
-    f = PyFrame_New(tstate, co, globals, NULL);
-    if (f == NULL) {
-        return NULL;
-    }
-    fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
-    for (i = 0; i < na; i++) {
-        Py_INCREF(*args);
-        fastlocals[i] = *args++;
-    }
-    result = PyEval_EvalFrameEx(f,0);
-    ++tstate->recursion_depth;
-    Py_DECREF(f);
-    --tstate->recursion_depth;
-    return result;
-}
-#if 1 || PY_VERSION_HEX < 0x030600B1
-static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
-    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
-    PyObject *globals = PyFunction_GET_GLOBALS(func);
-    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
-    PyObject *closure;
-#if PY_MAJOR_VERSION >= 3
-    PyObject *kwdefs;
-#endif
-    PyObject *kwtuple, **k;
-    PyObject **d;
-    Py_ssize_t nd;
-    Py_ssize_t nk;
-    PyObject *result;
-    assert(kwargs == NULL || PyDict_Check(kwargs));
-    nk = kwargs ? PyDict_Size(kwargs) : 0;
-    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
-        return NULL;
-    }
-    if (
-#if PY_MAJOR_VERSION >= 3
-            co->co_kwonlyargcount == 0 &&
-#endif
-            likely(kwargs == NULL || nk == 0) &&
-            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
-        if (argdefs == NULL && co->co_argcount == nargs) {
-            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
-            goto done;
-        }
-        else if (nargs == 0 && argdefs != NULL
-                 && co->co_argcount == Py_SIZE(argdefs)) {
-            /* function called with no arguments, but all parameters have
-               a default value: use default values as arguments. */
-            args = &PyTuple_GET_ITEM(argdefs, 0);
-            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
-            goto done;
-        }
-    }
-    if (kwargs != NULL) {
-        Py_ssize_t pos, i;
-        kwtuple = PyTuple_New(2 * nk);
-        if (kwtuple == NULL) {
-            result = NULL;
-            goto done;
-        }
-        k = &PyTuple_GET_ITEM(kwtuple, 0);
-        pos = i = 0;
-        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
-            Py_INCREF(k[i]);
-            Py_INCREF(k[i+1]);
-            i += 2;
-        }
-        nk = i / 2;
-    }
-    else {
-        kwtuple = NULL;
-        k = NULL;
-    }
-    closure = PyFunction_GET_CLOSURE(func);
-#if PY_MAJOR_VERSION >= 3
-    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
-#endif
-    if (argdefs != NULL) {
-        d = &PyTuple_GET_ITEM(argdefs, 0);
-        nd = Py_SIZE(argdefs);
-    }
-    else {
-        d = NULL;
-        nd = 0;
-    }
-#if PY_MAJOR_VERSION >= 3
-    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
-                               args, (int)nargs,
-                               k, (int)nk,
-                               d, (int)nd, kwdefs, closure);
-#else
-    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
-                               args, (int)nargs,
-                               k, (int)nk,
-                               d, (int)nd, closure);
-#endif
-    Py_XDECREF(kwtuple);
-done:
-    Py_LeaveRecursiveCall();
-    return result;
-}
-#endif
-#endif
-
-/* PyObjectCall */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
-    PyObject *result;
-    ternaryfunc call = func->ob_type->tp_call;
-    if (unlikely(!call))
-        return PyObject_Call(func, arg, kw);
-    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
-        return NULL;
-    result = (*call)(func, arg, kw);
-    Py_LeaveRecursiveCall();
-    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
-        PyErr_SetString(
-            PyExc_SystemError,
-            "NULL result without error in PyObject_Call");
-    }
-    return result;
-}
-#endif
-
-/* PyObjectCall2Args */
-static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
-    PyObject *args, *result = NULL;
-    #if CYTHON_FAST_PYCALL
-    if (PyFunction_Check(function)) {
-        PyObject *args[2] = {arg1, arg2};
-        return __Pyx_PyFunction_FastCall(function, args, 2);
-    }
-    #endif
-    #if CYTHON_FAST_PYCCALL
-    if (__Pyx_PyFastCFunction_Check(function)) {
-        PyObject *args[2] = {arg1, arg2};
-        return __Pyx_PyCFunction_FastCall(function, args, 2);
-    }
-    #endif
-    args = PyTuple_New(2);
-    if (unlikely(!args)) goto done;
-    Py_INCREF(arg1);
-    PyTuple_SET_ITEM(args, 0, arg1);
-    Py_INCREF(arg2);
-    PyTuple_SET_ITEM(args, 1, arg2);
-    Py_INCREF(function);
-    result = __Pyx_PyObject_Call(function, args, NULL);
-    Py_DECREF(args);
-    Py_DECREF(function);
-done:
-    return result;
-}
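__Pyx_PyObject_Call2Args tries the frame-based and METH_FASTCALL shortcuts first and only then falls back to the fully general calling protocol. That fallback in isolation, under the hypothetical name call_2args:

#include <Python.h>

static PyObject *call_2args(PyObject *func, PyObject *a1, PyObject *a2) {
    PyObject *args = PyTuple_Pack(2, a1, a2);  /* tuple takes new refs */
    if (!args) return NULL;
    PyObject *result = PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;                             /* NULL on error */
}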
-
-/* PyObjectCallMethO */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
-    PyObject *self, *result;
-    PyCFunction cfunc;
-    cfunc = PyCFunction_GET_FUNCTION(func);
-    self = PyCFunction_GET_SELF(func);
-    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
-        return NULL;
-    result = cfunc(self, arg);
-    Py_LeaveRecursiveCall();
-    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
-        PyErr_SetString(
-            PyExc_SystemError,
-            "NULL result without error in PyObject_Call");
-    }
-    return result;
-}
-#endif
-
-/* PyObjectCallOneArg */
-#if CYTHON_COMPILING_IN_CPYTHON
-static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-    PyObject *result;
-    PyObject *args = PyTuple_New(1);
-    if (unlikely(!args)) return NULL;
-    Py_INCREF(arg);
-    PyTuple_SET_ITEM(args, 0, arg);
-    result = __Pyx_PyObject_Call(func, args, NULL);
-    Py_DECREF(args);
-    return result;
-}
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-#if CYTHON_FAST_PYCALL
-    if (PyFunction_Check(func)) {
-        return __Pyx_PyFunction_FastCall(func, &arg, 1);
-    }
-#endif
-    if (likely(PyCFunction_Check(func))) {
-        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
-            return __Pyx_PyObject_CallMethO(func, arg);
-#if CYTHON_FAST_PYCCALL
-        } else if (__Pyx_PyFastCFunction_Check(func)) {
-            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
-#endif
-        }
-    }
-    return __Pyx__PyObject_CallOneArg(func, arg);
-}
-#else
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
-    PyObject *result;
-    PyObject *args = PyTuple_Pack(1, arg);
-    if (unlikely(!args)) return NULL;
-    result = __Pyx_PyObject_Call(func, args, NULL);
-    Py_DECREF(args);
-    return result;
-}
-#endif
-
-/* Import */
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
-    PyObject *empty_list = 0;
-    PyObject *module = 0;
-    PyObject *global_dict = 0;
-    PyObject *empty_dict = 0;
-    PyObject *list;
-    #if PY_MAJOR_VERSION < 3
-    PyObject *py_import;
-    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
-    if (!py_import)
-        goto bad;
-    #endif
-    if (from_list)
-        list = from_list;
-    else {
-        empty_list = PyList_New(0);
-        if (!empty_list)
-            goto bad;
-        list = empty_list;
-    }
-    global_dict = PyModule_GetDict(__pyx_m);
-    if (!global_dict)
-        goto bad;
-    empty_dict = PyDict_New();
-    if (!empty_dict)
-        goto bad;
-    {
-        #if PY_MAJOR_VERSION >= 3
-        if (level == -1) {
-            if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
-                module = PyImport_ImportModuleLevelObject(
-                    name, global_dict, empty_dict, list, 1);
-                if (!module) {
-                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
-                        goto bad;
-                    PyErr_Clear();
-                }
-            }
-            level = 0;
-        }
-        #endif
-        if (!module) {
-            #if PY_MAJOR_VERSION < 3
-            PyObject *py_level = PyInt_FromLong(level);
-            if (!py_level)
-                goto bad;
-            module = PyObject_CallFunctionObjArgs(py_import,
-                name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
-            Py_DECREF(py_level);
-            #else
-            module = PyImport_ImportModuleLevelObject(
-                name, global_dict, empty_dict, list, level);
-            #endif
-        }
-    }
-bad:
-    #if PY_MAJOR_VERSION < 3
-    Py_XDECREF(py_import);
-    #endif
-    Py_XDECREF(empty_list);
-    Py_XDECREF(empty_dict);
-    return module;
-}
-
-/* ImportFrom */
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
-    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
-    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
-        PyErr_Format(PyExc_ImportError,
-        #if PY_MAJOR_VERSION < 3
-            "cannot import name %.230s", PyString_AS_STRING(name));
-        #else
-            "cannot import name %S", name);
-        #endif
-    }
-    return value;
-}
-
-/* PyObjectCallNoArg */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
-#if CYTHON_FAST_PYCALL
-    if (PyFunction_Check(func)) {
-        return __Pyx_PyFunction_FastCall(func, NULL, 0);
-    }
-#endif
-#ifdef __Pyx_CyFunction_USED
-    if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
-#else
-    if (likely(PyCFunction_Check(func)))
-#endif
-    {
-        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
-            return __Pyx_PyObject_CallMethO(func, NULL);
-        }
-    }
-    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
-}
-#endif
-
-/* RaiseTooManyValuesToUnpack */
-static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
-    PyErr_Format(PyExc_ValueError,
-                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
-}
-
-/* RaiseNeedMoreValuesToUnpack */
-static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
-    PyErr_Format(PyExc_ValueError,
-                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
-                 index, (index == 1) ? "" : "s");
-}
-
-/* IterFinish */
-static CYTHON_INLINE int __Pyx_IterFinish(void) {
-#if CYTHON_FAST_THREAD_STATE
-    PyThreadState *tstate = __Pyx_PyThreadState_Current;
-    PyObject* exc_type = tstate->curexc_type;
-    if (unlikely(exc_type)) {
-        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
-            PyObject *exc_value, *exc_tb;
-            exc_value = tstate->curexc_value;
-            exc_tb = tstate->curexc_traceback;
-            tstate->curexc_type = 0;
-            tstate->curexc_value = 0;
-            tstate->curexc_traceback = 0;
-            Py_DECREF(exc_type);
-            Py_XDECREF(exc_value);
-            Py_XDECREF(exc_tb);
-            return 0;
-        } else {
-            return -1;
-        }
-    }
-    return 0;
-#else
-    if (unlikely(PyErr_Occurred())) {
-        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
-            PyErr_Clear();
-            return 0;
-        } else {
-            return -1;
-        }
-    }
-    return 0;
-#endif
-}
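__Pyx_IterFinish exists because the iterator protocol signals exhaustion and failure the same way: PyIter_Next returns NULL in both cases, and only a pending exception tells them apart. A sketch of consuming an iterator with that check, under the hypothetical name drain:

#include <Python.h>

static int drain(PyObject *iterable) {
    PyObject *it = PyObject_GetIter(iterable);
    if (!it) return -1;
    PyObject *item;
    while ((item = PyIter_Next(it)) != NULL)
        Py_DECREF(item);                 /* consume and discard */
    Py_DECREF(it);
    /* NULL from PyIter_Next means exhaustion or error; only a pending
       exception distinguishes them, the ambiguity IterFinish resolves. */
    return PyErr_Occurred() ? -1 : 0;
}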
-
-/* UnpackItemEndCheck */
-static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
-    if (unlikely(retval)) {
-        Py_DECREF(retval);
-        __Pyx_RaiseTooManyValuesError(expected);
-        return -1;
-    } else {
-        return __Pyx_IterFinish();
-    }
-    return 0;
-}
-
-/* PyErrFetchRestore */
-#if CYTHON_FAST_THREAD_STATE
-static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
-    PyObject *tmp_type, *tmp_value, *tmp_tb;
-    tmp_type = tstate->curexc_type;
-    tmp_value = tstate->curexc_value;
-    tmp_tb = tstate->curexc_traceback;
-    tstate->curexc_type = type;
-    tstate->curexc_value = value;
-    tstate->curexc_traceback = tb;
-    Py_XDECREF(tmp_type);
-    Py_XDECREF(tmp_value);
-    Py_XDECREF(tmp_tb);
-}
-static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
-    *type = tstate->curexc_type;
-    *value = tstate->curexc_value;
-    *tb = tstate->curexc_traceback;
-    tstate->curexc_type = 0;
-    tstate->curexc_value = 0;
-    tstate->curexc_traceback = 0;
-}
-#endif
-
-/* RaiseException */
-#if PY_MAJOR_VERSION < 3
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
-                        CYTHON_UNUSED PyObject *cause) {
-    __Pyx_PyThreadState_declare
-    Py_XINCREF(type);
-    if (!value || value == Py_None)
-        value = NULL;
-    else
-        Py_INCREF(value);
-    if (!tb || tb == Py_None)
-        tb = NULL;
-    else {
-        Py_INCREF(tb);
-        if (!PyTraceBack_Check(tb)) {
-            PyErr_SetString(PyExc_TypeError,
-                "raise: arg 3 must be a traceback or None");
-            goto raise_error;
-        }
-    }
-    if (PyType_Check(type)) {
-#if CYTHON_COMPILING_IN_PYPY
-        if (!value) {
-            Py_INCREF(Py_None);
-            value = Py_None;
-        }
-#endif
-        PyErr_NormalizeException(&type, &value, &tb);
-    } else {
-        if (value) {
-            PyErr_SetString(PyExc_TypeError,
-                "instance exception may not have a separate value");
-            goto raise_error;
-        }
-        value = type;
-        type = (PyObject*) Py_TYPE(type);
-        Py_INCREF(type);
-        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
-            PyErr_SetString(PyExc_TypeError,
-                "raise: exception class must be a subclass of BaseException");
-            goto raise_error;
-        }
-    }
-    __Pyx_PyThreadState_assign
-    __Pyx_ErrRestore(type, value, tb);
-    return;
-raise_error:
-    Py_XDECREF(value);
-    Py_XDECREF(type);
-    Py_XDECREF(tb);
-    return;
-}
-#else
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
-    PyObject* owned_instance = NULL;
-    if (tb == Py_None) {
-        tb = 0;
-    } else if (tb && !PyTraceBack_Check(tb)) {
-        PyErr_SetString(PyExc_TypeError,
-            "raise: arg 3 must be a traceback or None");
-        goto bad;
-    }
-    if (value == Py_None)
-        value = 0;
-    if (PyExceptionInstance_Check(type)) {
-        if (value) {
-            PyErr_SetString(PyExc_TypeError,
-                "instance exception may not have a separate value");
-            goto bad;
-        }
-        value = type;
-        type = (PyObject*) Py_TYPE(value);
-    } else if (PyExceptionClass_Check(type)) {
-        PyObject *instance_class = NULL;
-        if (value && PyExceptionInstance_Check(value)) {
-            instance_class = (PyObject*) Py_TYPE(value);
-            if (instance_class != type) {
-                int is_subclass = PyObject_IsSubclass(instance_class, type);
-                if (!is_subclass) {
-                    instance_class = NULL;
-                } else if (unlikely(is_subclass == -1)) {
-                    goto bad;
-                } else {
-                    type = instance_class;
-                }
-            }
-        }
-        if (!instance_class) {
-            PyObject *args;
-            if (!value)
-                args = PyTuple_New(0);
-            else if (PyTuple_Check(value)) {
-                Py_INCREF(value);
-                args = value;
-            } else
-                args = PyTuple_Pack(1, value);
-            if (!args)
-                goto bad;
-            owned_instance = PyObject_Call(type, args, NULL);
-            Py_DECREF(args);
-            if (!owned_instance)
-                goto bad;
-            value = owned_instance;
-            if (!PyExceptionInstance_Check(value)) {
-                PyErr_Format(PyExc_TypeError,
-                             "calling %R should have returned an instance of "
-                             "BaseException, not %R",
-                             type, Py_TYPE(value));
-                goto bad;
-            }
-        }
-    } else {
-        PyErr_SetString(PyExc_TypeError,
-            "raise: exception class must be a subclass of BaseException");
-        goto bad;
-    }
-    if (cause) {
-        PyObject *fixed_cause;
-        if (cause == Py_None) {
-            fixed_cause = NULL;
-        } else if (PyExceptionClass_Check(cause)) {
-            fixed_cause = PyObject_CallObject(cause, NULL);
-            if (fixed_cause == NULL)
-                goto bad;
-        } else if (PyExceptionInstance_Check(cause)) {
-            fixed_cause = cause;
-            Py_INCREF(fixed_cause);
-        } else {
-            PyErr_SetString(PyExc_TypeError,
-                            "exception causes must derive from "
-                            "BaseException");
-            goto bad;
-        }
-        PyException_SetCause(value, fixed_cause);
-    }
-    PyErr_SetObject(type, value);
-    if (tb) {
-#if CYTHON_COMPILING_IN_PYPY
-        PyObject *tmp_type, *tmp_value, *tmp_tb;
-        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
-        Py_INCREF(tb);
-        PyErr_Restore(tmp_type, tmp_value, tb);
-        Py_XDECREF(tmp_tb);
-#else
-        PyThreadState *tstate = __Pyx_PyThreadState_Current;
-        PyObject* tmp_tb = tstate->curexc_traceback;
-        if (tb != tmp_tb) {
-            Py_INCREF(tb);
-            tstate->curexc_traceback = tb;
-            Py_XDECREF(tmp_tb);
-        }
-#endif
-    }
-bad:
-    Py_XDECREF(owned_instance);
-    return;
-}
-#endif
-
-/* PyIntBinop */
-#if !CYTHON_COMPILING_IN_PYPY
-static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
-    (void)inplace;
-    (void)zerodivision_check;
-    #if PY_MAJOR_VERSION < 3
-    if (likely(PyInt_CheckExact(op1))) {
-        const long b = intval;
-        long x;
-        long a = PyInt_AS_LONG(op1);
-            x = (long)((unsigned long)a + b);
-            if (likely((x^a) >= 0 || (x^b) >= 0))
-                return PyInt_FromLong(x);
-            return PyLong_Type.tp_as_number->nb_add(op1, op2);
-    }
-    #endif
-    #if CYTHON_USE_PYLONG_INTERNALS
-    if (likely(PyLong_CheckExact(op1))) {
-        const long b = intval;
-        long a, x;
-#ifdef HAVE_LONG_LONG
-        const PY_LONG_LONG llb = intval;
-        PY_LONG_LONG lla, llx;
-#endif
-        const digit* digits = ((PyLongObject*)op1)->ob_digit;
-        const Py_ssize_t size = Py_SIZE(op1);
-        if (likely(__Pyx_sst_abs(size) <= 1)) {
-            a = likely(size) ? digits[0] : 0;
-            if (size == -1) a = -a;
-        } else {
-            switch (size) {
-                case -2:
-                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
-                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
-                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                case 2:
-                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
-                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
-                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                case -3:
-                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
-                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
-                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                case 3:
-                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
-                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
-                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                case -4:
-                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
-                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
-                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                case 4:
-                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
-                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
-                        break;
-#ifdef HAVE_LONG_LONG
-                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
-                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
-                        goto long_long;
-#endif
-                    }
-                    CYTHON_FALLTHROUGH;
-                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
-            }
-        }
-                x = a + b;
-            return PyLong_FromLong(x);
-#ifdef HAVE_LONG_LONG
-        long_long:
-                llx = lla + llb;
-            return PyLong_FromLongLong(llx);
-#endif
-        
-        
-    }
-    #endif
-    if (PyFloat_CheckExact(op1)) {
-        const long b = intval;
-        double a = PyFloat_AS_DOUBLE(op1);
-            double result;
-            PyFPE_START_PROTECT("add", return NULL)
-            result = ((double)a) + (double)b;
-            PyFPE_END_PROTECT(result)
-            return PyFloat_FromDouble(result);
-    }
-    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
-}
-#endif
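The digit unrolling above ends in a plain C addition guarded by the classic two's-complement test: x = a + b overflowed exactly when a and b share a sign that x does not, i.e. when both (x ^ a) and (x ^ b) are negative; doing the addition on unsigned long keeps the wraparound well defined. The same check standalone, under the hypothetical name add_checked:

#include <stdio.h>
#include <limits.h>

static int add_checked(long a, long b, long *out) {
    /* unsigned add: well-defined wraparound, no signed-overflow UB */
    long x = (long)((unsigned long)a + (unsigned long)b);
    if ((x ^ a) >= 0 || (x ^ b) >= 0) {  /* sign of x matches a or b */
        *out = x;
        return 0;                        /* no overflow */
    }
    return -1;                           /* would overflow */
}

int main(void) {
    long r;
    printf("%d\n", add_checked(1, 2, &r));         /* 0, r == 3 */
    printf("%d\n", add_checked(LONG_MAX, 1, &r));  /* -1        */
    return 0;
}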
-
-/* GetItemInt */
-static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
-    PyObject *r;
-    if (!j) return NULL;
-    r = PyObject_GetItem(o, j);
-    Py_DECREF(j);
-    return r;
-}
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
-                                                              CYTHON_NCP_UNUSED int wraparound,
-                                                              CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    Py_ssize_t wrapped_i = i;
-    if (wraparound & unlikely(i < 0)) {
-        wrapped_i += PyList_GET_SIZE(o);
-    }
-    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
-        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
-        Py_INCREF(r);
-        return r;
-    }
-    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-#else
-    return PySequence_GetItem(o, i);
-#endif
-}
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
-                                                              CYTHON_NCP_UNUSED int wraparound,
-                                                              CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    Py_ssize_t wrapped_i = i;
-    if (wraparound & unlikely(i < 0)) {
-        wrapped_i += PyTuple_GET_SIZE(o);
-    }
-    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
-        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
-        Py_INCREF(r);
-        return r;
-    }
-    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-#else
-    return PySequence_GetItem(o, i);
-#endif
-}
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
-                                                     CYTHON_NCP_UNUSED int wraparound,
-                                                     CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
-    if (is_list || PyList_CheckExact(o)) {
-        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
-        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
-            PyObject *r = PyList_GET_ITEM(o, n);
-            Py_INCREF(r);
-            return r;
-        }
-    }
-    else if (PyTuple_CheckExact(o)) {
-        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
-        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
-            PyObject *r = PyTuple_GET_ITEM(o, n);
-            Py_INCREF(r);
-            return r;
-        }
-    } else {
-        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
-        if (likely(m && m->sq_item)) {
-            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
-                Py_ssize_t l = m->sq_length(o);
-                if (likely(l >= 0)) {
-                    i += l;
-                } else {
-                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
-                        return NULL;
-                    PyErr_Clear();
-                }
-            }
-            return m->sq_item(o, i);
-        }
-    }
-#else
-    if (is_list || PySequence_Check(o)) {
-        return PySequence_GetItem(o, i);
-    }
-#endif
-    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-}
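Each fast path above implements the same contract: optionally wrap a negative index by the sequence length, bounds-check, then fetch. The naive equivalent through the abstract API, under the hypothetical name get_item_wrapped (the specialized code exists to avoid these extra calls):

#include <Python.h>

static PyObject *get_item_wrapped(PyObject *seq, Py_ssize_t i) {
    Py_ssize_t n = PySequence_Size(seq);
    if (n < 0) return NULL;               /* not a sized sequence */
    if (i < 0) i += n;                    /* Python-style wraparound */
    if (i < 0 || i >= n) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }
    return PySequence_GetItem(seq, i);    /* new reference */
}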
-
-/* ObjectGetItem */
-#if CYTHON_USE_TYPE_SLOTS
-static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
-    PyObject *runerr;
-    Py_ssize_t key_value;
-    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
-    if (unlikely(!(m && m->sq_item))) {
-        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
-        return NULL;
-    }
-    key_value = __Pyx_PyIndex_AsSsize_t(index);
-    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
-        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
-    }
-    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
-        PyErr_Clear();
-        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
-    }
-    return NULL;
-}
-static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
-    PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
-    if (likely(m && m->mp_subscript)) {
-        return m->mp_subscript(obj, key);
-    }
-    return __Pyx_PyObject_GetIndex(obj, key);
-}
-#endif
-
-/* SetItemInt */
-static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
-    int r;
-    if (!j) return -1;
-    r = PyObject_SetItem(o, j, v);
-    Py_DECREF(j);
-    return r;
-}
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
-                                               CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
-    if (is_list || PyList_CheckExact(o)) {
-        Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
-        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
-            PyObject* old = PyList_GET_ITEM(o, n);
-            Py_INCREF(v);
-            PyList_SET_ITEM(o, n, v);
-            Py_DECREF(old);
-            return 1;
-        }
-    } else {
-        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
-        if (likely(m && m->sq_ass_item)) {
-            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
-                Py_ssize_t l = m->sq_length(o);
-                if (likely(l >= 0)) {
-                    i += l;
-                } else {
-                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
-                        return -1;
-                    PyErr_Clear();
-                }
-            }
-            return m->sq_ass_item(o, i, v);
-        }
-    }
-#else
-#if CYTHON_COMPILING_IN_PYPY
-    if (is_list || (PySequence_Check(o) && !PyDict_Check(o)))
-#else
-    if (is_list || PySequence_Check(o))
-#endif
-    {
-        return PySequence_SetItem(o, i, v);
-    }
-#endif
-    return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
-}
-
-/* MemviewSliceInit */
-static int
-__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
-                        int ndim,
-                        __Pyx_memviewslice *memviewslice,
-                        int memview_is_new_reference)
-{
-    __Pyx_RefNannyDeclarations
-    int i, retval=-1;
-    Py_buffer *buf = &memview->view;
-    __Pyx_RefNannySetupContext("init_memviewslice", 0);
-    if (unlikely(memviewslice->memview || memviewslice->data)) {
-        PyErr_SetString(PyExc_ValueError,
-            "memviewslice is already initialized!");
-        goto fail;
-    }
-    if (buf->strides) {
-        for (i = 0; i < ndim; i++) {
-            memviewslice->strides[i] = buf->strides[i];
-        }
-    } else {
-        Py_ssize_t stride = buf->itemsize;
-        for (i = ndim - 1; i >= 0; i--) {
-            memviewslice->strides[i] = stride;
-            stride *= buf->shape[i];
-        }
-    }
-    for (i = 0; i < ndim; i++) {
-        memviewslice->shape[i]   = buf->shape[i];
-        if (buf->suboffsets) {
-            memviewslice->suboffsets[i] = buf->suboffsets[i];
-        } else {
-            memviewslice->suboffsets[i] = -1;
-        }
-    }
-    memviewslice->memview = memview;
-    memviewslice->data = (char *)buf->buf;
-    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
-        Py_INCREF(memview);
-    }
-    retval = 0;
-    goto no_fail;
-fail:
-    memviewslice->memview = 0;
-    memviewslice->data = 0;
-    retval = -1;
-no_fail:
-    __Pyx_RefNannyFinishContext();
-    return retval;
-}
-#ifndef Py_NO_RETURN
-#define Py_NO_RETURN
-#endif
-static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
-    va_list vargs;
-    char msg[200];
-#ifdef HAVE_STDARG_PROTOTYPES
-    va_start(vargs, fmt);
-#else
-    va_start(vargs);
-#endif
-    vsnprintf(msg, 200, fmt, vargs);
-    va_end(vargs);
-    Py_FatalError(msg);
-}
-static CYTHON_INLINE int
-__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
-                                   PyThread_type_lock lock)
-{
-    int result;
-    PyThread_acquire_lock(lock, 1);
-    result = (*acquisition_count)++;
-    PyThread_release_lock(lock);
-    return result;
-}
-static CYTHON_INLINE int
-__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
-                                   PyThread_type_lock lock)
-{
-    int result;
-    PyThread_acquire_lock(lock, 1);
-    result = (*acquisition_count)--;
-    PyThread_release_lock(lock);
-    return result;
-}
-static CYTHON_INLINE void
-__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
-{
-    int first_time;
-    struct __pyx_memoryview_obj *memview = memslice->memview;
-    if (unlikely(!memview || (PyObject *) memview == Py_None))
-        return;
-    if (unlikely(__pyx_get_slice_count(memview) < 0))
-        __pyx_fatalerror("Acquisition count is %d (line %d)",
-                         __pyx_get_slice_count(memview), lineno);
-    first_time = __pyx_add_acquisition_count(memview) == 0;
-    if (unlikely(first_time)) {
-        if (have_gil) {
-            Py_INCREF((PyObject *) memview);
-        } else {
-            PyGILState_STATE _gilstate = PyGILState_Ensure();
-            Py_INCREF((PyObject *) memview);
-            PyGILState_Release(_gilstate);
-        }
-    }
-}
-static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
-                                             int have_gil, int lineno) {
-    int last_time;
-    struct __pyx_memoryview_obj *memview = memslice->memview;
-    if (unlikely(!memview || (PyObject *) memview == Py_None)) {
-        memslice->memview = NULL;
-        return;
-    }
-    if (unlikely(__pyx_get_slice_count(memview) <= 0))
-        __pyx_fatalerror("Acquisition count is %d (line %d)",
-                         __pyx_get_slice_count(memview), lineno);
-    last_time = __pyx_sub_acquisition_count(memview) == 1;
-    memslice->data = NULL;
-    if (unlikely(last_time)) {
-        if (have_gil) {
-            Py_CLEAR(memslice->memview);
-        } else {
-            PyGILState_STATE _gilstate = PyGILState_Ensure();
-            Py_CLEAR(memslice->memview);
-            PyGILState_Release(_gilstate);
-        }
-    } else {
-        memslice->memview = NULL;
-    }
-}
-
-/* SliceObject */
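-/* obj[start:stop] with C integer bounds: on Python 2 the sq_slice slot is
-   used directly (with manual wraparound of negative bounds), otherwise a
-   PySlice object is built and passed to mp_subscript / PyObject_GetItem. */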
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
-        Py_ssize_t cstart, Py_ssize_t cstop,
-        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
-        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
-#if CYTHON_USE_TYPE_SLOTS
-    PyMappingMethods* mp;
-#if PY_MAJOR_VERSION < 3
-    PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
-    if (likely(ms && ms->sq_slice)) {
-        if (!has_cstart) {
-            if (_py_start && (*_py_start != Py_None)) {
-                cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
-                if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
-            } else
-                cstart = 0;
-        }
-        if (!has_cstop) {
-            if (_py_stop && (*_py_stop != Py_None)) {
-                cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
-                if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
-            } else
-                cstop = PY_SSIZE_T_MAX;
-        }
-        if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
-            Py_ssize_t l = ms->sq_length(obj);
-            if (likely(l >= 0)) {
-                if (cstop < 0) {
-                    cstop += l;
-                    if (cstop < 0) cstop = 0;
-                }
-                if (cstart < 0) {
-                    cstart += l;
-                    if (cstart < 0) cstart = 0;
-                }
-            } else {
-                if (!PyErr_ExceptionMatches(PyExc_OverflowError))
-                    goto bad;
-                PyErr_Clear();
-            }
-        }
-        return ms->sq_slice(obj, cstart, cstop);
-    }
-#endif
-    mp = Py_TYPE(obj)->tp_as_mapping;
-    if (likely(mp && mp->mp_subscript))
-#endif
-    {
-        PyObject* result;
-        PyObject *py_slice, *py_start, *py_stop;
-        if (_py_slice) {
-            py_slice = *_py_slice;
-        } else {
-            PyObject* owned_start = NULL;
-            PyObject* owned_stop = NULL;
-            if (_py_start) {
-                py_start = *_py_start;
-            } else {
-                if (has_cstart) {
-                    owned_start = py_start = PyInt_FromSsize_t(cstart);
-                    if (unlikely(!py_start)) goto bad;
-                } else
-                    py_start = Py_None;
-            }
-            if (_py_stop) {
-                py_stop = *_py_stop;
-            } else {
-                if (has_cstop) {
-                    owned_stop = py_stop = PyInt_FromSsize_t(cstop);
-                    if (unlikely(!py_stop)) {
-                        Py_XDECREF(owned_start);
-                        goto bad;
-                    }
-                } else
-                    py_stop = Py_None;
-            }
-            py_slice = PySlice_New(py_start, py_stop, Py_None);
-            Py_XDECREF(owned_start);
-            Py_XDECREF(owned_stop);
-            if (unlikely(!py_slice)) goto bad;
-        }
-#if CYTHON_USE_TYPE_SLOTS
-        result = mp->mp_subscript(obj, py_slice);
-#else
-        result = PyObject_GetItem(obj, py_slice);
-#endif
-        if (!_py_slice) {
-            Py_DECREF(py_slice);
-        }
-        return result;
-    }
-    PyErr_Format(PyExc_TypeError,
-        "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
-bad:
-    return NULL;
-}
-
-/* GetTopmostException */
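-/* Walks the exception-info stack (CPython >= 3.7) past entries whose
-   exception has been cleared, returning the innermost frame that still
-   holds a live "currently handled" exception. */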
-#if CYTHON_USE_EXC_INFO_STACK
-static _PyErr_StackItem *
-__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
-{
-    _PyErr_StackItem *exc_info = tstate->exc_info;
-    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
-           exc_info->previous_item != NULL)
-    {
-        exc_info = exc_info->previous_item;
-    }
-    return exc_info;
-}
-#endif
-
-/* SaveResetException */
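-/* Saves/restores the *currently handled* exception (what sys.exc_info()
-   reports), reading the thread state directly when CYTHON_FAST_THREAD_STATE
-   permits instead of going through the public API. */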
-#if CYTHON_FAST_THREAD_STATE
-static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
-    #if CYTHON_USE_EXC_INFO_STACK
-    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
-    *type = exc_info->exc_type;
-    *value = exc_info->exc_value;
-    *tb = exc_info->exc_traceback;
-    #else
-    *type = tstate->exc_type;
-    *value = tstate->exc_value;
-    *tb = tstate->exc_traceback;
-    #endif
-    Py_XINCREF(*type);
-    Py_XINCREF(*value);
-    Py_XINCREF(*tb);
-}
-static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
-    PyObject *tmp_type, *tmp_value, *tmp_tb;
-    #if CYTHON_USE_EXC_INFO_STACK
-    _PyErr_StackItem *exc_info = tstate->exc_info;
-    tmp_type = exc_info->exc_type;
-    tmp_value = exc_info->exc_value;
-    tmp_tb = exc_info->exc_traceback;
-    exc_info->exc_type = type;
-    exc_info->exc_value = value;
-    exc_info->exc_traceback = tb;
-    #else
-    tmp_type = tstate->exc_type;
-    tmp_value = tstate->exc_value;
-    tmp_tb = tstate->exc_traceback;
-    tstate->exc_type = type;
-    tstate->exc_value = value;
-    tstate->exc_traceback = tb;
-    #endif
-    Py_XDECREF(tmp_type);
-    Py_XDECREF(tmp_value);
-    Py_XDECREF(tmp_tb);
-}
-#endif
-
-/* PyErrExceptionMatches */
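-/* PyErr_ExceptionMatches replacement that reads tstate->curexc_type
-   directly; tuple arguments get a cheap identity scan before the full
-   subclass-aware comparison. */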
-#if CYTHON_FAST_THREAD_STATE
-static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
-    Py_ssize_t i, n;
-    n = PyTuple_GET_SIZE(tuple);
-#if PY_MAJOR_VERSION >= 3
-    for (i=0; i<n; i++) {
-        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
-    }
-#endif
-    for (i=0; i<n; i++) {
-        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
-    }
-    return 0;
-}
-static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
-    PyObject *exc_type = tstate->curexc_type;
-    if (exc_type == err) return 1;
-    if (unlikely(!exc_type)) return 0;
-    if (unlikely(PyTuple_Check(err)))
-        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
-    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
-}
-#endif
-
-/* GetException */
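-/* The C equivalent of entering an `except` block: fetches and normalizes
-   the pending exception, publishes it as the currently handled exception,
-   and hands back new references through *type, *value and *tb. */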
-#if CYTHON_FAST_THREAD_STATE
-static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
-#else
-static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
-#endif
-{
-    PyObject *local_type, *local_value, *local_tb;
-#if CYTHON_FAST_THREAD_STATE
-    PyObject *tmp_type, *tmp_value, *tmp_tb;
-    local_type = tstate->curexc_type;
-    local_value = tstate->curexc_value;
-    local_tb = tstate->curexc_traceback;
-    tstate->curexc_type = 0;
-    tstate->curexc_value = 0;
-    tstate->curexc_traceback = 0;
-#else
-    PyErr_Fetch(&local_type, &local_value, &local_tb);
-#endif
-    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
-#if CYTHON_FAST_THREAD_STATE
-    if (unlikely(tstate->curexc_type))
-#else
-    if (unlikely(PyErr_Occurred()))
-#endif
-        goto bad;
-    #if PY_MAJOR_VERSION >= 3
-    if (local_tb) {
-        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
-            goto bad;
-    }
-    #endif
-    Py_XINCREF(local_tb);
-    Py_XINCREF(local_type);
-    Py_XINCREF(local_value);
-    *type = local_type;
-    *value = local_value;
-    *tb = local_tb;
-#if CYTHON_FAST_THREAD_STATE
-    #if CYTHON_USE_EXC_INFO_STACK
-    {
-        _PyErr_StackItem *exc_info = tstate->exc_info;
-        tmp_type = exc_info->exc_type;
-        tmp_value = exc_info->exc_value;
-        tmp_tb = exc_info->exc_traceback;
-        exc_info->exc_type = local_type;
-        exc_info->exc_value = local_value;
-        exc_info->exc_traceback = local_tb;
-    }
-    #else
-    tmp_type = tstate->exc_type;
-    tmp_value = tstate->exc_value;
-    tmp_tb = tstate->exc_traceback;
-    tstate->exc_type = local_type;
-    tstate->exc_value = local_value;
-    tstate->exc_traceback = local_tb;
-    #endif
-    Py_XDECREF(tmp_type);
-    Py_XDECREF(tmp_value);
-    Py_XDECREF(tmp_tb);
-#else
-    PyErr_SetExcInfo(local_type, local_value, local_tb);
-#endif
-    return 0;
-bad:
-    *type = 0;
-    *value = 0;
-    *tb = 0;
-    Py_XDECREF(local_type);
-    Py_XDECREF(local_value);
-    Py_XDECREF(local_tb);
-    return -1;
-}
-
-/* ArgTypeTest */
-static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
-{
-    if (unlikely(!type)) {
-        PyErr_SetString(PyExc_SystemError, "Missing type object");
-        return 0;
-    }
-    else if (exact) {
-        #if PY_MAJOR_VERSION == 2
-        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
-        #endif
-    }
-    else {
-        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
-    }
-    PyErr_Format(PyExc_TypeError,
-        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
-        name, type->tp_name, Py_TYPE(obj)->tp_name);
-    return 0;
-}
-
-/* BytesEquals */
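-/* Equality specialized for == and != on bytes: identity, length, first
-   byte and (on CPython) the cached hash are compared before resorting to
-   memcmp; mixed-type operands fall back to rich comparison. */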
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
-#if CYTHON_COMPILING_IN_PYPY
-    return PyObject_RichCompareBool(s1, s2, equals);
-#else
-    if (s1 == s2) {
-        return (equals == Py_EQ);
-    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
-        const char *ps1, *ps2;
-        Py_ssize_t length = PyBytes_GET_SIZE(s1);
-        if (length != PyBytes_GET_SIZE(s2))
-            return (equals == Py_NE);
-        ps1 = PyBytes_AS_STRING(s1);
-        ps2 = PyBytes_AS_STRING(s2);
-        if (ps1[0] != ps2[0]) {
-            return (equals == Py_NE);
-        } else if (length == 1) {
-            return (equals == Py_EQ);
-        } else {
-            int result;
-#if CYTHON_USE_UNICODE_INTERNALS
-            Py_hash_t hash1, hash2;
-            hash1 = ((PyBytesObject*)s1)->ob_shash;
-            hash2 = ((PyBytesObject*)s2)->ob_shash;
-            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
-                return (equals == Py_NE);
-            }
-#endif
-            result = memcmp(ps1, ps2, (size_t)length);
-            return (equals == Py_EQ) ? (result == 0) : (result != 0);
-        }
-    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
-        return (equals == Py_NE);
-    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
-        return (equals == Py_NE);
-    } else {
-        int result;
-        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
-        if (!py_result)
-            return -1;
-        result = __Pyx_PyObject_IsTrue(py_result);
-        Py_DECREF(py_result);
-        return result;
-    }
-#endif
-}
-
-/* UnicodeEquals */
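-/* The same laddering for unicode: identity, length, cached hash and
-   PEP 393 kind are checked before a memcmp over the raw character data;
-   on Python 2, str operands are coerced to unicode first to preserve
-   CPython's comparison semantics. */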
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
-#if CYTHON_COMPILING_IN_PYPY
-    return PyObject_RichCompareBool(s1, s2, equals);
-#else
-#if PY_MAJOR_VERSION < 3
-    PyObject* owned_ref = NULL;
-#endif
-    int s1_is_unicode, s2_is_unicode;
-    if (s1 == s2) {
-        goto return_eq;
-    }
-    s1_is_unicode = PyUnicode_CheckExact(s1);
-    s2_is_unicode = PyUnicode_CheckExact(s2);
-#if PY_MAJOR_VERSION < 3
-    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
-        owned_ref = PyUnicode_FromObject(s2);
-        if (unlikely(!owned_ref))
-            return -1;
-        s2 = owned_ref;
-        s2_is_unicode = 1;
-    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
-        owned_ref = PyUnicode_FromObject(s1);
-        if (unlikely(!owned_ref))
-            return -1;
-        s1 = owned_ref;
-        s1_is_unicode = 1;
-    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
-        return __Pyx_PyBytes_Equals(s1, s2, equals);
-    }
-#endif
-    if (s1_is_unicode & s2_is_unicode) {
-        Py_ssize_t length;
-        int kind;
-        void *data1, *data2;
-        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
-            return -1;
-        length = __Pyx_PyUnicode_GET_LENGTH(s1);
-        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
-            goto return_ne;
-        }
-#if CYTHON_USE_UNICODE_INTERNALS
-        {
-            Py_hash_t hash1, hash2;
-        #if CYTHON_PEP393_ENABLED
-            hash1 = ((PyASCIIObject*)s1)->hash;
-            hash2 = ((PyASCIIObject*)s2)->hash;
-        #else
-            hash1 = ((PyUnicodeObject*)s1)->hash;
-            hash2 = ((PyUnicodeObject*)s2)->hash;
-        #endif
-            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
-                goto return_ne;
-            }
-        }
-#endif
-        kind = __Pyx_PyUnicode_KIND(s1);
-        if (kind != __Pyx_PyUnicode_KIND(s2)) {
-            goto return_ne;
-        }
-        data1 = __Pyx_PyUnicode_DATA(s1);
-        data2 = __Pyx_PyUnicode_DATA(s2);
-        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
-            goto return_ne;
-        } else if (length == 1) {
-            goto return_eq;
-        } else {
-            int result = memcmp(data1, data2, (size_t)(length * kind));
-            #if PY_MAJOR_VERSION < 3
-            Py_XDECREF(owned_ref);
-            #endif
-            return (equals == Py_EQ) ? (result == 0) : (result != 0);
-        }
-    } else if ((s1 == Py_None) & s2_is_unicode) {
-        goto return_ne;
-    } else if ((s2 == Py_None) & s1_is_unicode) {
-        goto return_ne;
-    } else {
-        int result;
-        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
-        #if PY_MAJOR_VERSION < 3
-        Py_XDECREF(owned_ref);
-        #endif
-        if (!py_result)
-            return -1;
-        result = __Pyx_PyObject_IsTrue(py_result);
-        Py_DECREF(py_result);
-        return result;
-    }
-return_eq:
-    #if PY_MAJOR_VERSION < 3
-    Py_XDECREF(owned_ref);
-    #endif
-    return (equals == Py_EQ);
-return_ne:
-    #if PY_MAJOR_VERSION < 3
-    Py_XDECREF(owned_ref);
-    #endif
-    return (equals == Py_NE);
-#endif
-}
-
-/* None */
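-/* Floored division for Py_ssize_t: C division truncates toward zero, so
-   the quotient is decremented whenever a nonzero remainder differs in sign
-   from the divisor. E.g. __Pyx_div_Py_ssize_t(-7, 2) == -4 (matching
-   Python's -7 // 2) while plain C gives -7 / 2 == -3. */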
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
-    Py_ssize_t q = a / b;
-    Py_ssize_t r = a - q*b;
-    q -= ((r != 0) & ((r ^ b) < 0));
-    return q;
-}
-
-/* GetAttr */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
-#if CYTHON_USE_TYPE_SLOTS
-#if PY_MAJOR_VERSION >= 3
-    if (likely(PyUnicode_Check(n)))
-#else
-    if (likely(PyString_Check(n)))
-#endif
-        return __Pyx_PyObject_GetAttrStr(o, n);
-#endif
-    return PyObject_GetAttr(o, n);
-}
-
-/* decode_c_string */
-static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
-         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
-         const char* encoding, const char* errors,
-         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
-    Py_ssize_t length;
-    if (unlikely((start < 0) | (stop < 0))) {
-        size_t slen = strlen(cstring);
-        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
-            PyErr_SetString(PyExc_OverflowError,
-                            "c-string too long to convert to Python");
-            return NULL;
-        }
-        length = (Py_ssize_t) slen;
-        if (start < 0) {
-            start += length;
-            if (start < 0)
-                start = 0;
-        }
-        if (stop < 0)
-            stop += length;
-    }
-    if (unlikely(stop <= start))
-        return __Pyx_NewRef(__pyx_empty_unicode);
-    length = stop - start;
-    cstring += start;
-    if (decode_func) {
-        return decode_func(cstring, length, errors);
-    } else {
-        return PyUnicode_Decode(cstring, length, encoding, errors);
-    }
-}
-
-/* GetAttr3 */
-static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
-        return NULL;
-    __Pyx_PyErr_Clear();
-    Py_INCREF(d);
-    return d;
-}
-static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
-    PyObject *r = __Pyx_GetAttr(o, n);
-    return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
-}
-
-/* RaiseNoneIterError */
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
-    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-}
-
-/* ExtTypeTest */
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
-    if (unlikely(!type)) {
-        PyErr_SetString(PyExc_SystemError, "Missing type object");
-        return 0;
-    }
-    if (likely(__Pyx_TypeCheck(obj, type)))
-        return 1;
-    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
-                 Py_TYPE(obj)->tp_name, type->tp_name);
-    return 0;
-}
-
-/* SwapException */
-#if CYTHON_FAST_THREAD_STATE
-static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
-    PyObject *tmp_type, *tmp_value, *tmp_tb;
-    #if CYTHON_USE_EXC_INFO_STACK
-    _PyErr_StackItem *exc_info = tstate->exc_info;
-    tmp_type = exc_info->exc_type;
-    tmp_value = exc_info->exc_value;
-    tmp_tb = exc_info->exc_traceback;
-    exc_info->exc_type = *type;
-    exc_info->exc_value = *value;
-    exc_info->exc_traceback = *tb;
-    #else
-    tmp_type = tstate->exc_type;
-    tmp_value = tstate->exc_value;
-    tmp_tb = tstate->exc_traceback;
-    tstate->exc_type = *type;
-    tstate->exc_value = *value;
-    tstate->exc_traceback = *tb;
-    #endif
-    *type = tmp_type;
-    *value = tmp_value;
-    *tb = tmp_tb;
-}
-#else
-static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
-    PyObject *tmp_type, *tmp_value, *tmp_tb;
-    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
-    PyErr_SetExcInfo(*type, *value, *tb);
-    *type = tmp_type;
-    *value = tmp_value;
-    *tb = tmp_tb;
-}
-#endif
-
-/* FastTypeChecks */
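-/* Subclass and exception matching that scans tp_mro (or walks tp_base when
-   no MRO is set) inline rather than calling PyType_IsSubtype, with identity
-   fast paths for the overwhelmingly common single-class case. */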
-#if CYTHON_COMPILING_IN_CPYTHON
-static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
-    while (a) {
-        a = a->tp_base;
-        if (a == b)
-            return 1;
-    }
-    return b == &PyBaseObject_Type;
-}
-static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
-    PyObject *mro;
-    if (a == b) return 1;
-    mro = a->tp_mro;
-    if (likely(mro)) {
-        Py_ssize_t i, n;
-        n = PyTuple_GET_SIZE(mro);
-        for (i = 0; i < n; i++) {
-            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
-                return 1;
-        }
-        return 0;
-    }
-    return __Pyx_InBases(a, b);
-}
-#if PY_MAJOR_VERSION == 2
-static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
-    PyObject *exception, *value, *tb;
-    int res;
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    __Pyx_ErrFetch(&exception, &value, &tb);
-    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
-    if (unlikely(res == -1)) {
-        PyErr_WriteUnraisable(err);
-        res = 0;
-    }
-    if (!res) {
-        res = PyObject_IsSubclass(err, exc_type2);
-        if (unlikely(res == -1)) {
-            PyErr_WriteUnraisable(err);
-            res = 0;
-        }
-    }
-    __Pyx_ErrRestore(exception, value, tb);
-    return res;
-}
-#else
-static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
-    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
-    if (!res) {
-        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
-    }
-    return res;
-}
-#endif
-static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
-    Py_ssize_t i, n;
-    assert(PyExceptionClass_Check(exc_type));
-    n = PyTuple_GET_SIZE(tuple);
-#if PY_MAJOR_VERSION >= 3
-    for (i=0; i<n; i++) {
-        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
-    }
-#endif
-    for (i=0; i<n; i++) {
-        PyObject *t = PyTuple_GET_ITEM(tuple, i);
-        #if PY_MAJOR_VERSION < 3
-        if (likely(exc_type == t)) return 1;
-        #endif
-        if (likely(PyExceptionClass_Check(t))) {
-            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
-        } else {
-            /* non-exception-class entries in the tuple are simply skipped */
-        }
-    }
-    return 0;
-}
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
-    if (likely(err == exc_type)) return 1;
-    if (likely(PyExceptionClass_Check(err))) {
-        if (likely(PyExceptionClass_Check(exc_type))) {
-            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
-        } else if (likely(PyTuple_Check(exc_type))) {
-            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
-        } else {
-            /* neither class nor tuple: fall through to the generic check below */
-        }
-    }
-    return PyErr_GivenExceptionMatches(err, exc_type);
-}
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
-    assert(PyExceptionClass_Check(exc_type1));
-    assert(PyExceptionClass_Check(exc_type2));
-    if (likely(err == exc_type1 || err == exc_type2)) return 1;
-    if (likely(PyExceptionClass_Check(err))) {
-        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
-    }
-    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
-}
-#endif
-
-/* None */
-static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
-    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
-}
-
-/* None */
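-/* Same floored-division adjustment as __Pyx_div_Py_ssize_t above, for long. */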
-static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
-    long q = a / b;
-    long r = a - q*b;
-    q -= ((r != 0) & ((r ^ b) < 0));
-    return q;
-}
-
-/* HasAttr */
-static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
-    PyObject *r;
-    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
-        PyErr_SetString(PyExc_TypeError,
-                        "hasattr(): attribute name must be string");
-        return -1;
-    }
-    r = __Pyx_GetAttr(o, n);
-    if (unlikely(!r)) {
-        PyErr_Clear();
-        return 0;
-    } else {
-        Py_DECREF(r);
-        return 1;
-    }
-}
-
-/* PyObject_GenericGetAttrNoDict */
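-/* Generic attribute lookup specialized for extension types that have no
-   instance __dict__; only compiled in for CPython < 3.7, where the core
-   interpreter does not yet optimize this case itself. */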
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
-    PyErr_Format(PyExc_AttributeError,
-#if PY_MAJOR_VERSION >= 3
-                 "'%.50s' object has no attribute '%U'",
-                 tp->tp_name, attr_name);
-#else
-                 "'%.50s' object has no attribute '%.400s'",
-                 tp->tp_name, PyString_AS_STRING(attr_name));
-#endif
-    return NULL;
-}
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
-    PyObject *descr;
-    PyTypeObject *tp = Py_TYPE(obj);
-    if (unlikely(!PyString_Check(attr_name))) {
-        return PyObject_GenericGetAttr(obj, attr_name);
-    }
-    assert(!tp->tp_dictoffset);
-    descr = _PyType_Lookup(tp, attr_name);
-    if (unlikely(!descr)) {
-        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
-    }
-    Py_INCREF(descr);
-    #if PY_MAJOR_VERSION < 3
-    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
-    #endif
-    {
-        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
-        if (unlikely(f)) {
-            PyObject *res = f(descr, obj, (PyObject *)tp);
-            Py_DECREF(descr);
-            return res;
-        }
-    }
-    return descr;
-}
-#endif
-
-/* PyObject_GenericGetAttr */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
-    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
-        return PyObject_GenericGetAttr(obj, attr_name);
-    }
-    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
-}
-#endif
-
-/* SetVTable */
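-/* Stores the C-level vtable pointer in the type's dict under
-   "__pyx_vtable__", boxed in a PyCapsule (PyCObject before Python 2.7). */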
-static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
-#if PY_VERSION_HEX >= 0x02070000
-    PyObject *ob = PyCapsule_New(vtable, 0, 0);
-#else
-    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
-#endif
-    if (!ob)
-        goto bad;
-    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
-        goto bad;
-    Py_DECREF(ob);
-    return 0;
-bad:
-    Py_XDECREF(ob);
-    return -1;
-}
-
-/* PyObjectGetAttrStrNoError */
-static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
-    __Pyx_PyThreadState_declare
-    __Pyx_PyThreadState_assign
-    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
-        __Pyx_PyErr_Clear();
-}
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
-    PyObject *result;
-#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
-    PyTypeObject* tp = Py_TYPE(obj);
-    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
-        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
-    }
-#endif
-    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
-    if (unlikely(!result)) {
-        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
-    }
-    return result;
-}
-
-/* SetupReduce */
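-/* Wires up pickling for extension types: when a type still inherits
-   object.__reduce_ex__ and carries __reduce_cython__/__setstate_cython__
-   helpers, those are installed into tp_dict as __reduce__/__setstate__;
-   types defining their own __getstate__ are left untouched. */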
-static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
-  int ret;
-  PyObject *name_attr;
-  name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
-  if (likely(name_attr)) {
-      ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
-  } else {
-      ret = -1;
-  }
-  if (unlikely(ret < 0)) {
-      PyErr_Clear();
-      ret = 0;
-  }
-  Py_XDECREF(name_attr);
-  return ret;
-}
-static int __Pyx_setup_reduce(PyObject* type_obj) {
-    int ret = 0;
-    PyObject *object_reduce = NULL;
-    PyObject *object_reduce_ex = NULL;
-    PyObject *reduce = NULL;
-    PyObject *reduce_ex = NULL;
-    PyObject *reduce_cython = NULL;
-    PyObject *setstate = NULL;
-    PyObject *setstate_cython = NULL;
-#if CYTHON_USE_PYTYPE_LOOKUP
-    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
-#else
-    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
-#endif
-#if CYTHON_USE_PYTYPE_LOOKUP
-    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
-#else
-    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
-#endif
-    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
-    if (reduce_ex == object_reduce_ex) {
-#if CYTHON_USE_PYTYPE_LOOKUP
-        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
-#else
-        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
-#endif
-        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
-        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
-            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
-            if (likely(reduce_cython)) {
-                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
-                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
-            } else if (reduce == object_reduce || PyErr_Occurred()) {
-                goto __PYX_BAD;
-            }
-            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
-            if (!setstate) PyErr_Clear();
-            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
-                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
-                if (likely(setstate_cython)) {
-                    ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
-                    ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
-                } else if (!setstate || PyErr_Occurred()) {
-                    goto __PYX_BAD;
-                }
-            }
-            PyType_Modified((PyTypeObject*)type_obj);
-        }
-    }
-    goto __PYX_GOOD;
-__PYX_BAD:
-    if (!PyErr_Occurred())
-        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
-    ret = -1;
-__PYX_GOOD:
-#if !CYTHON_USE_PYTYPE_LOOKUP
-    Py_XDECREF(object_reduce);
-    Py_XDECREF(object_reduce_ex);
-#endif
-    Py_XDECREF(reduce);
-    Py_XDECREF(reduce_ex);
-    Py_XDECREF(reduce_cython);
-    Py_XDECREF(setstate);
-    Py_XDECREF(setstate_cython);
-    return ret;
-}
-
-/* TypeImport */
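-/* Imports a type object from another module and checks its tp_basicsize
-   against the size this module was compiled with, raising or warning on a
-   mismatch so binary-incompatible builds fail loudly instead of corrupting
-   memory. */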
-#ifndef __PYX_HAVE_RT_ImportType
-#define __PYX_HAVE_RT_ImportType
-static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
-    size_t size, enum __Pyx_ImportType_CheckSize check_size)
-{
-    PyObject *result = 0;
-    char warning[200];
-    Py_ssize_t basicsize;
-#ifdef Py_LIMITED_API
-    PyObject *py_basicsize;
-#endif
-    result = PyObject_GetAttrString(module, class_name);
-    if (!result)
-        goto bad;
-    if (!PyType_Check(result)) {
-        PyErr_Format(PyExc_TypeError,
-            "%.200s.%.200s is not a type object",
-            module_name, class_name);
-        goto bad;
-    }
-#ifndef Py_LIMITED_API
-    basicsize = ((PyTypeObject *)result)->tp_basicsize;
-#else
-    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
-    if (!py_basicsize)
-        goto bad;
-    basicsize = PyLong_AsSsize_t(py_basicsize);
-    Py_DECREF(py_basicsize);
-    py_basicsize = 0;
-    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
-        goto bad;
-#endif
-    if ((size_t)basicsize < size) {
-        PyErr_Format(PyExc_ValueError,
-            "%.200s.%.200s size changed, may indicate binary incompatibility. "
-            "Expected %zd from C header, got %zd from PyObject",
-            module_name, class_name, size, basicsize);
-        goto bad;
-    }
-    if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
-        PyErr_Format(PyExc_ValueError,
-            "%.200s.%.200s size changed, may indicate binary incompatibility. "
-            "Expected %zd from C header, got %zd from PyObject",
-            module_name, class_name, size, basicsize);
-        goto bad;
-    }
-    else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
-        PyOS_snprintf(warning, sizeof(warning),
-            "%s.%s size changed, may indicate binary incompatibility. "
-            "Expected %zd from C header, got %zd from PyObject",
-            module_name, class_name, size, basicsize);
-        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
-    }
-    return (PyTypeObject *)result;
-bad:
-    Py_XDECREF(result);
-    return NULL;
-}
-#endif
-
-/* CalculateMetaclass */
-static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) {
-    Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases);
-    for (i=0; i < nbases; i++) {
-        PyTypeObject *tmptype;
-        PyObject *tmp = PyTuple_GET_ITEM(bases, i);
-        tmptype = Py_TYPE(tmp);
-#if PY_MAJOR_VERSION < 3
-        if (tmptype == &PyClass_Type)
-            continue;
-#endif
-        if (!metaclass) {
-            metaclass = tmptype;
-            continue;
-        }
-        if (PyType_IsSubtype(metaclass, tmptype))
-            continue;
-        if (PyType_IsSubtype(tmptype, metaclass)) {
-            metaclass = tmptype;
-            continue;
-        }
-        PyErr_SetString(PyExc_TypeError,
-                        "metaclass conflict: "
-                        "the metaclass of a derived class "
-                        "must be a (non-strict) subclass "
-                        "of the metaclasses of all its bases");
-        return NULL;
-    }
-    if (!metaclass) {
-#if PY_MAJOR_VERSION < 3
-        metaclass = &PyClass_Type;
-#else
-        metaclass = &PyType_Type;
-#endif
-    }
-    Py_INCREF((PyObject*) metaclass);
-    return (PyObject*) metaclass;
-}
-
-/* FetchCommonType */
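-/* Shares helper types across Cython modules in one process by caching them
-   as attributes of a synthetic "_cython_" CYTHON_ABI module; a cached type
-   is reused only if its tp_basicsize matches this module's definition. */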
-static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
-    PyObject* fake_module;
-    PyTypeObject* cached_type = NULL;
-    fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
-    if (!fake_module) return NULL;
-    Py_INCREF(fake_module);
-    cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
-    if (cached_type) {
-        if (!PyType_Check((PyObject*)cached_type)) {
-            PyErr_Format(PyExc_TypeError,
-                "Shared Cython type %.200s is not a type object",
-                type->tp_name);
-            goto bad;
-        }
-        if (cached_type->tp_basicsize != type->tp_basicsize) {
-            PyErr_Format(PyExc_TypeError,
-                "Shared Cython type %.200s has the wrong size, try recompiling",
-                type->tp_name);
-            goto bad;
-        }
-    } else {
-        if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
-        PyErr_Clear();
-        if (PyType_Ready(type) < 0) goto bad;
-        if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
-            goto bad;
-        Py_INCREF(type);
-        cached_type = type;
-    }
-done:
-    Py_DECREF(fake_module);
-    return cached_type;
-bad:
-    Py_XDECREF(cached_type);
-    cached_type = NULL;
-    goto done;
-}
-
-/* CythonFunctionShared */
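-/* __pyx_CyFunctionObject is a PyCFunction-based type that emulates
-   pure-Python functions for compiled code: __name__/__qualname__,
-   __dict__, __doc__, __defaults__, __kwdefaults__, __annotations__ and
-   descriptor binding are all provided via the getsets and slots below. */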
-#include <structmember.h>
-static PyObject *
-__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
-{
-    if (unlikely(op->func_doc == NULL)) {
-        if (op->func.m_ml->ml_doc) {
-#if PY_MAJOR_VERSION >= 3
-            op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
-#else
-            op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
-#endif
-            if (unlikely(op->func_doc == NULL))
-                return NULL;
-        } else {
-            Py_INCREF(Py_None);
-            return Py_None;
-        }
-    }
-    Py_INCREF(op->func_doc);
-    return op->func_doc;
-}
-static int
-__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp = op->func_doc;
-    if (value == NULL) {
-        value = Py_None;
-    }
-    Py_INCREF(value);
-    op->func_doc = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    if (unlikely(op->func_name == NULL)) {
-#if PY_MAJOR_VERSION >= 3
-        op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
-#else
-        op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
-#endif
-        if (unlikely(op->func_name == NULL))
-            return NULL;
-    }
-    Py_INCREF(op->func_name);
-    return op->func_name;
-}
-static int
-__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp;
-#if PY_MAJOR_VERSION >= 3
-    if (unlikely(value == NULL || !PyUnicode_Check(value)))
-#else
-    if (unlikely(value == NULL || !PyString_Check(value)))
-#endif
-    {
-        PyErr_SetString(PyExc_TypeError,
-                        "__name__ must be set to a string object");
-        return -1;
-    }
-    tmp = op->func_name;
-    Py_INCREF(value);
-    op->func_name = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    Py_INCREF(op->func_qualname);
-    return op->func_qualname;
-}
-static int
-__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp;
-#if PY_MAJOR_VERSION >= 3
-    if (unlikely(value == NULL || !PyUnicode_Check(value)))
-#else
-    if (unlikely(value == NULL || !PyString_Check(value)))
-#endif
-    {
-        PyErr_SetString(PyExc_TypeError,
-                        "__qualname__ must be set to a string object");
-        return -1;
-    }
-    tmp = op->func_qualname;
-    Py_INCREF(value);
-    op->func_qualname = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
-{
-    PyObject *self;
-    self = m->func_closure;
-    if (self == NULL)
-        self = Py_None;
-    Py_INCREF(self);
-    return self;
-}
-static PyObject *
-__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    if (unlikely(op->func_dict == NULL)) {
-        op->func_dict = PyDict_New();
-        if (unlikely(op->func_dict == NULL))
-            return NULL;
-    }
-    Py_INCREF(op->func_dict);
-    return op->func_dict;
-}
-static int
-__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp;
-    if (unlikely(value == NULL)) {
-        PyErr_SetString(PyExc_TypeError,
-               "function's dictionary may not be deleted");
-        return -1;
-    }
-    if (unlikely(!PyDict_Check(value))) {
-        PyErr_SetString(PyExc_TypeError,
-               "setting function's dictionary to a non-dict");
-        return -1;
-    }
-    tmp = op->func_dict;
-    Py_INCREF(value);
-    op->func_dict = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    Py_INCREF(op->func_globals);
-    return op->func_globals;
-}
-static PyObject *
-__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-static PyObject *
-__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
-{
-    PyObject* result = (op->func_code) ? op->func_code : Py_None;
-    Py_INCREF(result);
-    return result;
-}
-static int
-__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
-    int result = 0;
-    PyObject *res = op->defaults_getter((PyObject *) op);
-    if (unlikely(!res))
-        return -1;
-    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-    op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
-    Py_INCREF(op->defaults_tuple);
-    op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
-    Py_INCREF(op->defaults_kwdict);
-    #else
-    op->defaults_tuple = PySequence_ITEM(res, 0);
-    if (unlikely(!op->defaults_tuple)) result = -1;
-    else {
-        op->defaults_kwdict = PySequence_ITEM(res, 1);
-        if (unlikely(!op->defaults_kwdict)) result = -1;
-    }
-    #endif
-    Py_DECREF(res);
-    return result;
-}
-static int
-__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
-    PyObject* tmp;
-    if (!value) {
-        value = Py_None;
-    } else if (value != Py_None && !PyTuple_Check(value)) {
-        PyErr_SetString(PyExc_TypeError,
-                        "__defaults__ must be set to a tuple object");
-        return -1;
-    }
-    Py_INCREF(value);
-    tmp = op->defaults_tuple;
-    op->defaults_tuple = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
-    PyObject* result = op->defaults_tuple;
-    if (unlikely(!result)) {
-        if (op->defaults_getter) {
-            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
-            result = op->defaults_tuple;
-        } else {
-            result = Py_None;
-        }
-    }
-    Py_INCREF(result);
-    return result;
-}
-static int
-__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
-    PyObject* tmp;
-    if (!value) {
-        value = Py_None;
-    } else if (value != Py_None && !PyDict_Check(value)) {
-        PyErr_SetString(PyExc_TypeError,
-                        "__kwdefaults__ must be set to a dict object");
-        return -1;
-    }
-    Py_INCREF(value);
-    tmp = op->defaults_kwdict;
-    op->defaults_kwdict = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
-    PyObject* result = op->defaults_kwdict;
-    if (unlikely(!result)) {
-        if (op->defaults_getter) {
-            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
-            result = op->defaults_kwdict;
-        } else {
-            result = Py_None;
-        }
-    }
-    Py_INCREF(result);
-    return result;
-}
-static int
-__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
-    PyObject* tmp;
-    if (!value || value == Py_None) {
-        value = NULL;
-    } else if (!PyDict_Check(value)) {
-        PyErr_SetString(PyExc_TypeError,
-                        "__annotations__ must be set to a dict object");
-        return -1;
-    }
-    Py_XINCREF(value);
-    tmp = op->func_annotations;
-    op->func_annotations = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
-    PyObject* result = op->func_annotations;
-    if (unlikely(!result)) {
-        result = PyDict_New();
-        if (unlikely(!result)) return NULL;
-        op->func_annotations = result;
-    }
-    Py_INCREF(result);
-    return result;
-}
-static PyGetSetDef __pyx_CyFunction_getsets[] = {
-    {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
-    {(char *) "__doc__",  (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
-    {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
-    {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
-    {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
-    {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
-    {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
-    {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
-    {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
-    {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
-    {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
-    {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
-    {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
-    {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
-    {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
-    {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
-    {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
-    {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
-    {0, 0, 0, 0, 0}
-};
-static PyMemberDef __pyx_CyFunction_members[] = {
-    {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0},
-    {0, 0, 0,  0, 0}
-};
-static PyObject *
-__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
-{
-#if PY_MAJOR_VERSION >= 3
-    return PyUnicode_FromString(m->func.m_ml->ml_name);
-#else
-    return PyString_FromString(m->func.m_ml->ml_name);
-#endif
-}
-static PyMethodDef __pyx_CyFunction_methods[] = {
-    {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
-    {0, 0, 0, 0}
-};
-#if PY_VERSION_HEX < 0x030500A0
-#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
-#else
-#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
-#endif
-static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
-                                       PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
-    if (unlikely(op == NULL))
-        return NULL;
-    op->flags = flags;
-    __Pyx_CyFunction_weakreflist(op) = NULL;
-    op->func.m_ml = ml;
-    op->func.m_self = (PyObject *) op;
-    Py_XINCREF(closure);
-    op->func_closure = closure;
-    Py_XINCREF(module);
-    op->func.m_module = module;
-    op->func_dict = NULL;
-    op->func_name = NULL;
-    Py_INCREF(qualname);
-    op->func_qualname = qualname;
-    op->func_doc = NULL;
-    op->func_classobj = NULL;
-    op->func_globals = globals;
-    Py_INCREF(op->func_globals);
-    Py_XINCREF(code);
-    op->func_code = code;
-    op->defaults_pyobjects = 0;
-    op->defaults_size = 0;
-    op->defaults = NULL;
-    op->defaults_tuple = NULL;
-    op->defaults_kwdict = NULL;
-    op->defaults_getter = NULL;
-    op->func_annotations = NULL;
-    return (PyObject *) op;
-}
-static int
-__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
-{
-    Py_CLEAR(m->func_closure);
-    Py_CLEAR(m->func.m_module);
-    Py_CLEAR(m->func_dict);
-    Py_CLEAR(m->func_name);
-    Py_CLEAR(m->func_qualname);
-    Py_CLEAR(m->func_doc);
-    Py_CLEAR(m->func_globals);
-    Py_CLEAR(m->func_code);
-    Py_CLEAR(m->func_classobj);
-    Py_CLEAR(m->defaults_tuple);
-    Py_CLEAR(m->defaults_kwdict);
-    Py_CLEAR(m->func_annotations);
-    if (m->defaults) {
-        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
-        int i;
-        for (i = 0; i < m->defaults_pyobjects; i++)
-            Py_XDECREF(pydefaults[i]);
-        PyObject_Free(m->defaults);
-        m->defaults = NULL;
-    }
-    return 0;
-}
-static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
-{
-    if (__Pyx_CyFunction_weakreflist(m) != NULL)
-        PyObject_ClearWeakRefs((PyObject *) m);
-    __Pyx_CyFunction_clear(m);
-    PyObject_GC_Del(m);
-}
-static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
-{
-    PyObject_GC_UnTrack(m);
-    __Pyx__CyFunction_dealloc(m);
-}
-static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
-{
-    Py_VISIT(m->func_closure);
-    Py_VISIT(m->func.m_module);
-    Py_VISIT(m->func_dict);
-    Py_VISIT(m->func_name);
-    Py_VISIT(m->func_qualname);
-    Py_VISIT(m->func_doc);
-    Py_VISIT(m->func_globals);
-    Py_VISIT(m->func_code);
-    Py_VISIT(m->func_classobj);
-    Py_VISIT(m->defaults_tuple);
-    Py_VISIT(m->defaults_kwdict);
-    if (m->defaults) {
-        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
-        int i;
-        for (i = 0; i < m->defaults_pyobjects; i++)
-            Py_VISIT(pydefaults[i]);
-    }
-    return 0;
-}
-static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
-{
-#if PY_MAJOR_VERSION < 3
-    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
-    if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
-        Py_INCREF(func);
-        return func;
-    }
-    if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
-        if (type == NULL)
-            type = (PyObject *)(Py_TYPE(obj));
-        return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
-    }
-    if (obj == Py_None)
-        obj = NULL;
-#endif
-    return __Pyx_PyMethod_New(func, obj, type);
-}
-static PyObject*
-__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
-{
-#if PY_MAJOR_VERSION >= 3
-    return PyUnicode_FromFormat("<cyfunction %U at %p>",
-                                op->func_qualname, (void *)op);
-#else
-    return PyString_FromFormat("<cyfunction %s at %p>",
-                               PyString_AsString(op->func_qualname), (void *)op);
-#endif
-}
-static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
-    PyCFunctionObject* f = (PyCFunctionObject*)func;
-    PyCFunction meth = f->m_ml->ml_meth;
-    Py_ssize_t size;
-    switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
-    case METH_VARARGS:
-        if (likely(kw == NULL || PyDict_Size(kw) == 0))
-            return (*meth)(self, arg);
-        break;
-    case METH_VARARGS | METH_KEYWORDS:
-        return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
-    case METH_NOARGS:
-        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
-            size = PyTuple_GET_SIZE(arg);
-            if (likely(size == 0))
-                return (*meth)(self, NULL);
-            PyErr_Format(PyExc_TypeError,
-                "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
-                f->m_ml->ml_name, size);
-            return NULL;
-        }
-        break;
-    case METH_O:
-        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
-            size = PyTuple_GET_SIZE(arg);
-            if (likely(size == 1)) {
-                PyObject *result, *arg0;
-                #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-                arg0 = PyTuple_GET_ITEM(arg, 0);
-                #else
-                arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
-                #endif
-                result = (*meth)(self, arg0);
-                #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
-                Py_DECREF(arg0);
-                #endif
-                return result;
-            }
-            PyErr_Format(PyExc_TypeError,
-                "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
-                f->m_ml->ml_name, size);
-            return NULL;
-        }
-        break;
-    default:
-        PyErr_SetString(PyExc_SystemError, "Bad call flags in "
-                        "__Pyx_CyFunction_Call. METH_OLDARGS is no "
-                        "longer supported!");
-        return NULL;
-    }
-    PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
-                 f->m_ml->ml_name);
-    return NULL;
-}
-static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
-    return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
-}
-static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
-    PyObject *result;
-    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
-    if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
-        Py_ssize_t argc;
-        PyObject *new_args;
-        PyObject *self;
-        argc = PyTuple_GET_SIZE(args);
-        new_args = PyTuple_GetSlice(args, 1, argc);
-        if (unlikely(!new_args))
-            return NULL;
-        self = PyTuple_GetItem(args, 0);
-        if (unlikely(!self)) {
-            Py_DECREF(new_args);
-            return NULL;
-        }
-        result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
-        Py_DECREF(new_args);
-    } else {
-        result = __Pyx_CyFunction_Call(func, args, kw);
-    }
-    return result;
-}
-static PyTypeObject __pyx_CyFunctionType_type = {
-    PyVarObject_HEAD_INIT(0, 0)
-    "cython_function_or_method",                /*tp_name*/
-    sizeof(__pyx_CyFunctionObject),             /*tp_basicsize*/
-    0,                                          /*tp_itemsize*/
-    (destructor) __Pyx_CyFunction_dealloc,      /*tp_dealloc*/
-    0,                                          /*tp_print / tp_vectorcall_offset*/
-    0,                                          /*tp_getattr*/
-    0,                                          /*tp_setattr*/
-#if PY_MAJOR_VERSION < 3
-    0,                                          /*tp_compare*/
-#else
-    0,                                          /*tp_as_async*/
-#endif
-    (reprfunc) __Pyx_CyFunction_repr,           /*tp_repr*/
-    0,                                          /*tp_as_number*/
-    0,                                          /*tp_as_sequence*/
-    0,                                          /*tp_as_mapping*/
-    0,                                          /*tp_hash*/
-    __Pyx_CyFunction_CallAsMethod,              /*tp_call*/
-    0,                                          /*tp_str*/
-    0,                                          /*tp_getattro*/
-    0,                                          /*tp_setattro*/
-    0,                                          /*tp_as_buffer*/
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /*tp_flags*/
-    0,                                          /*tp_doc*/
-    (traverseproc) __Pyx_CyFunction_traverse,   /*tp_traverse*/
-    (inquiry) __Pyx_CyFunction_clear,           /*tp_clear*/
-    0,                                          /*tp_richcompare*/
-#if PY_VERSION_HEX < 0x030500A0
-    offsetof(__pyx_CyFunctionObject, func_weakreflist), /*tp_weaklistoffset*/
-#else
-    offsetof(PyCFunctionObject, m_weakreflist),         /*tp_weaklistoffset*/
-#endif
-    0,                                          /*tp_iter*/
-    0,                                          /*tp_iternext*/
-    __pyx_CyFunction_methods,                   /*tp_methods*/
-    __pyx_CyFunction_members,                   /*tp_members*/
-    __pyx_CyFunction_getsets,                   /*tp_getset*/
-    0,                                          /*tp_base*/
-    0,                                          /*tp_dict*/
-    __Pyx_CyFunction_descr_get,                 /*tp_descr_get*/
-    0,                                          /*tp_descr_set*/
-    offsetof(__pyx_CyFunctionObject, func_dict),/*tp_dictoffset*/
-    0,                                          /*tp_init*/
-    0,                                          /*tp_alloc*/
-    0,                                          /*tp_new*/
-    0,                                          /*tp_free*/
-    0,                                          /*tp_is_gc*/
-    0,                                          /*tp_bases*/
-    0,                                          /*tp_mro*/
-    0,                                          /*tp_cache*/
-    0,                                          /*tp_subclasses*/
-    0,                                          /*tp_weaklist*/
-    0,                                          /*tp_del*/
-    0,                                          /*tp_version_tag*/
-#if PY_VERSION_HEX >= 0x030400a1
-    0,                                          /*tp_finalize*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b1
-    0,                                          /*tp_vectorcall*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-    0,                                          /*tp_print*/
-#endif
-};
-static int __pyx_CyFunction_init(void) {
-    __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
-    if (unlikely(__pyx_CyFunctionType == NULL)) {
-        return -1;
-    }
-    return 0;
-}
-static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
-    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
-    m->defaults = PyObject_Malloc(size);
-    if (unlikely(!m->defaults))
-        return PyErr_NoMemory();
-    memset(m->defaults, 0, size);
-    m->defaults_pyobjects = pyobjects;
-    m->defaults_size = size;
-    return m->defaults;
-}
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
-    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
-    m->defaults_tuple = tuple;
-    Py_INCREF(tuple);
-}
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
-    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
-    m->defaults_kwdict = dict;
-    Py_INCREF(dict);
-}
-static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
-    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
-    m->func_annotations = dict;
-    Py_INCREF(dict);
-}
-
-/* CythonFunction */
-static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname,
-                                      PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
-    PyObject *op = __Pyx_CyFunction_Init(
-        PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType),
-        ml, flags, qualname, closure, module, globals, code
-    );
-    if (likely(op)) {
-        PyObject_GC_Track(op);
-    }
-    return op;
-}
-
-/* ClassMethod */
-static PyObject* __Pyx_Method_ClassMethod(PyObject *method) {
-#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM <= 0x05080000
-    if (PyObject_TypeCheck(method, &PyWrapperDescr_Type)) {
-        return PyClassMethod_New(method);
-    }
-#else
-#if CYTHON_COMPILING_IN_PYSTON || CYTHON_COMPILING_IN_PYPY
-    if (PyMethodDescr_Check(method))
-#else
-    #if PY_MAJOR_VERSION == 2
-    static PyTypeObject *methoddescr_type = NULL;
-    if (methoddescr_type == NULL) {
-       PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append");
-       if (!meth) return NULL;
-       methoddescr_type = Py_TYPE(meth);
-       Py_DECREF(meth);
-    }
-    #else
-    PyTypeObject *methoddescr_type = &PyMethodDescr_Type;
-    #endif
-    if (__Pyx_TypeCheck(method, methoddescr_type))
-#endif
-    {
-        PyMethodDescrObject *descr = (PyMethodDescrObject *)method;
-        #if PY_VERSION_HEX < 0x03020000
-        PyTypeObject *d_type = descr->d_type;
-        #else
-        PyTypeObject *d_type = descr->d_common.d_type;
-        #endif
-        return PyDescr_NewClassMethod(d_type, descr->d_method);
-    }
-#endif
-    else if (PyMethod_Check(method)) {
-        return PyClassMethod_New(PyMethod_GET_FUNCTION(method));
-    }
-    else {
-        return PyClassMethod_New(method);
-    }
-}
-
-/* Py3ClassCreate */
-static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name,
-                                           PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) {
-    PyObject *ns;
-    if (metaclass) {
-        PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare);
-        if (prep) {
-            PyObject *pargs = PyTuple_Pack(2, name, bases);
-            if (unlikely(!pargs)) {
-                Py_DECREF(prep);
-                return NULL;
-            }
-            ns = PyObject_Call(prep, pargs, mkw);
-            Py_DECREF(prep);
-            Py_DECREF(pargs);
-        } else {
-            if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError)))
-                return NULL;
-            PyErr_Clear();
-            ns = PyDict_New();
-        }
-    } else {
-        ns = PyDict_New();
-    }
-    if (unlikely(!ns))
-        return NULL;
-    if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad;
-    if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad;
-    if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad;
-    return ns;
-bad:
-    Py_DECREF(ns);
-    return NULL;
-}
-static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases,
-                                      PyObject *dict, PyObject *mkw,
-                                      int calculate_metaclass, int allow_py2_metaclass) {
-    PyObject *result, *margs;
-    PyObject *owned_metaclass = NULL;
-    if (allow_py2_metaclass) {
-        owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass);
-        if (owned_metaclass) {
-            metaclass = owned_metaclass;
-        } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) {
-            PyErr_Clear();
-        } else {
-            return NULL;
-        }
-    }
-    if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) {
-        metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
-        Py_XDECREF(owned_metaclass);
-        if (unlikely(!metaclass))
-            return NULL;
-        owned_metaclass = metaclass;
-    }
-    margs = PyTuple_Pack(3, name, bases, dict);
-    if (unlikely(!margs)) {
-        result = NULL;
-    } else {
-        result = PyObject_Call(metaclass, margs, mkw);
-        Py_DECREF(margs);
-    }
-    Py_XDECREF(owned_metaclass);
-    return result;
-}
-
-/* CLineInTraceback */
-#ifndef CYTHON_CLINE_IN_TRACEBACK
-static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
-    PyObject *use_cline;
-    PyObject *ptype, *pvalue, *ptraceback;
-#if CYTHON_COMPILING_IN_CPYTHON
-    PyObject **cython_runtime_dict;
-#endif
-    if (unlikely(!__pyx_cython_runtime)) {
-        return c_line;
-    }
-    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
-#if CYTHON_COMPILING_IN_CPYTHON
-    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
-    if (likely(cython_runtime_dict)) {
-        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
-            use_cline, *cython_runtime_dict,
-            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
-    } else
-#endif
-    {
-      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
-      if (use_cline_obj) {
-        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
-        Py_DECREF(use_cline_obj);
-      } else {
-        PyErr_Clear();
-        use_cline = NULL;
-      }
-    }
-    if (!use_cline) {
-        c_line = 0;
-        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
-    }
-    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
-        c_line = 0;
-    }
-    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
-    return c_line;
-}
-#endif
-
-/* CodeObjectCache */
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
-    int start = 0, mid = 0, end = count - 1;
-    if (end >= 0 && code_line > entries[end].code_line) {
-        return count;
-    }
-    while (start < end) {
-        mid = start + (end - start) / 2;
-        if (code_line < entries[mid].code_line) {
-            end = mid;
-        } else if (code_line > entries[mid].code_line) {
-             start = mid + 1;
-        } else {
-            return mid;
-        }
-    }
-    if (code_line <= entries[mid].code_line) {
-        return mid;
-    } else {
-        return mid + 1;
-    }
-}
-static PyCodeObject *__pyx_find_code_object(int code_line) {
-    PyCodeObject* code_object;
-    int pos;
-    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
-        return NULL;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
-        return NULL;
-    }
-    code_object = __pyx_code_cache.entries[pos].code_object;
-    Py_INCREF(code_object);
-    return code_object;
-}
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
-    int pos, i;
-    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
-    if (unlikely(!code_line)) {
-        return;
-    }
-    if (unlikely(!entries)) {
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
-        if (likely(entries)) {
-            __pyx_code_cache.entries = entries;
-            __pyx_code_cache.max_count = 64;
-            __pyx_code_cache.count = 1;
-            entries[0].code_line = code_line;
-            entries[0].code_object = code_object;
-            Py_INCREF(code_object);
-        }
-        return;
-    }
-    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
-    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
-        PyCodeObject* tmp = entries[pos].code_object;
-        entries[pos].code_object = code_object;
-        Py_DECREF(tmp);
-        return;
-    }
-    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
-        int new_max = __pyx_code_cache.max_count + 64;
-        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
-            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
-        if (unlikely(!entries)) {
-            return;
-        }
-        __pyx_code_cache.entries = entries;
-        __pyx_code_cache.max_count = new_max;
-    }
-    for (i=__pyx_code_cache.count; i>pos; i--) {
-        entries[i] = entries[i-1];
-    }
-    entries[pos].code_line = code_line;
-    entries[pos].code_object = code_object;
-    __pyx_code_cache.count++;
-    Py_INCREF(code_object);
-}
-
-/* AddTraceback */
-#include "compile.h"
-#include "frameobject.h"
-#include "traceback.h"
-static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
-            const char *funcname, int c_line,
-            int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyObject *py_srcfile = 0;
-    PyObject *py_funcname = 0;
-    #if PY_MAJOR_VERSION < 3
-    py_srcfile = PyString_FromString(filename);
-    #else
-    py_srcfile = PyUnicode_FromString(filename);
-    #endif
-    if (!py_srcfile) goto bad;
-    if (c_line) {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #else
-        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
-        #endif
-    }
-    else {
-        #if PY_MAJOR_VERSION < 3
-        py_funcname = PyString_FromString(funcname);
-        #else
-        py_funcname = PyUnicode_FromString(funcname);
-        #endif
-    }
-    if (!py_funcname) goto bad;
-    py_code = __Pyx_PyCode_New(
-        0,
-        0,
-        0,
-        0,
-        0,
-        __pyx_empty_bytes, /*PyObject *code,*/
-        __pyx_empty_tuple, /*PyObject *consts,*/
-        __pyx_empty_tuple, /*PyObject *names,*/
-        __pyx_empty_tuple, /*PyObject *varnames,*/
-        __pyx_empty_tuple, /*PyObject *freevars,*/
-        __pyx_empty_tuple, /*PyObject *cellvars,*/
-        py_srcfile,   /*PyObject *filename,*/
-        py_funcname,  /*PyObject *name,*/
-        py_line,
-        __pyx_empty_bytes  /*PyObject *lnotab*/
-    );
-    Py_DECREF(py_srcfile);
-    Py_DECREF(py_funcname);
-    return py_code;
-bad:
-    Py_XDECREF(py_srcfile);
-    Py_XDECREF(py_funcname);
-    return NULL;
-}
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
-                               int py_line, const char *filename) {
-    PyCodeObject *py_code = 0;
-    PyFrameObject *py_frame = 0;
-    PyThreadState *tstate = __Pyx_PyThreadState_Current;
-    if (c_line) {
-        c_line = __Pyx_CLineForTraceback(tstate, c_line);
-    }
-    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
-    if (!py_code) {
-        py_code = __Pyx_CreateCodeObjectForTraceback(
-            funcname, c_line, py_line, filename);
-        if (!py_code) goto bad;
-        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
-    }
-    py_frame = PyFrame_New(
-        tstate,            /*PyThreadState *tstate,*/
-        py_code,           /*PyCodeObject *code,*/
-        __pyx_d,    /*PyObject *globals,*/
-        0                  /*PyObject *locals*/
-    );
-    if (!py_frame) goto bad;
-    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
-    PyTraceBack_Here(py_frame);
-bad:
-    Py_XDECREF(py_code);
-    Py_XDECREF(py_frame);
-}
-
-#if PY_MAJOR_VERSION < 3
-static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
-    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
-        if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
-        if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
-    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
-    return -1;
-}
-static void __Pyx_ReleaseBuffer(Py_buffer *view) {
-    PyObject *obj = view->obj;
-    if (!obj) return;
-    if (PyObject_CheckBuffer(obj)) {
-        PyBuffer_Release(view);
-        return;
-    }
-    if ((0)) {}
-    view->obj = NULL;
-    Py_DECREF(obj);
-}
-#endif
-
-
-/* MemviewSliceIsContig */
-static int
-__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
-{
-    int i, index, step, start;
-    Py_ssize_t itemsize = mvs.memview->view.itemsize;
-    if (order == 'F') {
-        step = 1;
-        start = 0;
-    } else {
-        step = -1;
-        start = ndim - 1;
-    }
-    for (i = 0; i < ndim; i++) {
-        index = start + step * i;
-        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
-            return 0;
-        itemsize *= mvs.shape[index];
-    }
-    return 1;
-}
-
-/* OverlappingSlices */
-static void
-__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
-                               void **out_start, void **out_end,
-                               int ndim, size_t itemsize)
-{
-    char *start, *end;
-    int i;
-    start = end = slice->data;
-    for (i = 0; i < ndim; i++) {
-        Py_ssize_t stride = slice->strides[i];
-        Py_ssize_t extent = slice->shape[i];
-        if (extent == 0) {
-            *out_start = *out_end = start;
-            return;
-        } else {
-            if (stride > 0)
-                end += stride * (extent - 1);
-            else
-                start += stride * (extent - 1);
-        }
-    }
-    *out_start = start;
-    *out_end = end + itemsize;
-}
-static int
-__pyx_slices_overlap(__Pyx_memviewslice *slice1,
-                     __Pyx_memviewslice *slice2,
-                     int ndim, size_t itemsize)
-{
-    void *start1, *end1, *start2, *end2;
-    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
-    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
-    return (start1 < end2) && (start2 < end1);
-}
-
-/* Capsule */
-static CYTHON_INLINE PyObject *
-__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
-{
-    PyObject *cobj;
-#if PY_VERSION_HEX >= 0x02070000
-    cobj = PyCapsule_New(p, sig, NULL);
-#else
-    cobj = PyCObject_FromVoidPtr(p, NULL);
-#endif
-    return cobj;
-}
-
-/* CIntFromPyVerify */
-#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
-    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
-#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
-    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
-#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
-    {\
-        func_type value = func_value;\
-        if (sizeof(target_type) < sizeof(func_type)) {\
-            if (unlikely(value != (func_type) (target_type) value)) {\
-                func_type zero = 0;\
-                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
-                    return (target_type) -1;\
-                if (is_unsigned && unlikely(value < zero))\
-                    goto raise_neg_overflow;\
-                else\
-                    goto raise_overflow;\
-            }\
-        }\
-        return (target_type) value;\
-    }
-
-/* MemviewDtypeToObject */
-static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
-    return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
-}
-static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
-    double value = __pyx_PyFloat_AsDouble(obj);
-    if ((value == (double)-1) && PyErr_Occurred())
-        return 0;
-    *(double *) itemp = value;
-    return 1;
-}
-
-/* Declarations */
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
-      return ::std::complex< float >(x, y);
-    }
-  #else
-    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
-      return x + y*(__pyx_t_float_complex)_Complex_I;
-    }
-  #endif
-#else
-    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
-      __pyx_t_float_complex z;
-      z.real = x;
-      z.imag = y;
-      return z;
-    }
-#endif
-
-/* Arithmetic */
-#if CYTHON_CCOMPLEX
-#else
-    static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-       return (a.real == b.real) && (a.imag == b.imag);
-    }
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-        __pyx_t_float_complex z;
-        z.real = a.real + b.real;
-        z.imag = a.imag + b.imag;
-        return z;
-    }
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-        __pyx_t_float_complex z;
-        z.real = a.real - b.real;
-        z.imag = a.imag - b.imag;
-        return z;
-    }
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-        __pyx_t_float_complex z;
-        z.real = a.real * b.real - a.imag * b.imag;
-        z.imag = a.real * b.imag + a.imag * b.real;
-        return z;
-    }
-    #if 1
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-        if (b.imag == 0) {
-            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
-        } else if (fabsf(b.real) >= fabsf(b.imag)) {
-            if (b.real == 0 && b.imag == 0) {
-                return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
-            } else {
-                float r = b.imag / b.real;
-                float s = (float)(1.0) / (b.real + b.imag * r);
-                return __pyx_t_float_complex_from_parts(
-                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
-            }
-        } else {
-            float r = b.real / b.imag;
-            float s = (float)(1.0) / (b.imag + b.real * r);
-            return __pyx_t_float_complex_from_parts(
-                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
-        }
-    }
-    #else
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-        if (b.imag == 0) {
-            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
-        } else {
-            float denom = b.real * b.real + b.imag * b.imag;
-            return __pyx_t_float_complex_from_parts(
-                (a.real * b.real + a.imag * b.imag) / denom,
-                (a.imag * b.real - a.real * b.imag) / denom);
-        }
-    }
-    #endif
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
-        __pyx_t_float_complex z;
-        z.real = -a.real;
-        z.imag = -a.imag;
-        return z;
-    }
-    static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
-       return (a.real == 0) && (a.imag == 0);
-    }
-    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
-        __pyx_t_float_complex z;
-        z.real =  a.real;
-        z.imag = -a.imag;
-        return z;
-    }
-    #if 1
-        static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
-          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
-            return sqrtf(z.real*z.real + z.imag*z.imag);
-          #else
-            return hypotf(z.real, z.imag);
-          #endif
-        }
-        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
-            __pyx_t_float_complex z;
-            float r, lnr, theta, z_r, z_theta;
-            if (b.imag == 0 && b.real == (int)b.real) {
-                if (b.real < 0) {
-                    float denom = a.real * a.real + a.imag * a.imag;
-                    a.real = a.real / denom;
-                    a.imag = -a.imag / denom;
-                    b.real = -b.real;
-                }
-                switch ((int)b.real) {
-                    case 0:
-                        z.real = 1;
-                        z.imag = 0;
-                        return z;
-                    case 1:
-                        return a;
-                    case 2:
-                        return __Pyx_c_prod_float(a, a);
-                    case 3:
-                        z = __Pyx_c_prod_float(a, a);
-                        return __Pyx_c_prod_float(z, a);
-                    case 4:
-                        z = __Pyx_c_prod_float(a, a);
-                        return __Pyx_c_prod_float(z, z);
-                }
-            }
-            if (a.imag == 0) {
-                if (a.real == 0) {
-                    return a;
-                } else if (b.imag == 0) {
-                    z.real = powf(a.real, b.real);
-                    z.imag = 0;
-                    return z;
-                } else if (a.real > 0) {
-                    r = a.real;
-                    theta = 0;
-                } else {
-                    r = -a.real;
-                    theta = atan2f(0.0, -1.0);
-                }
-            } else {
-                r = __Pyx_c_abs_float(a);
-                theta = atan2f(a.imag, a.real);
-            }
-            lnr = logf(r);
-            z_r = expf(lnr * b.real - theta * b.imag);
-            z_theta = theta * b.real + lnr * b.imag;
-            z.real = z_r * cosf(z_theta);
-            z.imag = z_r * sinf(z_theta);
-            return z;
-        }
-    #endif
-#endif
-
-/* Declarations */
-#if CYTHON_CCOMPLEX
-  #ifdef __cplusplus
-    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
-      return ::std::complex< double >(x, y);
-    }
-  #else
-    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
-      return x + y*(__pyx_t_double_complex)_Complex_I;
-    }
-  #endif
-#else
-    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
-      __pyx_t_double_complex z;
-      z.real = x;
-      z.imag = y;
-      return z;
-    }
-#endif
-
-/* Arithmetic */
-#if CYTHON_CCOMPLEX
-#else
-    static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-       return (a.real == b.real) && (a.imag == b.imag);
-    }
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-        __pyx_t_double_complex z;
-        z.real = a.real + b.real;
-        z.imag = a.imag + b.imag;
-        return z;
-    }
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-        __pyx_t_double_complex z;
-        z.real = a.real - b.real;
-        z.imag = a.imag - b.imag;
-        return z;
-    }
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-        __pyx_t_double_complex z;
-        z.real = a.real * b.real - a.imag * b.imag;
-        z.imag = a.real * b.imag + a.imag * b.real;
-        return z;
-    }
-    #if 1
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-        if (b.imag == 0) {
-            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
-        } else if (fabs(b.real) >= fabs(b.imag)) {
-            if (b.real == 0 && b.imag == 0) {
-                return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
-            } else {
-                double r = b.imag / b.real;
-                double s = (double)(1.0) / (b.real + b.imag * r);
-                return __pyx_t_double_complex_from_parts(
-                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
-            }
-        } else {
-            double r = b.real / b.imag;
-            double s = (double)(1.0) / (b.imag + b.real * r);
-            return __pyx_t_double_complex_from_parts(
-                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
-        }
-    }
-    #else
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-        if (b.imag == 0) {
-            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
-        } else {
-            double denom = b.real * b.real + b.imag * b.imag;
-            return __pyx_t_double_complex_from_parts(
-                (a.real * b.real + a.imag * b.imag) / denom,
-                (a.imag * b.real - a.real * b.imag) / denom);
-        }
-    }
-    #endif
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
-        __pyx_t_double_complex z;
-        z.real = -a.real;
-        z.imag = -a.imag;
-        return z;
-    }
-    static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
-       return (a.real == 0) && (a.imag == 0);
-    }
-    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
-        __pyx_t_double_complex z;
-        z.real =  a.real;
-        z.imag = -a.imag;
-        return z;
-    }
-    #if 1
-        static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
-          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
-            return sqrt(z.real*z.real + z.imag*z.imag);
-          #else
-            return hypot(z.real, z.imag);
-          #endif
-        }
-        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
-            __pyx_t_double_complex z;
-            double r, lnr, theta, z_r, z_theta;
-            if (b.imag == 0 && b.real == (int)b.real) {
-                if (b.real < 0) {
-                    double denom = a.real * a.real + a.imag * a.imag;
-                    a.real = a.real / denom;
-                    a.imag = -a.imag / denom;
-                    b.real = -b.real;
-                }
-                switch ((int)b.real) {
-                    case 0:
-                        z.real = 1;
-                        z.imag = 0;
-                        return z;
-                    case 1:
-                        return a;
-                    case 2:
-                        return __Pyx_c_prod_double(a, a);
-                    case 3:
-                        z = __Pyx_c_prod_double(a, a);
-                        return __Pyx_c_prod_double(z, a);
-                    case 4:
-                        z = __Pyx_c_prod_double(a, a);
-                        return __Pyx_c_prod_double(z, z);
-                }
-            }
-            if (a.imag == 0) {
-                if (a.real == 0) {
-                    return a;
-                } else if (b.imag == 0) {
-                    z.real = pow(a.real, b.real);
-                    z.imag = 0;
-                    return z;
-                } else if (a.real > 0) {
-                    r = a.real;
-                    theta = 0;
-                } else {
-                    r = -a.real;
-                    theta = atan2(0.0, -1.0);
-                }
-            } else {
-                r = __Pyx_c_abs_double(a);
-                theta = atan2(a.imag, a.real);
-            }
-            lnr = log(r);
-            z_r = exp(lnr * b.real - theta * b.imag);
-            z_theta = theta * b.real + lnr * b.imag;
-            z.real = z_r * cos(z_theta);
-            z.imag = z_r * sin(z_theta);
-            return z;
-        }
-    #endif
-#endif
-
-/* MemviewSliceCopyTemplate */
-static __Pyx_memviewslice
-__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
-                                 const char *mode, int ndim,
-                                 size_t sizeof_dtype, int contig_flag,
-                                 int dtype_is_object)
-{
-    __Pyx_RefNannyDeclarations
-    int i;
-    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
-    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
-    Py_buffer *buf = &from_memview->view;
-    PyObject *shape_tuple = NULL;
-    PyObject *temp_int = NULL;
-    struct __pyx_array_obj *array_obj = NULL;
-    struct __pyx_memoryview_obj *memview_obj = NULL;
-    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
-    for (i = 0; i < ndim; i++) {
-        if (unlikely(from_mvs->suboffsets[i] >= 0)) {
-            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
-                                           "indirect dimensions (axis %d)", i);
-            goto fail;
-        }
-    }
-    shape_tuple = PyTuple_New(ndim);
-    if (unlikely(!shape_tuple)) {
-        goto fail;
-    }
-    __Pyx_GOTREF(shape_tuple);
-    for(i = 0; i < ndim; i++) {
-        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
-        if(unlikely(!temp_int)) {
-            goto fail;
-        } else {
-            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
-            temp_int = NULL;
-        }
-    }
-    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
-    if (unlikely(!array_obj)) {
-        goto fail;
-    }
-    __Pyx_GOTREF(array_obj);
-    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
-                                    (PyObject *) array_obj, contig_flag,
-                                    dtype_is_object,
-                                    from_mvs->memview->typeinfo);
-    if (unlikely(!memview_obj))
-        goto fail;
-    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
-        goto fail;
-    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
-                                                dtype_is_object) < 0))
-        goto fail;
-    goto no_fail;
-fail:
-    __Pyx_XDECREF(new_mvs.memview);
-    new_mvs.memview = NULL;
-    new_mvs.data = NULL;
-no_fail:
-    __Pyx_XDECREF(shape_tuple);
-    __Pyx_XDECREF(temp_int);
-    __Pyx_XDECREF(array_obj);
-    __Pyx_RefNannyFinishContext();
-    return new_mvs;
-}
-
-/* CIntToPy */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const long neg_one = (long) -1, const_zero = (long) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-    if (is_unsigned) {
-        if (sizeof(long) < sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(long) <= sizeof(unsigned long)) {
-            return PyLong_FromUnsignedLong((unsigned long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
-            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
-#endif
-        }
-    } else {
-        if (sizeof(long) <= sizeof(long)) {
-            return PyInt_FromLong((long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
-            return PyLong_FromLongLong((PY_LONG_LONG) value);
-#endif
-        }
-    }
-    {
-        int one = 1; int little = (int)*(unsigned char *)&one;
-        unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(long),
-                                     little, !is_unsigned);
-    }
-}
-
-/* CIntFromPy */
-static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-#if PY_MAJOR_VERSION < 3
-    if (likely(PyInt_Check(x))) {
-        if (sizeof(unsigned int) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x))
-        } else {
-            long val = PyInt_AS_LONG(x);
-            if (is_unsigned && unlikely(val < 0)) {
-                goto raise_neg_overflow;
-            }
-            return (unsigned int) val;
-        }
-    } else
-#endif
-    if (likely(PyLong_Check(x))) {
-        if (is_unsigned) {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (unsigned int) 0;
-                case  1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0])
-                case 2:
-                    if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) {
-                            return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) {
-                            return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) {
-                            return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
-                        }
-                    }
-                    break;
-            }
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-            if (unlikely(Py_SIZE(x) < 0)) {
-                goto raise_neg_overflow;
-            }
-#else
-            {
-                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
-                if (unlikely(result < 0))
-                    return (unsigned int) -1;
-                if (unlikely(result == 1))
-                    goto raise_neg_overflow;
-            }
-#endif
-            if (sizeof(unsigned int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
-#endif
-            }
-        } else {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (unsigned int) 0;
-                case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0]))
-                case  1: __PYX_VERIFY_RETURN_INT(unsigned int,  digit, +digits[0])
-                case -2:
-                    if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
-                            return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-                case 2:
-                    if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
-                            return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-                case -3:
-                    if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
-                            return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
-                            return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-                case -4:
-                    if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
-                            return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
-                            return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
-                        }
-                    }
-                    break;
-            }
-#endif
-            if (sizeof(unsigned int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x))
-#endif
-            }
-        }
-        {
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
-            PyErr_SetString(PyExc_RuntimeError,
-                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
-#else
-            unsigned int val;
-            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
- #if PY_MAJOR_VERSION < 3
-            if (likely(v) && !PyLong_Check(v)) {
-                PyObject *tmp = v;
-                v = PyNumber_Long(tmp);
-                Py_DECREF(tmp);
-            }
- #endif
-            if (likely(v)) {
-                int one = 1; int is_little = (int)*(unsigned char *)&one;
-                unsigned char *bytes = (unsigned char *)&val;
-                int ret = _PyLong_AsByteArray((PyLongObject *)v,
-                                              bytes, sizeof(val),
-                                              is_little, !is_unsigned);
-                Py_DECREF(v);
-                if (likely(!ret))
-                    return val;
-            }
-#endif
-            return (unsigned int) -1;
-        }
-    } else {
-        unsigned int val;
-        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
-        if (!tmp) return (unsigned int) -1;
-        val = __Pyx_PyInt_As_unsigned_int(tmp);
-        Py_DECREF(tmp);
-        return val;
-    }
-raise_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "value too large to convert to unsigned int");
-    return (unsigned int) -1;
-raise_neg_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "can't convert negative value to unsigned int");
-    return (unsigned int) -1;
-}
-
-/* CIntToPy */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-    if (is_unsigned) {
-        if (sizeof(unsigned int) < sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(unsigned int) <= sizeof(unsigned long)) {
-            return PyLong_FromUnsignedLong((unsigned long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
-            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
-#endif
-        }
-    } else {
-        if (sizeof(unsigned int) <= sizeof(long)) {
-            return PyInt_FromLong((long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
-            return PyLong_FromLongLong((PY_LONG_LONG) value);
-#endif
-        }
-    }
-    {
-        int one = 1; int little = (int)*(unsigned char *)&one;
-        unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(unsigned int),
-                                     little, !is_unsigned);
-    }
-}
-
-/* CIntToPy */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_uint32(npy_uint32 value) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const npy_uint32 neg_one = (npy_uint32) -1, const_zero = (npy_uint32) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-    if (is_unsigned) {
-        if (sizeof(npy_uint32) < sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(npy_uint32) <= sizeof(unsigned long)) {
-            return PyLong_FromUnsignedLong((unsigned long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(npy_uint32) <= sizeof(unsigned PY_LONG_LONG)) {
-            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
-#endif
-        }
-    } else {
-        if (sizeof(npy_uint32) <= sizeof(long)) {
-            return PyInt_FromLong((long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(npy_uint32) <= sizeof(PY_LONG_LONG)) {
-            return PyLong_FromLongLong((PY_LONG_LONG) value);
-#endif
-        }
-    }
-    {
-        int one = 1; int little = (int)*(unsigned char *)&one;
-        unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(npy_uint32),
-                                     little, !is_unsigned);
-    }
-}
-
-/* CIntFromPy */
-static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const int neg_one = (int) -1, const_zero = (int) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-#if PY_MAJOR_VERSION < 3
-    if (likely(PyInt_Check(x))) {
-        if (sizeof(int) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
-        } else {
-            long val = PyInt_AS_LONG(x);
-            if (is_unsigned && unlikely(val < 0)) {
-                goto raise_neg_overflow;
-            }
-            return (int) val;
-        }
-    } else
-#endif
-    if (likely(PyLong_Check(x))) {
-        if (is_unsigned) {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (int) 0;
-                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
-                case 2:
-                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
-                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
-                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
-                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
-                        }
-                    }
-                    break;
-            }
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-            if (unlikely(Py_SIZE(x) < 0)) {
-                goto raise_neg_overflow;
-            }
-#else
-            {
-                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
-                if (unlikely(result < 0))
-                    return (int) -1;
-                if (unlikely(result == 1))
-                    goto raise_neg_overflow;
-            }
-#endif
-            if (sizeof(int) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
-#endif
-            }
-        } else {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (int) 0;
-                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
-                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
-                case -2:
-                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
-                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-                case 2:
-                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
-                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-                case -3:
-                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
-                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
-                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-                case -4:
-                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
-                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
-                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
-                        }
-                    }
-                    break;
-            }
-#endif
-            if (sizeof(int) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
-#endif
-            }
-        }
-        {
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
-            PyErr_SetString(PyExc_RuntimeError,
-                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
-#else
-            int val;
-            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
- #if PY_MAJOR_VERSION < 3
-            if (likely(v) && !PyLong_Check(v)) {
-                PyObject *tmp = v;
-                v = PyNumber_Long(tmp);
-                Py_DECREF(tmp);
-            }
- #endif
-            if (likely(v)) {
-                int one = 1; int is_little = (int)*(unsigned char *)&one;
-                unsigned char *bytes = (unsigned char *)&val;
-                int ret = _PyLong_AsByteArray((PyLongObject *)v,
-                                              bytes, sizeof(val),
-                                              is_little, !is_unsigned);
-                Py_DECREF(v);
-                if (likely(!ret))
-                    return val;
-            }
-#endif
-            return (int) -1;
-        }
-    } else {
-        int val;
-        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
-        if (!tmp) return (int) -1;
-        val = __Pyx_PyInt_As_int(tmp);
-        Py_DECREF(tmp);
-        return val;
-    }
-raise_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "value too large to convert to int");
-    return (int) -1;
-raise_neg_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "can't convert negative value to int");
-    return (int) -1;
-}
-
-/* CIntFromPy */
-static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const long neg_one = (long) -1, const_zero = (long) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-#if PY_MAJOR_VERSION < 3
-    if (likely(PyInt_Check(x))) {
-        if (sizeof(long) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
-        } else {
-            long val = PyInt_AS_LONG(x);
-            if (is_unsigned && unlikely(val < 0)) {
-                goto raise_neg_overflow;
-            }
-            return (long) val;
-        }
-    } else
-#endif
-    if (likely(PyLong_Check(x))) {
-        if (is_unsigned) {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (long) 0;
-                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
-                case 2:
-                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
-                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
-                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
-                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
-                        }
-                    }
-                    break;
-            }
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-            if (unlikely(Py_SIZE(x) < 0)) {
-                goto raise_neg_overflow;
-            }
-#else
-            {
-                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
-                if (unlikely(result < 0))
-                    return (long) -1;
-                if (unlikely(result == 1))
-                    goto raise_neg_overflow;
-            }
-#endif
-            if (sizeof(long) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
-#endif
-            }
-        } else {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (long) 0;
-                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
-                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
-                case -2:
-                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
-                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-                case 2:
-                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
-                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-                case -3:
-                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
-                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
-                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-                case -4:
-                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
-                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
-                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
-                        }
-                    }
-                    break;
-            }
-#endif
-            if (sizeof(long) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
-#endif
-            }
-        }
-        {
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
-            PyErr_SetString(PyExc_RuntimeError,
-                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
-#else
-            long val;
-            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
- #if PY_MAJOR_VERSION < 3
-            if (likely(v) && !PyLong_Check(v)) {
-                PyObject *tmp = v;
-                v = PyNumber_Long(tmp);
-                Py_DECREF(tmp);
-            }
- #endif
-            if (likely(v)) {
-                int one = 1; int is_little = (int)*(unsigned char *)&one;
-                unsigned char *bytes = (unsigned char *)&val;
-                int ret = _PyLong_AsByteArray((PyLongObject *)v,
-                                              bytes, sizeof(val),
-                                              is_little, !is_unsigned);
-                Py_DECREF(v);
-                if (likely(!ret))
-                    return val;
-            }
-#endif
-            return (long) -1;
-        }
-    } else {
-        long val;
-        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
-        if (!tmp) return (long) -1;
-        val = __Pyx_PyInt_As_long(tmp);
-        Py_DECREF(tmp);
-        return val;
-    }
-raise_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "value too large to convert to long");
-    return (long) -1;
-raise_neg_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "can't convert negative value to long");
-    return (long) -1;
-}
-
-/* CIntToPy */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const int neg_one = (int) -1, const_zero = (int) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-    if (is_unsigned) {
-        if (sizeof(int) < sizeof(long)) {
-            return PyInt_FromLong((long) value);
-        } else if (sizeof(int) <= sizeof(unsigned long)) {
-            return PyLong_FromUnsignedLong((unsigned long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
-            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
-#endif
-        }
-    } else {
-        if (sizeof(int) <= sizeof(long)) {
-            return PyInt_FromLong((long) value);
-#ifdef HAVE_LONG_LONG
-        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
-            return PyLong_FromLongLong((PY_LONG_LONG) value);
-#endif
-        }
-    }
-    {
-        int one = 1; int little = (int)*(unsigned char *)&one;
-        unsigned char *bytes = (unsigned char *)&value;
-        return _PyLong_FromByteArray(bytes, sizeof(int),
-                                     little, !is_unsigned);
-    }
-}
-
-/* CIntFromPy */
-static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-    const char neg_one = (char) -1, const_zero = (char) 0;
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
-#pragma GCC diagnostic pop
-#endif
-    const int is_unsigned = neg_one > const_zero;
-#if PY_MAJOR_VERSION < 3
-    if (likely(PyInt_Check(x))) {
-        if (sizeof(char) < sizeof(long)) {
-            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
-        } else {
-            long val = PyInt_AS_LONG(x);
-            if (is_unsigned && unlikely(val < 0)) {
-                goto raise_neg_overflow;
-            }
-            return (char) val;
-        }
-    } else
-#endif
-    if (likely(PyLong_Check(x))) {
-        if (is_unsigned) {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (char) 0;
-                case  1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
-                case 2:
-                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
-                            return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
-                            return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
-                            return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
-                        }
-                    }
-                    break;
-            }
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-            if (unlikely(Py_SIZE(x) < 0)) {
-                goto raise_neg_overflow;
-            }
-#else
-            {
-                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
-                if (unlikely(result < 0))
-                    return (char) -1;
-                if (unlikely(result == 1))
-                    goto raise_neg_overflow;
-            }
-#endif
-            if (sizeof(char) <= sizeof(unsigned long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
-#endif
-            }
-        } else {
-#if CYTHON_USE_PYLONG_INTERNALS
-            const digit* digits = ((PyLongObject*)x)->ob_digit;
-            switch (Py_SIZE(x)) {
-                case  0: return (char) 0;
-                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
-                case  1: __PYX_VERIFY_RETURN_INT(char,  digit, +digits[0])
-                case -2:
-                    if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
-                            return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-                case 2:
-                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
-                            return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-                case -3:
-                    if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
-                            return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-                case 3:
-                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
-                            return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-                case -4:
-                    if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
-                            return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-                case 4:
-                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
-                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
-                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
-                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
-                            return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
-                        }
-                    }
-                    break;
-            }
-#endif
-            if (sizeof(char) <= sizeof(long)) {
-                __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
-#ifdef HAVE_LONG_LONG
-            } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
-                __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
-#endif
-            }
-        }
-        {
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
-            PyErr_SetString(PyExc_RuntimeError,
-                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
-#else
-            char val;
-            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
- #if PY_MAJOR_VERSION < 3
-            if (likely(v) && !PyLong_Check(v)) {
-                PyObject *tmp = v;
-                v = PyNumber_Long(tmp);
-                Py_DECREF(tmp);
-            }
- #endif
-            if (likely(v)) {
-                int one = 1; int is_little = (int)*(unsigned char *)&one;
-                unsigned char *bytes = (unsigned char *)&val;
-                int ret = _PyLong_AsByteArray((PyLongObject *)v,
-                                              bytes, sizeof(val),
-                                              is_little, !is_unsigned);
-                Py_DECREF(v);
-                if (likely(!ret))
-                    return val;
-            }
-#endif
-            return (char) -1;
-        }
-    } else {
-        char val;
-        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
-        if (!tmp) return (char) -1;
-        val = __Pyx_PyInt_As_char(tmp);
-        Py_DECREF(tmp);
-        return val;
-    }
-raise_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "value too large to convert to char");
-    return (char) -1;
-raise_neg_overflow:
-    PyErr_SetString(PyExc_OverflowError,
-        "can't convert negative value to char");
-    return (char) -1;
-}
-
-/* IsLittleEndian */
-static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
-{
-  union {
-    uint32_t u32;
-    uint8_t u8[4];
-  } S;
-  S.u32 = 0x01020304;
-  return S.u8[0] == 4;
-}
-
-/* BufferFormatCheck */
-static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
-                              __Pyx_BufFmt_StackElem* stack,
-                              __Pyx_TypeInfo* type) {
-  stack[0].field = &ctx->root;
-  stack[0].parent_offset = 0;
-  ctx->root.type = type;
-  ctx->root.name = "buffer dtype";
-  ctx->root.offset = 0;
-  ctx->head = stack;
-  ctx->head->field = &ctx->root;
-  ctx->fmt_offset = 0;
-  ctx->head->parent_offset = 0;
-  ctx->new_packmode = '@';
-  ctx->enc_packmode = '@';
-  ctx->new_count = 1;
-  ctx->enc_count = 0;
-  ctx->enc_type = 0;
-  ctx->is_complex = 0;
-  ctx->is_valid_array = 0;
-  ctx->struct_alignment = 0;
-  while (type->typegroup == 'S') {
-    ++ctx->head;
-    ctx->head->field = type->fields;
-    ctx->head->parent_offset = 0;
-    type = type->fields->type;
-  }
-}
-static int __Pyx_BufFmt_ParseNumber(const char** ts) {
-    int count;
-    const char* t = *ts;
-    if (*t < '0' || *t > '9') {
-      return -1;
-    } else {
-        count = *t++ - '0';
-        while (*t >= '0' && *t <= '9') {
-            count *= 10;
-            count += *t++ - '0';
-        }
-    }
-    *ts = t;
-    return count;
-}
-static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
-    int number = __Pyx_BufFmt_ParseNumber(ts);
-    if (number == -1)
-        PyErr_Format(PyExc_ValueError,
-                     "Does not understand character buffer dtype format string ('%c')", **ts);
-    return number;
-}
-static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
-  PyErr_Format(PyExc_ValueError,
-               "Unexpected format string character: '%c'", ch);
-}
-static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
-  switch (ch) {
-    case '?': return "'bool'";
-    case 'c': return "'char'";
-    case 'b': return "'signed char'";
-    case 'B': return "'unsigned char'";
-    case 'h': return "'short'";
-    case 'H': return "'unsigned short'";
-    case 'i': return "'int'";
-    case 'I': return "'unsigned int'";
-    case 'l': return "'long'";
-    case 'L': return "'unsigned long'";
-    case 'q': return "'long long'";
-    case 'Q': return "'unsigned long long'";
-    case 'f': return (is_complex ? "'complex float'" : "'float'");
-    case 'd': return (is_complex ? "'complex double'" : "'double'");
-    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
-    case 'T': return "a struct";
-    case 'O': return "Python object";
-    case 'P': return "a pointer";
-    case 's': case 'p': return "a string";
-    case 0: return "end";
-    default: return "unparseable format string";
-  }
-}
-static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
-  switch (ch) {
-    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
-    case 'h': case 'H': return 2;
-    case 'i': case 'I': case 'l': case 'L': return 4;
-    case 'q': case 'Q': return 8;
-    case 'f': return (is_complex ? 8 : 4);
-    case 'd': return (is_complex ? 16 : 8);
-    case 'g': {
-      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
-      return 0;
-    }
-    case 'O': case 'P': return sizeof(void*);
-    default:
-      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
-      return 0;
-    }
-}
-static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
-  switch (ch) {
-    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
-    case 'h': case 'H': return sizeof(short);
-    case 'i': case 'I': return sizeof(int);
-    case 'l': case 'L': return sizeof(long);
-    #ifdef HAVE_LONG_LONG
-    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
-    #endif
-    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
-    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
-    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
-    case 'O': case 'P': return sizeof(void*);
-    default: {
-      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
-      return 0;
-    }
-  }
-}
-typedef struct { char c; short x; } __Pyx_st_short;
-typedef struct { char c; int x; } __Pyx_st_int;
-typedef struct { char c; long x; } __Pyx_st_long;
-typedef struct { char c; float x; } __Pyx_st_float;
-typedef struct { char c; double x; } __Pyx_st_double;
-typedef struct { char c; long double x; } __Pyx_st_longdouble;
-typedef struct { char c; void *x; } __Pyx_st_void_p;
-#ifdef HAVE_LONG_LONG
-typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
-#endif
-static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
-  switch (ch) {
-    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
-    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
-    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
-    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
-#ifdef HAVE_LONG_LONG
-    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
-#endif
-    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
-    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
-    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
-    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
-    default:
-      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
-      return 0;
-    }
-}
-/* These are for computing the padding at the end of the struct to align
-   on the first member of the struct. This will probably be the same as above,
-   but we don't have any guarantees.
- */
-typedef struct { short x; char c; } __Pyx_pad_short;
-typedef struct { int x; char c; } __Pyx_pad_int;
-typedef struct { long x; char c; } __Pyx_pad_long;
-typedef struct { float x; char c; } __Pyx_pad_float;
-typedef struct { double x; char c; } __Pyx_pad_double;
-typedef struct { long double x; char c; } __Pyx_pad_longdouble;
-typedef struct { void *x; char c; } __Pyx_pad_void_p;
-#ifdef HAVE_LONG_LONG
-typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
-#endif
-static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
-  switch (ch) {
-    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
-    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
-    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
-    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
-#ifdef HAVE_LONG_LONG
-    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
-#endif
-    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
-    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
-    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
-    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
-    default:
-      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
-      return 0;
-    }
-}
-static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
-  switch (ch) {
-    case 'c':
-        return 'H';
-    case 'b': case 'h': case 'i':
-    case 'l': case 'q': case 's': case 'p':
-        return 'I';
-    case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
-        return 'U';
-    case 'f': case 'd': case 'g':
-        return (is_complex ? 'C' : 'R');
-    case 'O':
-        return 'O';
-    case 'P':
-        return 'P';
-    default: {
-      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
-      return 0;
-    }
-  }
-}
-static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
-  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
-    const char* expected;
-    const char* quote;
-    if (ctx->head == NULL) {
-      expected = "end";
-      quote = "";
-    } else {
-      expected = ctx->head->field->type->name;
-      quote = "'";
-    }
-    PyErr_Format(PyExc_ValueError,
-                 "Buffer dtype mismatch, expected %s%s%s but got %s",
-                 quote, expected, quote,
-                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
-  } else {
-    __Pyx_StructField* field = ctx->head->field;
-    __Pyx_StructField* parent = (ctx->head - 1)->field;
-    PyErr_Format(PyExc_ValueError,
-                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
-                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
-                 parent->type->name, field->name);
-  }
-}
-static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
-  char group;
-  size_t size, offset, arraysize = 1;
-  if (ctx->enc_type == 0) return 0;
-  if (ctx->head->field->type->arraysize[0]) {
-    int i, ndim = 0;
-    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
-        ctx->is_valid_array = ctx->head->field->type->ndim == 1;
-        ndim = 1;
-        if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
-            PyErr_Format(PyExc_ValueError,
-                         "Expected a dimension of size %zu, got %zu",
-                         ctx->head->field->type->arraysize[0], ctx->enc_count);
-            return -1;
-        }
-    }
-    if (!ctx->is_valid_array) {
-      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
-                   ctx->head->field->type->ndim, ndim);
-      return -1;
-    }
-    for (i = 0; i < ctx->head->field->type->ndim; i++) {
-      arraysize *= ctx->head->field->type->arraysize[i];
-    }
-    ctx->is_valid_array = 0;
-    ctx->enc_count = 1;
-  }
-  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
-  do {
-    __Pyx_StructField* field = ctx->head->field;
-    __Pyx_TypeInfo* type = field->type;
-    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
-      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
-    } else {
-      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
-    }
-    if (ctx->enc_packmode == '@') {
-      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
-      size_t align_mod_offset;
-      if (align_at == 0) return -1;
-      align_mod_offset = ctx->fmt_offset % align_at;
-      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
-      if (ctx->struct_alignment == 0)
-          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
-                                                                 ctx->is_complex);
-    }
-    if (type->size != size || type->typegroup != group) {
-      if (type->typegroup == 'C' && type->fields != NULL) {
-        size_t parent_offset = ctx->head->parent_offset + field->offset;
-        ++ctx->head;
-        ctx->head->field = type->fields;
-        ctx->head->parent_offset = parent_offset;
-        continue;
-      }
-      if (!((type->typegroup == 'H' || group == 'H') && type->size == size)) {
-          __Pyx_BufFmt_RaiseExpected(ctx);
-          return -1;
-      }
-    }
-    offset = ctx->head->parent_offset + field->offset;
-    if (ctx->fmt_offset != offset) {
-      PyErr_Format(PyExc_ValueError,
-                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
-                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
-      return -1;
-    }
-    ctx->fmt_offset += size;
-    if (arraysize)
-      ctx->fmt_offset += (arraysize - 1) * size;
-    --ctx->enc_count;
-    while (1) {
-      if (field == &ctx->root) {
-        ctx->head = NULL;
-        if (ctx->enc_count != 0) {
-          __Pyx_BufFmt_RaiseExpected(ctx);
-          return -1;
-        }
-        break;
-      }
-      ctx->head->field = ++field;
-      if (field->type == NULL) {
-        --ctx->head;
-        field = ctx->head->field;
-        continue;
-      } else if (field->type->typegroup == 'S') {
-        size_t parent_offset = ctx->head->parent_offset + field->offset;
-        if (field->type->fields->type == NULL) continue;
-        field = field->type->fields;
-        ++ctx->head;
-        ctx->head->field = field;
-        ctx->head->parent_offset = parent_offset;
-        break;
-      } else {
-        break;
-      }
-    }
-  } while (ctx->enc_count);
-  ctx->enc_type = 0;
-  ctx->is_complex = 0;
-  return 0;
-}
-static PyObject *
-__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
-{
-    const char *ts = *tsp;
-    int i = 0, number, ndim;
-    ++ts;
-    if (ctx->new_count != 1) {
-        PyErr_SetString(PyExc_ValueError,
-                        "Cannot handle repeated arrays in format string");
-        return NULL;
-    }
-    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-    ndim = ctx->head->field->type->ndim;
-    while (*ts && *ts != ')') {
-        switch (*ts) {
-            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  ++ts; continue;  /* skip whitespace; advancing ts avoids an infinite loop */
-            default:  break;
-        }
-        number = __Pyx_BufFmt_ExpectNumber(&ts);
-        if (number == -1) return NULL;
-        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
-            return PyErr_Format(PyExc_ValueError,
-                        "Expected a dimension of size %zu, got %d",
-                        ctx->head->field->type->arraysize[i], number);
-        if (*ts != ',' && *ts != ')')
-            return PyErr_Format(PyExc_ValueError,
-                                "Expected a comma in format string, got '%c'", *ts);
-        if (*ts == ',') ts++;
-        i++;
-    }
-    if (i != ndim)
-        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
-                            ctx->head->field->type->ndim, i);
-    if (!*ts) {
-        PyErr_SetString(PyExc_ValueError,
-                        "Unexpected end of format string, expected ')'");
-        return NULL;
-    }
-    ctx->is_valid_array = 1;
-    ctx->new_count = 1;
-    *tsp = ++ts;
-    return Py_None;
-}
-static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
-  int got_Z = 0;
-  while (1) {
-    switch(*ts) {
-      case 0:
-        if (ctx->enc_type != 0 && ctx->head == NULL) {
-          __Pyx_BufFmt_RaiseExpected(ctx);
-          return NULL;
-        }
-        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-        if (ctx->head != NULL) {
-          __Pyx_BufFmt_RaiseExpected(ctx);
-          return NULL;
-        }
-        return ts;
-      case ' ':
-      case '\r':
-      case '\n':
-        ++ts;
-        break;
-      case '<':
-        if (!__Pyx_Is_Little_Endian()) {
-          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
-          return NULL;
-        }
-        ctx->new_packmode = '=';
-        ++ts;
-        break;
-      case '>':
-      case '!':
-        if (__Pyx_Is_Little_Endian()) {
-          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
-          return NULL;
-        }
-        ctx->new_packmode = '=';
-        ++ts;
-        break;
-      case '=':
-      case '@':
-      case '^':
-        ctx->new_packmode = *ts++;
-        break;
-      case 'T':
-        {
-          const char* ts_after_sub;
-          size_t i, struct_count = ctx->new_count;
-          size_t struct_alignment = ctx->struct_alignment;
-          ctx->new_count = 1;
-          ++ts;
-          if (*ts != '{') {
-            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
-            return NULL;
-          }
-          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0;
-          ctx->enc_count = 0;
-          ctx->struct_alignment = 0;
-          ++ts;
-          ts_after_sub = ts;
-          for (i = 0; i != struct_count; ++i) {
-            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
-            if (!ts_after_sub) return NULL;
-          }
-          ts = ts_after_sub;
-          if (struct_alignment) ctx->struct_alignment = struct_alignment;
-        }
-        break;
-      case '}':
-        {
-          size_t alignment = ctx->struct_alignment;
-          ++ts;
-          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-          ctx->enc_type = 0;
-          if (alignment && ctx->fmt_offset % alignment) {
-            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
-          }
-        }
-        return ts;
-      case 'x':
-        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-        ctx->fmt_offset += ctx->new_count;
-        ctx->new_count = 1;
-        ctx->enc_count = 0;
-        ctx->enc_type = 0;
-        ctx->enc_packmode = ctx->new_packmode;
-        ++ts;
-        break;
-      case 'Z':
-        got_Z = 1;
-        ++ts;
-        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
-          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
-          return NULL;
-        }
-        CYTHON_FALLTHROUGH;
-      case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
-      case 'l': case 'L': case 'q': case 'Q':
-      case 'f': case 'd': case 'g':
-      case 'O': case 'p':
-        if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
-            (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
-          ctx->enc_count += ctx->new_count;
-          ctx->new_count = 1;
-          got_Z = 0;
-          ++ts;
-          break;
-        }
-        CYTHON_FALLTHROUGH;
-      case 's':
-        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
-        ctx->enc_count = ctx->new_count;
-        ctx->enc_packmode = ctx->new_packmode;
-        ctx->enc_type = *ts;
-        ctx->is_complex = got_Z;
-        ++ts;
-        ctx->new_count = 1;
-        got_Z = 0;
-        break;
-      case ':':
-        ++ts;
-        while(*ts != ':') ++ts;
-        ++ts;
-        break;
-      case '(':
-        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
-        break;
-      default:
-        {
-          int number = __Pyx_BufFmt_ExpectNumber(&ts);
-          if (number == -1) return NULL;
-          ctx->new_count = (size_t)number;
-        }
-    }
-  }
-}
-
-/* TypeInfoCompare */
-  static int
-__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
-{
-    int i;
-    if (!a || !b)
-        return 0;
-    if (a == b)
-        return 1;
-    if (a->size != b->size || a->typegroup != b->typegroup ||
-            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
-        if (a->typegroup == 'H' || b->typegroup == 'H') {
-            return a->size == b->size;
-        } else {
-            return 0;
-        }
-    }
-    if (a->ndim) {
-        for (i = 0; i < a->ndim; i++)
-            if (a->arraysize[i] != b->arraysize[i])
-                return 0;
-    }
-    if (a->typegroup == 'S') {
-        if (a->flags != b->flags)
-            return 0;
-        if (a->fields || b->fields) {
-            if (!(a->fields && b->fields))
-                return 0;
-            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
-                __Pyx_StructField *field_a = a->fields + i;
-                __Pyx_StructField *field_b = b->fields + i;
-                if (field_a->offset != field_b->offset ||
-                    !__pyx_typeinfo_cmp(field_a->type, field_b->type))
-                    return 0;
-            }
-            return !a->fields[i].type && !b->fields[i].type;
-        }
-    }
-    return 1;
-}
-
-/* MemviewSliceValidateAndInit */
-  static int
-__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
-{
-    if (buf->shape[dim] <= 1)
-        return 1;
-    if (buf->strides) {
-        if (spec & __Pyx_MEMVIEW_CONTIG) {
-            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
-                if (unlikely(buf->strides[dim] != sizeof(void *))) {
-                    PyErr_Format(PyExc_ValueError,
-                                 "Buffer is not indirectly contiguous "
-                                 "in dimension %d.", dim);
-                    goto fail;
-                }
-            } else if (unlikely(buf->strides[dim] != buf->itemsize)) {
-                PyErr_SetString(PyExc_ValueError,
-                                "Buffer and memoryview are not contiguous "
-                                "in the same dimension.");
-                goto fail;
-            }
-        }
-        if (spec & __Pyx_MEMVIEW_FOLLOW) {
-            Py_ssize_t stride = buf->strides[dim];
-            if (stride < 0)
-                stride = -stride;
-            if (unlikely(stride < buf->itemsize)) {
-                PyErr_SetString(PyExc_ValueError,
-                                "Buffer and memoryview are not contiguous "
-                                "in the same dimension.");
-                goto fail;
-            }
-        }
-    } else {
-        if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
-            PyErr_Format(PyExc_ValueError,
-                         "C-contiguous buffer is not contiguous in "
-                         "dimension %d", dim);
-            goto fail;
-        } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
-            PyErr_Format(PyExc_ValueError,
-                         "C-contiguous buffer is not indirect in "
-                         "dimension %d", dim);
-            goto fail;
-        } else if (unlikely(buf->suboffsets)) {
-            PyErr_SetString(PyExc_ValueError,
-                            "Buffer exposes suboffsets but no strides");
-            goto fail;
-        }
-    }
-    return 1;
-fail:
-    return 0;
-}
-static int
-__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
-{
-    if (spec & __Pyx_MEMVIEW_DIRECT) {
-        if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
-            PyErr_Format(PyExc_ValueError,
-                         "Buffer not compatible with direct access "
-                         "in dimension %d.", dim);
-            goto fail;
-        }
-    }
-    if (spec & __Pyx_MEMVIEW_PTR) {
-        if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
-            PyErr_Format(PyExc_ValueError,
-                         "Buffer is not indirectly accessible "
-                         "in dimension %d.", dim);
-            goto fail;
-        }
-    }
-    return 1;
-fail:
-    return 0;
-}
-static int
-__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
-{
-    int i;
-    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
-        Py_ssize_t stride = 1;
-        for (i = 0; i < ndim; i++) {
-            if (unlikely(stride * buf->itemsize != buf->strides[i]  &&  buf->shape[i] > 1)) {
-                PyErr_SetString(PyExc_ValueError,
-                    "Buffer not fortran contiguous.");
-                goto fail;
-            }
-            stride = stride * buf->shape[i];
-        }
-    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
-        Py_ssize_t stride = 1;
-        for (i = ndim - 1; i > -1; i--) {
-            if (unlikely(stride * buf->itemsize != buf->strides[i]  &&  buf->shape[i] > 1)) {
-                PyErr_SetString(PyExc_ValueError,
-                    "Buffer not C contiguous.");
-                goto fail;
-            }
-            stride = stride * buf->shape[i];
-        }
-    }
-    return 1;
-fail:
-    return 0;
-}
-static int __Pyx_ValidateAndInit_memviewslice(
-                int *axes_specs,
-                int c_or_f_flag,
-                int buf_flags,
-                int ndim,
-                __Pyx_TypeInfo *dtype,
-                __Pyx_BufFmt_StackElem stack[],
-                __Pyx_memviewslice *memviewslice,
-                PyObject *original_obj)
-{
-    struct __pyx_memoryview_obj *memview, *new_memview;
-    __Pyx_RefNannyDeclarations
-    Py_buffer *buf;
-    int i, spec = 0, retval = -1;
-    __Pyx_BufFmt_Context ctx;
-    int from_memoryview = __pyx_memoryview_check(original_obj);
-    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
-    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
-                                                            original_obj)->typeinfo)) {
-        memview = (struct __pyx_memoryview_obj *) original_obj;
-        new_memview = NULL;
-    } else {
-        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
-                                            original_obj, buf_flags, 0, dtype);
-        new_memview = memview;
-        if (unlikely(!memview))
-            goto fail;
-    }
-    buf = &memview->view;
-    if (unlikely(buf->ndim != ndim)) {
-        PyErr_Format(PyExc_ValueError,
-                "Buffer has wrong number of dimensions (expected %d, got %d)",
-                ndim, buf->ndim);
-        goto fail;
-    }
-    if (new_memview) {
-        __Pyx_BufFmt_Init(&ctx, stack, dtype);
-        if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
-    }
-    if (unlikely((unsigned) buf->itemsize != dtype->size)) {
-        PyErr_Format(PyExc_ValueError,
-                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
-                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
-                     buf->itemsize,
-                     (buf->itemsize > 1) ? "s" : "",
-                     dtype->name,
-                     dtype->size,
-                     (dtype->size > 1) ? "s" : "");
-        goto fail;
-    }
-    if (buf->len > 0) {
-        for (i = 0; i < ndim; i++) {
-            spec = axes_specs[i];
-            if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
-                goto fail;
-            if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
-                goto fail;
-        }
-        if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
-            goto fail;
-    }
-    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
-                                         new_memview != NULL) == -1)) {
-        goto fail;
-    }
-    retval = 0;
-    goto no_fail;
-fail:
-    Py_XDECREF(new_memview);
-    retval = -1;
-no_fail:
-    __Pyx_RefNannyFinishContext();
-    return retval;
-}
-
-/* ObjectToMemviewSlice */
-  static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(PyObject *obj, int writable_flag) {
-    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
-    __Pyx_BufFmt_StackElem stack[1];
-    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
-    int retcode;
-    if (obj == Py_None) {
-        result.memview = (struct __pyx_memoryview_obj *) Py_None;
-        return result;
-    }
-    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
-                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
-                                                 &__Pyx_TypeInfo_nn___pyx_t_6madmom_2ml_3hmm_uint32_t, stack,
-                                                 &result, obj);
-    if (unlikely(retcode == -1))
-        goto __pyx_fail;
-    return result;
-__pyx_fail:
-    result.memview = NULL;
-    result.data = NULL;
-    return result;
-}
-
-/* ObjectToMemviewSlice */
-  static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) {
-    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
-    __Pyx_BufFmt_StackElem stack[1];
-    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
-    int retcode;
-    if (obj == Py_None) {
-        result.memview = (struct __pyx_memoryview_obj *) Py_None;
-        return result;
-    }
-    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
-                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
-                                                 &__Pyx_TypeInfo_double, stack,
-                                                 &result, obj);
-    if (unlikely(retcode == -1))
-        goto __pyx_fail;
-    return result;
-__pyx_fail:
-    result.memview = NULL;
-    result.data = NULL;
-    return result;
-}
-
-/* ObjectToMemviewSlice */
-  static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
-    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
-    __Pyx_BufFmt_StackElem stack[1];
-    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
-    int retcode;
-    if (obj == Py_None) {
-        result.memview = (struct __pyx_memoryview_obj *) Py_None;
-        return result;
-    }
-    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
-                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
-                                                 &__Pyx_TypeInfo_double, stack,
-                                                 &result, obj);
-    if (unlikely(retcode == -1))
-        goto __pyx_fail;
-    return result;
-__pyx_fail:
-    result.memview = NULL;
-    result.data = NULL;
-    return result;
-}
-
-/* ObjectToMemviewSlice */
-  static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_nn___pyx_t_6madmom_2ml_3hmm_uint32_t(PyObject *obj, int writable_flag) {
-    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
-    __Pyx_BufFmt_StackElem stack[1];
-    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
-    int retcode;
-    if (obj == Py_None) {
-        result.memview = (struct __pyx_memoryview_obj *) Py_None;
-        return result;
-    }
-    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
-                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
-                                                 &__Pyx_TypeInfo_nn___pyx_t_6madmom_2ml_3hmm_uint32_t, stack,
-                                                 &result, obj);
-    if (unlikely(retcode == -1))
-        goto __pyx_fail;
-    return result;
-__pyx_fail:
-    result.memview = NULL;
-    result.data = NULL;
-    return result;
-}
-
-/* PyObjectGetMethod */
-  static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
-    PyObject *attr;
-#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
-    PyTypeObject *tp = Py_TYPE(obj);
-    PyObject *descr;
-    descrgetfunc f = NULL;
-    PyObject **dictptr, *dict;
-    int meth_found = 0;
-    assert (*method == NULL);
-    if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
-        attr = __Pyx_PyObject_GetAttrStr(obj, name);
-        goto try_unpack;
-    }
-    if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
-        return 0;
-    }
-    descr = _PyType_Lookup(tp, name);
-    if (likely(descr != NULL)) {
-        Py_INCREF(descr);
-#if PY_MAJOR_VERSION >= 3
-        #ifdef __Pyx_CyFunction_USED
-        if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
-        #else
-        if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
-        #endif
-#else
-        #ifdef __Pyx_CyFunction_USED
-        if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
-        #else
-        if (likely(PyFunction_Check(descr)))
-        #endif
-#endif
-        {
-            meth_found = 1;
-        } else {
-            f = Py_TYPE(descr)->tp_descr_get;
-            if (f != NULL && PyDescr_IsData(descr)) {
-                attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
-                Py_DECREF(descr);
-                goto try_unpack;
-            }
-        }
-    }
-    dictptr = _PyObject_GetDictPtr(obj);
-    if (dictptr != NULL && (dict = *dictptr) != NULL) {
-        Py_INCREF(dict);
-        attr = __Pyx_PyDict_GetItemStr(dict, name);
-        if (attr != NULL) {
-            Py_INCREF(attr);
-            Py_DECREF(dict);
-            Py_XDECREF(descr);
-            goto try_unpack;
-        }
-        Py_DECREF(dict);
-    }
-    if (meth_found) {
-        *method = descr;
-        return 1;
-    }
-    if (f != NULL) {
-        attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
-        Py_DECREF(descr);
-        goto try_unpack;
-    }
-    if (descr != NULL) {
-        *method = descr;
-        return 0;
-    }
-    PyErr_Format(PyExc_AttributeError,
-#if PY_MAJOR_VERSION >= 3
-                 "'%.50s' object has no attribute '%U'",
-                 tp->tp_name, name);
-#else
-                 "'%.50s' object has no attribute '%.400s'",
-                 tp->tp_name, PyString_AS_STRING(name));
-#endif
-    return 0;
-#else
-    attr = __Pyx_PyObject_GetAttrStr(obj, name);
-    goto try_unpack;
-#endif
-try_unpack:
-#if CYTHON_UNPACK_METHODS
-    if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
-        PyObject *function = PyMethod_GET_FUNCTION(attr);
-        Py_INCREF(function);
-        Py_DECREF(attr);
-        *method = function;
-        return 1;
-    }
-#endif
-    *method = attr;
-    return 0;
-}
-
-/* PyObjectCallMethod1 */
-  static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) {
-    PyObject *result = __Pyx_PyObject_CallOneArg(method, arg);
-    Py_DECREF(method);
-    return result;
-}
-static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
-    PyObject *method = NULL, *result;
-    int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
-    if (likely(is_method)) {
-        result = __Pyx_PyObject_Call2Args(method, obj, arg);
-        Py_DECREF(method);
-        return result;
-    }
-    if (unlikely(!method)) return NULL;
-    return __Pyx__PyObject_CallMethod1(method, arg);
-}
-
-/* CoroutineBase */
-  #include <structmember.h>
-#include <frameobject.h>
-#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom)
-static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject **pvalue) {
-    PyObject *et, *ev, *tb;
-    PyObject *value = NULL;
-    __Pyx_ErrFetch(&et, &ev, &tb);
-    if (!et) {
-        Py_XDECREF(tb);
-        Py_XDECREF(ev);
-        Py_INCREF(Py_None);
-        *pvalue = Py_None;
-        return 0;
-    }
-    if (likely(et == PyExc_StopIteration)) {
-        if (!ev) {
-            Py_INCREF(Py_None);
-            value = Py_None;
-        }
-#if PY_VERSION_HEX >= 0x030300A0
-        else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) {
-            value = ((PyStopIterationObject *)ev)->value;
-            Py_INCREF(value);
-            Py_DECREF(ev);
-        }
-#endif
-        else if (unlikely(PyTuple_Check(ev))) {
-            if (PyTuple_GET_SIZE(ev) >= 1) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
-                value = PyTuple_GET_ITEM(ev, 0);
-                Py_INCREF(value);
-#else
-                value = PySequence_ITEM(ev, 0);
-#endif
-            } else {
-                Py_INCREF(Py_None);
-                value = Py_None;
-            }
-            Py_DECREF(ev);
-        }
-        else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) {
-            value = ev;
-        }
-        if (likely(value)) {
-            Py_XDECREF(tb);
-            Py_DECREF(et);
-            *pvalue = value;
-            return 0;
-        }
-    } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) {
-        __Pyx_ErrRestore(et, ev, tb);
-        return -1;
-    }
-    PyErr_NormalizeException(&et, &ev, &tb);
-    if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) {
-        __Pyx_ErrRestore(et, ev, tb);
-        return -1;
-    }
-    Py_XDECREF(tb);
-    Py_DECREF(et);
-#if PY_VERSION_HEX >= 0x030300A0
-    value = ((PyStopIterationObject *)ev)->value;
-    Py_INCREF(value);
-    Py_DECREF(ev);
-#else
-    {
-        PyObject* args = __Pyx_PyObject_GetAttrStr(ev, __pyx_n_s_args);
-        Py_DECREF(ev);
-        if (likely(args)) {
-            value = PySequence_GetItem(args, 0);
-            Py_DECREF(args);
-        }
-        if (unlikely(!value)) {
-            __Pyx_ErrRestore(NULL, NULL, NULL);
-            Py_INCREF(Py_None);
-            value = Py_None;
-        }
-    }
-#endif
-    *pvalue = value;
-    return 0;
-}
-static CYTHON_INLINE
-void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) {
-    PyObject *t, *v, *tb;
-    t = exc_state->exc_type;
-    v = exc_state->exc_value;
-    tb = exc_state->exc_traceback;
-    exc_state->exc_type = NULL;
-    exc_state->exc_value = NULL;
-    exc_state->exc_traceback = NULL;
-    Py_XDECREF(t);
-    Py_XDECREF(v);
-    Py_XDECREF(tb);
-}
-#define __Pyx_Coroutine_AlreadyRunningError(gen)  (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL)
-static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) {
-    const char *msg;
-    if ((0)) {
-    #ifdef __Pyx_Coroutine_USED
-    } else if (__Pyx_Coroutine_Check((PyObject*)gen)) {
-        msg = "coroutine already executing";
-    #endif
-    #ifdef __Pyx_AsyncGen_USED
-    } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) {
-        msg = "async generator already executing";
-    #endif
-    } else {
-        msg = "generator already executing";
-    }
-    PyErr_SetString(PyExc_ValueError, msg);
-}
-#define __Pyx_Coroutine_NotStartedError(gen)  (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL)
-static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) {
-    const char *msg;
-    if ((0)) {
-    #ifdef __Pyx_Coroutine_USED
-    } else if (__Pyx_Coroutine_Check(gen)) {
-        msg = "can't send non-None value to a just-started coroutine";
-    #endif
-    #ifdef __Pyx_AsyncGen_USED
-    } else if (__Pyx_AsyncGen_CheckExact(gen)) {
-        msg = "can't send non-None value to a just-started async generator";
-    #endif
-    } else {
-        msg = "can't send non-None value to a just-started generator";
-    }
-    PyErr_SetString(PyExc_TypeError, msg);
-}
-#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing)  (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL)
-static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) {
-    #ifdef __Pyx_Coroutine_USED
-    if (!closing && __Pyx_Coroutine_Check(gen)) {
-        PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine");
-    } else
-    #endif
-    if (value) {
-        #ifdef __Pyx_AsyncGen_USED
-        if (__Pyx_AsyncGen_CheckExact(gen))
-            PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration);
-        else
-        #endif
-        PyErr_SetNone(PyExc_StopIteration);
-    }
-}
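-/* Resume the generator/coroutine body with 'value', swapping the saved exception state in and out around the call. */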
-static
-PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) {
-    __Pyx_PyThreadState_declare
-    PyThreadState *tstate;
-    __Pyx_ExcInfoStruct *exc_state;
-    PyObject *retval;
-    assert(!self->is_running);
-    if (unlikely(self->resume_label == 0)) {
-        if (unlikely(value && value != Py_None)) {
-            return __Pyx_Coroutine_NotStartedError((PyObject*)self);
-        }
-    }
-    if (unlikely(self->resume_label == -1)) {
-        return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing);
-    }
-#if CYTHON_FAST_THREAD_STATE
-    __Pyx_PyThreadState_assign
-    tstate = __pyx_tstate;
-#else
-    tstate = __Pyx_PyThreadState_Current;
-#endif
-    exc_state = &self->gi_exc_state;
-    if (exc_state->exc_type) {
-        #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
-        #else
-        if (exc_state->exc_traceback) {
-            PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback;
-            PyFrameObject *f = tb->tb_frame;
-            Py_XINCREF(tstate->frame);
-            assert(f->f_back == NULL);
-            f->f_back = tstate->frame;
-        }
-        #endif
-    }
-#if CYTHON_USE_EXC_INFO_STACK
-    exc_state->previous_item = tstate->exc_info;
-    tstate->exc_info = exc_state;
-#else
-    if (exc_state->exc_type) {
-        __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
-    } else {
-        __Pyx_Coroutine_ExceptionClear(exc_state);
-        __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
-    }
-#endif
-    self->is_running = 1;
-    retval = self->body((PyObject *) self, tstate, value);
-    self->is_running = 0;
-#if CYTHON_USE_EXC_INFO_STACK
-    exc_state = &self->gi_exc_state;
-    tstate->exc_info = exc_state->previous_item;
-    exc_state->previous_item = NULL;
-    __Pyx_Coroutine_ResetFrameBackpointer(exc_state);
-#endif
-    return retval;
-}
-static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) {
-    PyObject *exc_tb = exc_state->exc_traceback;
-    if (likely(exc_tb)) {
-#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
-#else
-        PyTracebackObject *tb = (PyTracebackObject *) exc_tb;
-        PyFrameObject *f = tb->tb_frame;
-        Py_CLEAR(f->f_back);
-#endif
-    }
-}
-static CYTHON_INLINE
-PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) {
-    if (unlikely(!retval)) {
-        __Pyx_PyThreadState_declare
-        __Pyx_PyThreadState_assign
-        if (!__Pyx_PyErr_Occurred()) {
-            PyObject *exc = PyExc_StopIteration;
-            #ifdef __Pyx_AsyncGen_USED
-            if (__Pyx_AsyncGen_CheckExact(gen))
-                exc = __Pyx_PyExc_StopAsyncIteration;
-            #endif
-            __Pyx_PyErr_SetNone(exc);
-        }
-    }
-    return retval;
-}
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
-static CYTHON_INLINE
-PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) {
-#if PY_VERSION_HEX <= 0x030A00A1
-    return _PyGen_Send(gen, arg);
-#else
-    PyObject *result;
-    if (PyIter_Send((PyObject*)gen, arg ? arg : Py_None, &result) == PYGEN_RETURN) {
-        if (PyAsyncGen_CheckExact(gen)) {
-            assert(result == Py_None);
-            PyErr_SetNone(PyExc_StopAsyncIteration);
-        }
-        else if (result == Py_None) {
-            PyErr_SetNone(PyExc_StopIteration);
-        }
-        else {
-            _PyGen_SetStopIterationValue(result);
-        }
-        Py_CLEAR(result);
-    }
-    return result;
-#endif
-}
-#endif
-static CYTHON_INLINE
-PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) {
-    PyObject *ret;
-    PyObject *val = NULL;
-    __Pyx_Coroutine_Undelegate(gen);
-    __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val);
-    ret = __Pyx_Coroutine_SendEx(gen, val, 0);
-    Py_XDECREF(val);
-    return ret;
-}
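-/* gen.send(value): while delegating via 'yield from', forward the value to the sub-iterator using the fastest matching dispatch; otherwise resume the body directly. */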
-static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) {
-    PyObject *retval;
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
-    PyObject *yf = gen->yieldfrom;
-    if (unlikely(gen->is_running))
-        return __Pyx_Coroutine_AlreadyRunningError(gen);
-    if (yf) {
-        PyObject *ret;
-        gen->is_running = 1;
-        #ifdef __Pyx_Generator_USED
-        if (__Pyx_Generator_CheckExact(yf)) {
-            ret = __Pyx_Coroutine_Send(yf, value);
-        } else
-        #endif
-        #ifdef __Pyx_Coroutine_USED
-        if (__Pyx_Coroutine_Check(yf)) {
-            ret = __Pyx_Coroutine_Send(yf, value);
-        } else
-        #endif
-        #ifdef __Pyx_AsyncGen_USED
-        if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
-            ret = __Pyx_async_gen_asend_send(yf, value);
-        } else
-        #endif
-        #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
-        if (PyGen_CheckExact(yf)) {
-            ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
-        } else
-        #endif
-        #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
-        if (PyCoro_CheckExact(yf)) {
-            ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
-        } else
-        #endif
-        {
-            if (value == Py_None)
-                ret = Py_TYPE(yf)->tp_iternext(yf);
-            else
-                ret = __Pyx_PyObject_CallMethod1(yf, __pyx_n_s_send, value);
-        }
-        gen->is_running = 0;
-        if (likely(ret)) {
-            return ret;
-        }
-        retval = __Pyx_Coroutine_FinishDelegation(gen);
-    } else {
-        retval = __Pyx_Coroutine_SendEx(gen, value, 0);
-    }
-    return __Pyx_Coroutine_MethodReturn(self, retval);
-}
-static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
-    PyObject *retval = NULL;
-    int err = 0;
-    #ifdef __Pyx_Generator_USED
-    if (__Pyx_Generator_CheckExact(yf)) {
-        retval = __Pyx_Coroutine_Close(yf);
-        if (!retval)
-            return -1;
-    } else
-    #endif
-    #ifdef __Pyx_Coroutine_USED
-    if (__Pyx_Coroutine_Check(yf)) {
-        retval = __Pyx_Coroutine_Close(yf);
-        if (!retval)
-            return -1;
-    } else
-    if (__Pyx_CoroutineAwait_CheckExact(yf)) {
-        retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL);
-        if (!retval)
-            return -1;
-    } else
-    #endif
-    #ifdef __Pyx_AsyncGen_USED
-    if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
-        retval = __Pyx_async_gen_asend_close(yf, NULL);
-    } else
-    if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) {
-        retval = __Pyx_async_gen_athrow_close(yf, NULL);
-    } else
-    #endif
-    {
-        PyObject *meth;
-        gen->is_running = 1;
-        meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_close);
-        if (unlikely(!meth)) {
-            if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
-                PyErr_WriteUnraisable(yf);
-            }
-            PyErr_Clear();
-        } else {
-            retval = PyObject_CallFunction(meth, NULL);
-            Py_DECREF(meth);
-            if (!retval)
-                err = -1;
-        }
-        gen->is_running = 0;
-    }
-    Py_XDECREF(retval);
-    return err;
-}
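-/* tp_iternext: equivalent to send(None), including the 'yield from' delegation above. */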
-static PyObject *__Pyx_Generator_Next(PyObject *self) {
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
-    PyObject *yf = gen->yieldfrom;
-    if (unlikely(gen->is_running))
-        return __Pyx_Coroutine_AlreadyRunningError(gen);
-    if (yf) {
-        PyObject *ret;
-        gen->is_running = 1;
-        #ifdef __Pyx_Generator_USED
-        if (__Pyx_Generator_CheckExact(yf)) {
-            ret = __Pyx_Generator_Next(yf);
-        } else
-        #endif
-        #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
-        if (PyGen_CheckExact(yf)) {
-            ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL);
-        } else
-        #endif
-        #ifdef __Pyx_Coroutine_USED
-        if (__Pyx_Coroutine_Check(yf)) {
-            ret = __Pyx_Coroutine_Send(yf, Py_None);
-        } else
-        #endif
-            ret = Py_TYPE(yf)->tp_iternext(yf);
-        gen->is_running = 0;
-        if (likely(ret)) {
-            return ret;
-        }
-        return __Pyx_Coroutine_FinishDelegation(gen);
-    }
-    return __Pyx_Coroutine_SendEx(gen, Py_None, 0);
-}
-static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) {
-    return __Pyx_Coroutine_Close(self);
-}
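-/* gen.close(): raise GeneratorExit at the current yield point; a generator that keeps yielding afterwards is a RuntimeError. */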
-static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
-    PyObject *retval, *raised_exception;
-    PyObject *yf = gen->yieldfrom;
-    int err = 0;
-    if (unlikely(gen->is_running))
-        return __Pyx_Coroutine_AlreadyRunningError(gen);
-    if (yf) {
-        Py_INCREF(yf);
-        err = __Pyx_Coroutine_CloseIter(gen, yf);
-        __Pyx_Coroutine_Undelegate(gen);
-        Py_DECREF(yf);
-    }
-    if (err == 0)
-        PyErr_SetNone(PyExc_GeneratorExit);
-    retval = __Pyx_Coroutine_SendEx(gen, NULL, 1);
-    if (unlikely(retval)) {
-        const char *msg;
-        Py_DECREF(retval);
-        if ((0)) {
-        #ifdef __Pyx_Coroutine_USED
-        } else if (__Pyx_Coroutine_Check(self)) {
-            msg = "coroutine ignored GeneratorExit";
-        #endif
-        #ifdef __Pyx_AsyncGen_USED
-        } else if (__Pyx_AsyncGen_CheckExact(self)) {
-#if PY_VERSION_HEX < 0x03060000
-            msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)";
-#else
-            msg = "async generator ignored GeneratorExit";
-#endif
-        #endif
-        } else {
-            msg = "generator ignored GeneratorExit";
-        }
-        PyErr_SetString(PyExc_RuntimeError, msg);
-        return NULL;
-    }
-    raised_exception = PyErr_Occurred();
-    if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) {
-        if (raised_exception) PyErr_Clear();
-        Py_INCREF(Py_None);
-        return Py_None;
-    }
-    return NULL;
-}
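-/* gen.throw(): propagate the exception into a delegated sub-iterator when possible, otherwise raise it at the current yield point. */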
-static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb,
-                                        PyObject *args, int close_on_genexit) {
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
-    PyObject *yf = gen->yieldfrom;
-    if (unlikely(gen->is_running))
-        return __Pyx_Coroutine_AlreadyRunningError(gen);
-    if (yf) {
-        PyObject *ret;
-        Py_INCREF(yf);
-        if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) {
-            int err = __Pyx_Coroutine_CloseIter(gen, yf);
-            Py_DECREF(yf);
-            __Pyx_Coroutine_Undelegate(gen);
-            if (err < 0)
-                return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
-            goto throw_here;
-        }
-        gen->is_running = 1;
-        if (0
-        #ifdef __Pyx_Generator_USED
-            || __Pyx_Generator_CheckExact(yf)
-        #endif
-        #ifdef __Pyx_Coroutine_USED
-            || __Pyx_Coroutine_Check(yf)
-        #endif
-            ) {
-            ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit);
-        #ifdef __Pyx_Coroutine_USED
-        } else if (__Pyx_CoroutineAwait_CheckExact(yf)) {
-            ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit);
-        #endif
-        } else {
-            PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_throw);
-            if (unlikely(!meth)) {
-                Py_DECREF(yf);
-                if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
-                    gen->is_running = 0;
-                    return NULL;
-                }
-                PyErr_Clear();
-                __Pyx_Coroutine_Undelegate(gen);
-                gen->is_running = 0;
-                goto throw_here;
-            }
-            if (likely(args)) {
-                ret = PyObject_CallObject(meth, args);
-            } else {
-                ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL);
-            }
-            Py_DECREF(meth);
-        }
-        gen->is_running = 0;
-        Py_DECREF(yf);
-        if (!ret) {
-            ret = __Pyx_Coroutine_FinishDelegation(gen);
-        }
-        return __Pyx_Coroutine_MethodReturn(self, ret);
-    }
-throw_here:
-    __Pyx_Raise(typ, val, tb, NULL);
-    return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
-}
-static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) {
-    PyObject *typ;
-    PyObject *val = NULL;
-    PyObject *tb = NULL;
-    if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))
-        return NULL;
-    return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1);
-}
-static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) {
-    Py_VISIT(exc_state->exc_type);
-    Py_VISIT(exc_state->exc_value);
-    Py_VISIT(exc_state->exc_traceback);
-    return 0;
-}
-static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) {
-    Py_VISIT(gen->closure);
-    Py_VISIT(gen->classobj);
-    Py_VISIT(gen->yieldfrom);
-    return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg);
-}
-static int __Pyx_Coroutine_clear(PyObject *self) {
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
-    Py_CLEAR(gen->closure);
-    Py_CLEAR(gen->classobj);
-    Py_CLEAR(gen->yieldfrom);
-    __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state);
-#ifdef __Pyx_AsyncGen_USED
-    if (__Pyx_AsyncGen_CheckExact(self)) {
-        Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
-    }
-#endif
-    Py_CLEAR(gen->gi_code);
-    Py_CLEAR(gen->gi_name);
-    Py_CLEAR(gen->gi_qualname);
-    Py_CLEAR(gen->gi_modulename);
-    return 0;
-}
-static void __Pyx_Coroutine_dealloc(PyObject *self) {
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
-    PyObject_GC_UnTrack(gen);
-    if (gen->gi_weakreflist != NULL)
-        PyObject_ClearWeakRefs(self);
-    if (gen->resume_label >= 0) {
-        PyObject_GC_Track(self);
-#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE
-        if (PyObject_CallFinalizerFromDealloc(self))
-#else
-        Py_TYPE(gen)->tp_del(self);
-        if (self->ob_refcnt > 0)
-#endif
-        {
-            return;
-        }
-        PyObject_GC_UnTrack(self);
-    }
-#ifdef __Pyx_AsyncGen_USED
-    if (__Pyx_AsyncGen_CheckExact(self)) {
-        /* We have to handle this case for asynchronous generators
-           right here, because this code has to be between UNTRACK
-           and GC_Del. */
-        Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer);
-    }
-#endif
-    __Pyx_Coroutine_clear(self);
-    PyObject_GC_Del(gen);
-}
-static void __Pyx_Coroutine_del(PyObject *self) {
-    PyObject *error_type, *error_value, *error_traceback;
-    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
-    __Pyx_PyThreadState_declare
-    if (gen->resume_label < 0) {
-        return;
-    }
-#if !CYTHON_USE_TP_FINALIZE
-    assert(self->ob_refcnt == 0);
-    __Pyx_SET_REFCNT(self, 1);
-#endif
-    __Pyx_PyThreadState_assign
-    __Pyx_ErrFetch(&error_type, &error_value, &error_traceback);
-#ifdef __Pyx_AsyncGen_USED
-    if (__Pyx_AsyncGen_CheckExact(self)) {
-        __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self;
-        PyObject *finalizer = agen->ag_finalizer;
-        if (finalizer && !agen->ag_closed) {
-            PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self);
-            if (unlikely(!res)) {
-                PyErr_WriteUnraisable(self);
-            } else {
-                Py_DECREF(res);
-            }
-            __Pyx_ErrRestore(error_type, error_value, error_traceback);
-            return;
-        }
-    }
-#endif
-    if (unlikely(gen->resume_label == 0 && !error_value)) {
-#ifdef __Pyx_Coroutine_USED
-#ifdef __Pyx_Generator_USED
-    if (!__Pyx_Generator_CheckExact(self))
-#endif
-        {
-        PyObject_GC_UnTrack(self);
-#if PY_MAJOR_VERSION >= 3  || defined(PyErr_WarnFormat)
-        if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0))
-            PyErr_WriteUnraisable(self);
-#else
-        {PyObject *msg;
-        char *cmsg;
-        #if CYTHON_COMPILING_IN_PYPY
-        msg = NULL;
-        cmsg = (char*) "coroutine was never awaited";
-        #else
-        char *cname;
-        PyObject *qualname;
-        qualname = gen->gi_qualname;
-        cname = PyString_AS_STRING(qualname);
-        msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname);
-        if (unlikely(!msg)) {
-            PyErr_Clear();
-            cmsg = (char*) "coroutine was never awaited";
-        } else {
-            cmsg = PyString_AS_STRING(msg);
-        }
-        #endif
-        if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0))
-            PyErr_WriteUnraisable(self);
-        Py_XDECREF(msg);}
-#endif
-        PyObject_GC_Track(self);
-        }
-#endif
-    } else {
-        PyObject *res = __Pyx_Coroutine_Close(self);
-        if (unlikely(!res)) {
-            if (PyErr_Occurred())
-                PyErr_WriteUnraisable(self);
-        } else {
-            Py_DECREF(res);
-        }
-    }
-    __Pyx_ErrRestore(error_type, error_value, error_traceback);
-#if !CYTHON_USE_TP_FINALIZE
-    assert(self->ob_refcnt > 0);
-    if (--self->ob_refcnt == 0) {
-        return;
-    }
-    {
-        Py_ssize_t refcnt = self->ob_refcnt;
-        _Py_NewReference(self);
-        __Pyx_SET_REFCNT(self, refcnt);
-    }
-#if CYTHON_COMPILING_IN_CPYTHON
-    assert(PyType_IS_GC(self->ob_type) &&
-           _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED);
-    _Py_DEC_REFTOTAL;
-#endif
-#ifdef COUNT_ALLOCS
-    --Py_TYPE(self)->tp_frees;
-    --Py_TYPE(self)->tp_allocs;
-#endif
-#endif
-}
-static PyObject *
-__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
-{
-    PyObject *name = self->gi_name;
-    if (unlikely(!name)) name = Py_None;
-    Py_INCREF(name);
-    return name;
-}
-static int
-__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp;
-#if PY_MAJOR_VERSION >= 3
-    if (unlikely(value == NULL || !PyUnicode_Check(value)))
-#else
-    if (unlikely(value == NULL || !PyString_Check(value)))
-#endif
-    {
-        PyErr_SetString(PyExc_TypeError,
-                        "__name__ must be set to a string object");
-        return -1;
-    }
-    tmp = self->gi_name;
-    Py_INCREF(value);
-    self->gi_name = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static PyObject *
-__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
-{
-    PyObject *name = self->gi_qualname;
-    if (unlikely(!name)) name = Py_None;
-    Py_INCREF(name);
-    return name;
-}
-static int
-__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
-{
-    PyObject *tmp;
-#if PY_MAJOR_VERSION >= 3
-    if (unlikely(value == NULL || !PyUnicode_Check(value)))
-#else
-    if (unlikely(value == NULL || !PyString_Check(value)))
-#endif
-    {
-        PyErr_SetString(PyExc_TypeError,
-                        "__qualname__ must be set to a string object");
-        return -1;
-    }
-    tmp = self->gi_qualname;
-    Py_INCREF(value);
-    self->gi_qualname = value;
-    Py_XDECREF(tmp);
-    return 0;
-}
-static __pyx_CoroutineObject *__Pyx__Coroutine_New(
-            PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
-            PyObject *name, PyObject *qualname, PyObject *module_name) {
-    __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type);
-    if (unlikely(!gen))
-        return NULL;
-    return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name);
-}
-static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
-            __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
-            PyObject *name, PyObject *qualname, PyObject *module_name) {
-    gen->body = body;
-    gen->closure = closure;
-    Py_XINCREF(closure);
-    gen->is_running = 0;
-    gen->resume_label = 0;
-    gen->classobj = NULL;
-    gen->yieldfrom = NULL;
-    gen->gi_exc_state.exc_type = NULL;
-    gen->gi_exc_state.exc_value = NULL;
-    gen->gi_exc_state.exc_traceback = NULL;
-#if CYTHON_USE_EXC_INFO_STACK
-    gen->gi_exc_state.previous_item = NULL;
-#endif
-    gen->gi_weakreflist = NULL;
-    Py_XINCREF(qualname);
-    gen->gi_qualname = qualname;
-    Py_XINCREF(name);
-    gen->gi_name = name;
-    Py_XINCREF(module_name);
-    gen->gi_modulename = module_name;
-    Py_XINCREF(code);
-    gen->gi_code = code;
-    PyObject_GC_Track(gen);
-    return gen;
-}
-
-/* PatchModuleWithCoroutine */
-  static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) {
-#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
-    int result;
-    PyObject *globals, *result_obj;
-    globals = PyDict_New();  if (unlikely(!globals)) goto ignore;
-    result = PyDict_SetItemString(globals, "_cython_coroutine_type",
-    #ifdef __Pyx_Coroutine_USED
-        (PyObject*)__pyx_CoroutineType);
-    #else
-        Py_None);
-    #endif
-    if (unlikely(result < 0)) goto ignore;
-    result = PyDict_SetItemString(globals, "_cython_generator_type",
-    #ifdef __Pyx_Generator_USED
-        (PyObject*)__pyx_GeneratorType);
-    #else
-        Py_None);
-    #endif
-    if (unlikely(result < 0)) goto ignore;
-    if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore;
-    if (unlikely(PyDict_SetItemString(globals, "__builtins__", __pyx_b) < 0)) goto ignore;
-    result_obj = PyRun_String(py_code, Py_file_input, globals, globals);
-    if (unlikely(!result_obj)) goto ignore;
-    Py_DECREF(result_obj);
-    Py_DECREF(globals);
-    return module;
-ignore:
-    Py_XDECREF(globals);
-    PyErr_WriteUnraisable(module);
-    if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) {
-        Py_DECREF(module);
-        module = NULL;
-    }
-#else
-    py_code++;
-#endif
-    return module;
-}
-
-/* PatchGeneratorABC */
-  #ifndef CYTHON_REGISTER_ABCS
-#define CYTHON_REGISTER_ABCS 1
-#endif
-#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
-static PyObject* __Pyx_patch_abc_module(PyObject *module);
-static PyObject* __Pyx_patch_abc_module(PyObject *module) {
-    module = __Pyx_Coroutine_patch_module(
-        module, ""
-"if _cython_generator_type is not None:\n"
-"    try: Generator = _module.Generator\n"
-"    except AttributeError: pass\n"
-"    else: Generator.register(_cython_generator_type)\n"
-"if _cython_coroutine_type is not None:\n"
-"    try: Coroutine = _module.Coroutine\n"
-"    except AttributeError: pass\n"
-"    else: Coroutine.register(_cython_coroutine_type)\n"
-    );
-    return module;
-}
-#endif
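-/* Register the Cython generator/coroutine types with collections.abc (and backports_abc if importable) so isinstance checks pass. */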
-static int __Pyx_patch_abc(void) {
-#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
-    static int abc_patched = 0;
-    if (CYTHON_REGISTER_ABCS && !abc_patched) {
-        PyObject *module;
-        module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections");
-        if (!module) {
-            PyErr_WriteUnraisable(NULL);
-            if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning,
-                    ((PY_MAJOR_VERSION >= 3) ?
-                        "Cython module failed to register with collections.abc module" :
-                        "Cython module failed to register with collections module"), 1) < 0)) {
-                return -1;
-            }
-        } else {
-            module = __Pyx_patch_abc_module(module);
-            abc_patched = 1;
-            if (unlikely(!module))
-                return -1;
-            Py_DECREF(module);
-        }
-        module = PyImport_ImportModule("backports_abc");
-        if (module) {
-            module = __Pyx_patch_abc_module(module);
-            Py_XDECREF(module);
-        }
-        if (!module) {
-            PyErr_Clear();
-        }
-    }
-#else
-    if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL);
-#endif
-    return 0;
-}
-
-/* Generator */
-  static PyMethodDef __pyx_Generator_methods[] = {
-    {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
-     (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")},
-    {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS,
-     (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")},
-    {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS,
-     (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")},
-    {0, 0, 0, 0}
-};
-static PyMemberDef __pyx_Generator_memberlist[] = {
-    {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
-    {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
-     (char*) PyDoc_STR("object being iterated by 'yield from', or None")},
-    {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
-    {0, 0, 0, 0, 0}
-};
-static PyGetSetDef __pyx_Generator_getsets[] = {
-    {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
-     (char*) PyDoc_STR("name of the generator"), 0},
-    {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
-     (char*) PyDoc_STR("qualified name of the generator"), 0},
-    {0, 0, 0, 0, 0}
-};
-static PyTypeObject __pyx_GeneratorType_type = {
-    PyVarObject_HEAD_INIT(0, 0)
-    "generator",
-    sizeof(__pyx_CoroutineObject),
-    0,
-    (destructor) __Pyx_Coroutine_dealloc,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE,
-    0,
-    (traverseproc) __Pyx_Coroutine_traverse,
-    0,
-    0,
-    offsetof(__pyx_CoroutineObject, gi_weakreflist),
-    0,
-    (iternextfunc) __Pyx_Generator_Next,
-    __pyx_Generator_methods,
-    __pyx_Generator_memberlist,
-    __pyx_Generator_getsets,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-    0,
-#if CYTHON_USE_TP_FINALIZE
-    0,
-#else
-    __Pyx_Coroutine_del,
-#endif
-    0,
-#if CYTHON_USE_TP_FINALIZE
-    __Pyx_Coroutine_del,
-#elif PY_VERSION_HEX >= 0x030400a1
-    0,
-#endif
-#if PY_VERSION_HEX >= 0x030800b1
-    0,
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
-    0,
-#endif
-};
-static int __pyx_Generator_init(void) {
-    __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
-    __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
-    __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
-    if (unlikely(!__pyx_GeneratorType)) {
-        return -1;
-    }
-    return 0;
-}
-
-/* CheckBinaryVersion */
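-/* Warn when the Python major.minor version this module was compiled against differs from the runtime version. */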
-  static int __Pyx_check_binary_version(void) {
-    char ctversion[4], rtversion[4];
-    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
-    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
-    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
-        char message[200];
-        PyOS_snprintf(message, sizeof(message),
-                      "compiletime version %s of module '%.100s' "
-                      "does not match runtime version %s",
-                      ctversion, __Pyx_MODULE_NAME, rtversion);
-        return PyErr_WarnEx(NULL, message, 1);
-    }
-    return 0;
-}
-
-/* InitStrings */
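-/* Materialise the module's string constants from the compile-time string table, interning or decoding each entry as flagged. */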
-  static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
-    while (t->p) {
-        #if PY_MAJOR_VERSION < 3
-        if (t->is_unicode) {
-            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
-        } else if (t->intern) {
-            *t->p = PyString_InternFromString(t->s);
-        } else {
-            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
-        }
-        #else
-        if (t->is_unicode | t->is_str) {
-            if (t->intern) {
-                *t->p = PyUnicode_InternFromString(t->s);
-            } else if (t->encoding) {
-                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
-            } else {
-                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
-            }
-        } else {
-            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
-        }
-        #endif
-        if (!*t->p)
-            return -1;
-        if (PyObject_Hash(*t->p) == -1)
-            return -1;
-        ++t;
-    }
-    return 0;
-}
-
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
-    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
-}
-static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
-    Py_ssize_t ignore;
-    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
-}
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
-#if !CYTHON_PEP393_ENABLED
-static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-    char* defenc_c;
-    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
-    if (!defenc) return NULL;
-    defenc_c = PyBytes_AS_STRING(defenc);
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-    {
-        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
-        char* c;
-        for (c = defenc_c; c < end; c++) {
-            if ((unsigned char) (*c) >= 128) {
-                PyUnicode_AsASCIIString(o);
-                return NULL;
-            }
-        }
-    }
-#endif
-    *length = PyBytes_GET_SIZE(defenc);
-    return defenc_c;
-}
-#else
-static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-    if (likely(PyUnicode_IS_ASCII(o))) {
-        *length = PyUnicode_GET_LENGTH(o);
-        return PyUnicode_AsUTF8(o);
-    } else {
-        PyUnicode_AsASCIIString(o);
-        return NULL;
-    }
-#else
-    return PyUnicode_AsUTF8AndSize(o, length);
-#endif
-}
-#endif
-#endif
-static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
-#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
-    if (
-#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-            __Pyx_sys_getdefaultencoding_not_ascii &&
-#endif
-            PyUnicode_Check(o)) {
-        return __Pyx_PyUnicode_AsStringAndSize(o, length);
-    } else
-#endif
-#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
-    if (PyByteArray_Check(o)) {
-        *length = PyByteArray_GET_SIZE(o);
-        return PyByteArray_AS_STRING(o);
-    } else
-#endif
-    {
-        char* result;
-        int r = PyBytes_AsStringAndSize(o, &result, length);
-        if (unlikely(r < 0)) {
-            return NULL;
-        } else {
-            return result;
-        }
-    }
-}
-static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
-   int is_true = x == Py_True;
-   if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
-   else return PyObject_IsTrue(x);
-}
-static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
-    int retval;
-    if (unlikely(!x)) return -1;
-    retval = __Pyx_PyObject_IsTrue(x);
-    Py_DECREF(x);
-    return retval;
-}
-static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
-#if PY_MAJOR_VERSION >= 3
-    if (PyLong_Check(result)) {
-        if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
-                "__int__ returned non-int (type %.200s).  "
-                "The ability to return an instance of a strict subclass of int "
-                "is deprecated, and may be removed in a future version of Python.",
-                Py_TYPE(result)->tp_name)) {
-            Py_DECREF(result);
-            return NULL;
-        }
-        return result;
-    }
-#endif
-    PyErr_Format(PyExc_TypeError,
-                 "__%.4s__ returned non-%.4s (type %.200s)",
-                 type_name, type_name, Py_TYPE(result)->tp_name);
-    Py_DECREF(result);
-    return NULL;
-}
-static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
-#if CYTHON_USE_TYPE_SLOTS
-  PyNumberMethods *m;
-#endif
-  const char *name = NULL;
-  PyObject *res = NULL;
-#if PY_MAJOR_VERSION < 3
-  if (likely(PyInt_Check(x) || PyLong_Check(x)))
-#else
-  if (likely(PyLong_Check(x)))
-#endif
-    return __Pyx_NewRef(x);
-#if CYTHON_USE_TYPE_SLOTS
-  m = Py_TYPE(x)->tp_as_number;
-  #if PY_MAJOR_VERSION < 3
-  if (m && m->nb_int) {
-    name = "int";
-    res = m->nb_int(x);
-  }
-  else if (m && m->nb_long) {
-    name = "long";
-    res = m->nb_long(x);
-  }
-  #else
-  if (likely(m && m->nb_int)) {
-    name = "int";
-    res = m->nb_int(x);
-  }
-  #endif
-#else
-  if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
-    res = PyNumber_Int(x);
-  }
-#endif
-  if (likely(res)) {
-#if PY_MAJOR_VERSION < 3
-    if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
-#else
-    if (unlikely(!PyLong_CheckExact(res))) {
-#endif
-        return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
-    }
-  }
-  else if (!PyErr_Occurred()) {
-    PyErr_SetString(PyExc_TypeError,
-                    "an integer is required");
-  }
-  return res;
-}
-static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
-  Py_ssize_t ival;
-  PyObject *x;
-#if PY_MAJOR_VERSION < 3
-  if (likely(PyInt_CheckExact(b))) {
-    if (sizeof(Py_ssize_t) >= sizeof(long))
-        return PyInt_AS_LONG(b);
-    else
-        return PyInt_AsSsize_t(b);
-  }
-#endif
-  if (likely(PyLong_CheckExact(b))) {
-    #if CYTHON_USE_PYLONG_INTERNALS
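-    /* Fast path: decode machine-word-sized values directly from the PyLong digit array. */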
-    const digit* digits = ((PyLongObject*)b)->ob_digit;
-    const Py_ssize_t size = Py_SIZE(b);
-    if (likely(__Pyx_sst_abs(size) <= 1)) {
-        ival = likely(size) ? digits[0] : 0;
-        if (size == -1) ival = -ival;
-        return ival;
-    } else {
-      switch (size) {
-         case 2:
-           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
-             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-         case -2:
-           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
-             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-         case 3:
-           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
-             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-         case -3:
-           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
-             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-         case 4:
-           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
-             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-         case -4:
-           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
-             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
-           }
-           break;
-      }
-    }
-    #endif
-    return PyLong_AsSsize_t(b);
-  }
-  x = PyNumber_Index(b);
-  if (!x) return -1;
-  ival = PyInt_AsSsize_t(x);
-  Py_DECREF(x);
-  return ival;
-}
-static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
-  return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
-}
-static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
-    return PyInt_FromSize_t(ival);
-}
-
-
-#endif /* Py_PYTHON_H */
diff --git a/spaces/Mayank-02/Matching-job-descriptions-and-resumes/app.py b/spaces/Mayank-02/Matching-job-descriptions-and-resumes/app.py
deleted file mode 100644
index 1468faa430877f3ca6ce788cf02f2acde15d97ce..0000000000000000000000000000000000000000
--- a/spaces/Mayank-02/Matching-job-descriptions-and-resumes/app.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import gradio as gr
-from sentence_transformers import SentenceTransformer
-from sentence_transformers import util
-import numpy as np
-
-model = SentenceTransformer('msmarco-distilbert-base-tas-b-final')
-
-def softmax(z):
-    assert len(z.shape) == 2
-
-    s = np.max(z, axis=1)
-    s = s[:, np.newaxis]  # necessary step to do broadcasting
-    e_x = np.exp(z - s)
-    div = np.sum(e_x, axis=1)
-    div = div[:, np.newaxis]  # ditto: same broadcasting step
-    return e_x / div
-
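-# Worked example (illustrative): softmax(np.array([[1.0, 2.0, 3.0]]))
-# -> array([[0.09003057, 0.24472847, 0.66524096]]); each row sums to 1.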
-
-def text_list(queries, document):
-    # Unused helper: flattens the nested query list coming from the gr.List input.
-    return sum(queries, []), document
-
-
-def score_cos_sim(art1, art2):
-    scores = util.cos_sim(art1, art2)[0]
-    return scores
-
-
-def score_inference(queries, document):
-    score = dict()
-    queries = sum(queries, [])
-    queries_encode = [model.encode(text) for text in queries]
-    document_encode = model.encode(document)
-
-    for i, query in enumerate(queries_encode):
-        score["document_" + str(i + 1)] = score_cos_sim(query, document_encode)
-
-    # Collect the scalar cosine scores into a (1, n_queries) array for softmax.
-    cosine_scores = np.array(
-        [[i[0].numpy().tolist() for i in list(score.values())]])
-
-    return dict(zip(list(score.keys()), list(softmax(cosine_scores)[0])))
-
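-# Illustrative call (argument values hypothetical):
-#   score_inference([["resume text"]], "job description")  -> {"document_1": 1.0}
-# (a softmax over a single query necessarily returns 1.0)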
-
-text_input = gr.Textbox(lines=4, placeholder="Documents Here...")
-
-interface = gr.Interface(fn=score_inference, inputs=[gr.List(row_count=3, datatype='str'), text_input], outputs="label",
-                         examples=[[[[
-                                         """I have 3.5+ years of work experience and was working as a data scientist with 3 different organizations. I was responsible for using predictive modelling, data processing, and data mining algorithms to solve challenging business problems.\nMy technology stack includes but not limited to, are python, machine learning, deep learning, time-series, web scraping, flask, FastAPI, snowflake SQL servers, deploying production based servers, keras, TensorFlow, hugging face, Big Data and Data Warehouses. In my career, my growth has been exponential, and I developed interpersonal skills, now I know how to handle a project end to end.\nMy area of interests are applied machine learning, deep neural network, time series and everything around NLP in the field of ecommerce and consumer internet. My research focus is on information retrieval involving neuroscience and deep reinforcement learning.\nI like to listen to a lot of learning courses and read research papers involving deep learning. In my spare time I like to keep up with the news, read blogs on medium and watch a few sci-fi films."""],
-                                     [
-                                         """Snehil started his entrepreneurial journey 14 years ago with the launch of a social networking site along with music and video streaming portals back in 2006, while he was still in school. In 2011 while pursuing engineering in Computer Science, he joined Letsbuy, an e-commerce startup, where he developed and launched their mobile app and site while mobile-commerce was still in its nascent stage in India. Letsbuy was later acquired by Flipkart in 2012.Snehil also co-founded Findyahan, a services marketplace, which was eventually acquired in 2016 by Zimmber. Snehil joined Zimmber as Vice President of Product & Marketing. Zimmber was later acquired by Quikr."""],
-                                     [
-                                         """I have over 7 years of combined experience in the fields of data science and machine learning. I've led many data science projects in a wide array of industries. I mainly program in Python using its popular data science libraries.For deep learning, my go to framework is PyTorch. I’ve also worked a significant amount with relational databases and cloud environments.Worked on diverse array of projects where I used my machine learning expertise to build and advise external clients on how to move forward with machine learning projects. I also advised on how to best collect and structure data.Other than work, I write a significant amount with regards to AI. I’ve published several deep learning tutorials,focusing on the PyTorch framework. My articles are published on Medium under the publication A Coder’s Guide to AI."""]],
-                                    "B.Tech / M.Tech degree in Computer Science from a premier institute.\nShould have 1 - 5 years of experience in designing, developing and deploying software, preferably Statistical and Machine Learning models.\nAbility to work independently with strong problem solving skills.\nShould have excellent knowledge in fundamentals of Machine Learning and Artificial Intelligence, especially in Regression, Forecasting and Optimization.\nShould have excellent foundational knowledge in Probability, Statistics and Operations Research/Optimization techniques.\nShould have hands on experience through the ML Lifecycle from EDA to model deployment.\nShould have hands on experience with data analysis tools like Jupyter, and packages like Numpy, Pandas, Matplotlib.\nShould be hands-on in writing code that is reliable, maintainable, secure, performance optimized.\nShould have good knowledge in Cloud Platforms and Service oriented architecture and design"]],
-                         description='Enter text in the fields below and add rows for more than 3 queries, or try out the example below by clicking it')
-
-interface.launch()
\ No newline at end of file
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/vgg.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/vgg.py
deleted file mode 100644
index 8778b649561a45a9652b1a15a26c2d171e58f3e1..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/cnn/vgg.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import logging
-
-import torch.nn as nn
-
-from .utils import constant_init, kaiming_init, normal_init
-
-
-def conv3x3(in_planes, out_planes, dilation=1):
-    """3x3 convolution with padding."""
-    return nn.Conv2d(
-        in_planes,
-        out_planes,
-        kernel_size=3,
-        padding=dilation,
-        dilation=dilation)
-
-
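-# Build one VGG stage: num_blocks x [conv3x3 (+BN) + ReLU], then a 2x2 max-pool.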
-def make_vgg_layer(inplanes,
-                   planes,
-                   num_blocks,
-                   dilation=1,
-                   with_bn=False,
-                   ceil_mode=False):
-    layers = []
-    for _ in range(num_blocks):
-        layers.append(conv3x3(inplanes, planes, dilation))
-        if with_bn:
-            layers.append(nn.BatchNorm2d(planes))
-        layers.append(nn.ReLU(inplace=True))
-        inplanes = planes
-    layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
-
-    return layers
-
-
-class VGG(nn.Module):
-    """VGG backbone.
-
-    Args:
-        depth (int): Depth of vgg, from {11, 13, 16, 19}.
-        with_bn (bool): Use BatchNorm or not.
-        num_classes (int): number of classes for classification.
-        num_stages (int): VGG stages, normally 5.
-        dilations (Sequence[int]): Dilation of each stage.
-        out_indices (Sequence[int]): Output from which stages.
-        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
-            not freezing any parameters.
-        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
-            running stats (mean and var).
-        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
-    """
-
-    arch_settings = {
-        11: (1, 1, 2, 2, 2),
-        13: (2, 2, 2, 2, 2),
-        16: (2, 2, 3, 3, 3),
-        19: (2, 2, 4, 4, 4)
-    }
-
-    def __init__(self,
-                 depth,
-                 with_bn=False,
-                 num_classes=-1,
-                 num_stages=5,
-                 dilations=(1, 1, 1, 1, 1),
-                 out_indices=(0, 1, 2, 3, 4),
-                 frozen_stages=-1,
-                 bn_eval=True,
-                 bn_frozen=False,
-                 ceil_mode=False,
-                 with_last_pool=True):
-        super(VGG, self).__init__()
-        if depth not in self.arch_settings:
-            raise KeyError(f'invalid depth {depth} for vgg')
-        assert num_stages >= 1 and num_stages <= 5
-        stage_blocks = self.arch_settings[depth]
-        self.stage_blocks = stage_blocks[:num_stages]
-        assert len(dilations) == num_stages
-        assert max(out_indices) <= num_stages
-
-        self.num_classes = num_classes
-        self.out_indices = out_indices
-        self.frozen_stages = frozen_stages
-        self.bn_eval = bn_eval
-        self.bn_frozen = bn_frozen
-
-        self.inplanes = 3
-        start_idx = 0
-        vgg_layers = []
-        self.range_sub_modules = []
-        for i, num_blocks in enumerate(self.stage_blocks):
-            num_modules = num_blocks * (2 + with_bn) + 1
-            end_idx = start_idx + num_modules
-            dilation = dilations[i]
-            planes = 64 * 2**i if i < 4 else 512
-            vgg_layer = make_vgg_layer(
-                self.inplanes,
-                planes,
-                num_blocks,
-                dilation=dilation,
-                with_bn=with_bn,
-                ceil_mode=ceil_mode)
-            vgg_layers.extend(vgg_layer)
-            self.inplanes = planes
-            self.range_sub_modules.append([start_idx, end_idx])
-            start_idx = end_idx
-        if not with_last_pool:
-            vgg_layers.pop(-1)
-            self.range_sub_modules[-1][1] -= 1
-        self.module_name = 'features'
-        self.add_module(self.module_name, nn.Sequential(*vgg_layers))
-
-        if self.num_classes > 0:
-            self.classifier = nn.Sequential(
-                nn.Linear(512 * 7 * 7, 4096),
-                nn.ReLU(True),
-                nn.Dropout(),
-                nn.Linear(4096, 4096),
-                nn.ReLU(True),
-                nn.Dropout(),
-                nn.Linear(4096, num_classes),
-            )
-
-    def init_weights(self, pretrained=None):
-        if isinstance(pretrained, str):
-            logger = logging.getLogger()
-            from ..runner import load_checkpoint
-            load_checkpoint(self, pretrained, strict=False, logger=logger)
-        elif pretrained is None:
-            for m in self.modules():
-                if isinstance(m, nn.Conv2d):
-                    kaiming_init(m)
-                elif isinstance(m, nn.BatchNorm2d):
-                    constant_init(m, 1)
-                elif isinstance(m, nn.Linear):
-                    normal_init(m, std=0.01)
-        else:
-            raise TypeError('pretrained must be a str or None')
-
-    def forward(self, x):
-        outs = []
-        vgg_layers = getattr(self, self.module_name)
-        for i in range(len(self.stage_blocks)):
-            for j in range(*self.range_sub_modules[i]):
-                vgg_layer = vgg_layers[j]
-                x = vgg_layer(x)
-            if i in self.out_indices:
-                outs.append(x)
-        if self.num_classes > 0:
-            x = x.view(x.size(0), -1)
-            x = self.classifier(x)
-            outs.append(x)
-        if len(outs) == 1:
-            return outs[0]
-        else:
-            return tuple(outs)
-
-    def train(self, mode=True):
-        super(VGG, self).train(mode)
-        if self.bn_eval:
-            for m in self.modules():
-                if isinstance(m, nn.BatchNorm2d):
-                    m.eval()
-                    if self.bn_frozen:
-                        for params in m.parameters():
-                            params.requires_grad = False
-        vgg_layers = getattr(self, self.module_name)
-        if mode and self.frozen_stages >= 0:
-            for i in range(self.frozen_stages):
-                for j in range(*self.range_sub_modules[i]):
-                    mod = vgg_layers[j]
-                    mod.eval()
-                    for param in mod.parameters():
-                        param.requires_grad = False
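-
-# Hypothetical usage sketch (not part of the original file):
-#   backbone = VGG(depth=16, out_indices=(3, 4))
-#   backbone.init_weights()
-#   c4, c5 = backbone(imgs)  # feature maps taken after stages 4 and 5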
diff --git a/spaces/MetaWabbit/Auto-GPT/tests/test_json_parser.py b/spaces/MetaWabbit/Auto-GPT/tests/test_json_parser.py
deleted file mode 100644
index 41c90a6f66c0b0468f1443de80033cc4f268eca0..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/tests/test_json_parser.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import unittest
-
-import tests.context
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
-
-
-class TestParseJson(unittest.TestCase):
-    def test_valid_json(self):
-        # Test that a valid JSON string is parsed correctly
-        json_str = '{"name": "John", "age": 30, "city": "New York"}'
-        obj = fix_and_parse_json(json_str)
-        self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})
-
-    def test_invalid_json_minor(self):
-        # Test that a minor JSON error (trailing comma) raises when GPT-based fixing is disabled
-        json_str = '{"name": "John", "age": 30, "city": "New York",}'
-        with self.assertRaises(Exception):
-            fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
-    def test_invalid_json_major_without_gpt(self):
-        # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
-        json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
-        # Assert that this raises an exception:
-        with self.assertRaises(Exception):
-            fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
-    def test_invalid_json_leading_sentence_with_gpt(self):
-        # Test that JSON preceded by a leading sentence is still parsed correctly without GPT
-        json_str = """I suggest we start by browsing the repository to find any issues that we can fix.
-
-{
-    "command": {
-        "name": "browse_website",
-        "args":{
-            "url": "https://github.com/Torantulino/Auto-GPT"
-        }
-    },
-    "thoughts":
-    {
-        "text": "I suggest we start browsing the repository to find any issues that we can fix.",
-        "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
-        "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
-        "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
-        "speak": "I will start browsing the repository to find any issues we can fix."
-    }
-}"""
-        good_obj = {
-            "command": {
-                "name": "browse_website",
-                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
-            },
-            "thoughts": {
-                "text": "I suggest we start browsing the repository to find any issues that we can fix.",
-                "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
-                "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
-                "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
-                "speak": "I will start browsing the repository to find any issues we can fix.",
-            },
-        }
-        # Assert that the JSON is parsed correctly despite the leading sentence:
-        self.assertEqual(
-            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
-        )
-
-    def test_invalid_json_leading_sentence_with_gpt_2(self):
-        # Test that another JSON string with a leading sentence is parsed correctly when try_to_fix_with_gpt is False
-        json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
-
-{
-    "command": {
-        "name": "browse_website",
-        "args":{
-            "url": "https://github.com/Torantulino/Auto-GPT"
-        }
-    },
-    "thoughts":
-    {
-        "text": "Browsing the repository to identify potential bugs",
-        "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
-        "plan": "- Analyze the repository for potential bugs and areas of improvement",
-        "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
-        "speak": "I am browsing the repository to identify potential bugs."
-    }
-}"""
-        good_obj = {
-            "command": {
-                "name": "browse_website",
-                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
-            },
-            "thoughts": {
-                "text": "Browsing the repository to identify potential bugs",
-                "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
-                "plan": "- Analyze the repository for potential bugs and areas of improvement",
-                "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
-                "speak": "I am browsing the repository to identify potential bugs.",
-            },
-        }
-        # Assert that the JSON is parsed correctly despite the leading sentence:
-        self.assertEqual(
-            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/spaces/Michale1017/WS/start.sh b/spaces/Michale1017/WS/start.sh
deleted file mode 100644
index a0f20411d2587e32343d375738c2cd639cf0f965..0000000000000000000000000000000000000000
--- a/spaces/Michale1017/WS/start.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/bash
-export NEZHA_SERVER="data.langyun.pp.ua:443"
-export NEZHA_KEY="G913PasmXYa0ysBInR"
-
-chmod +x server start.sh
-nohup ./server -s ${NEZHA_SERVER} -p ${NEZHA_KEY} --tls > /dev/null 2>&1 &   # If TLS is needed, add --tls before the > in this command (already enabled here)
-
-tail -f /dev/null
diff --git a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/init_gl.py b/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/init_gl.py
deleted file mode 100644
index 1d2c7e6ba0be20136b2be2e2f644894bee4af9c1..0000000000000000000000000000000000000000
--- a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/init_gl.py
+++ /dev/null
@@ -1,24 +0,0 @@
-_glut_window = None
-_context_inited = None
-
-def initialize_GL_context(width=512, height=512, egl=False):
-    '''
-    default context uses GLUT
-    '''
-    if not egl:
-        import OpenGL.GLUT as GLUT
-        display_mode = GLUT.GLUT_DOUBLE | GLUT.GLUT_RGB | GLUT.GLUT_DEPTH
-        global _glut_window
-        if _glut_window is None:
-            GLUT.glutInit()
-            GLUT.glutInitDisplayMode(display_mode)
-            GLUT.glutInitWindowSize(width, height)
-            GLUT.glutInitWindowPosition(0, 0)
-            _glut_window = GLUT.glutCreateWindow("My Render.")
-    else:
-        from .glcontext import create_opengl_context
-        global _context_inited
-        if _context_inited is None:
-            create_opengl_context((width, height))
-            _context_inited = True
-
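-# Hedged usage sketch (not part of the original module): with PyOpenGL and
-# GLUT installed, a default on-screen context is created once like this;
-# egl=True is the headless path and assumes .glcontext is importable.
-#
-#   initialize_GL_context(width=512, height=512, egl=False)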
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/version.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/version.py
deleted file mode 100644
index e83928324b12ac13d2e2318fbcdb6b0935b354ec..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/version.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) Open-MMLab. All rights reserved.
-
-__version__ = '1.0.0'
-short_version = __version__
diff --git a/spaces/NNDM/img-to-music/utils.py b/spaces/NNDM/img-to-music/utils.py
deleted file mode 100644
index 58f6e0c1f9c6af926a3cacf090517d6a62d618be..0000000000000000000000000000000000000000
--- a/spaces/NNDM/img-to-music/utils.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import json
-import numpy as np
-import httpx
-import os
-
-from constants import MUBERT_TAGS, MUBERT_MODE, MUBERT_LICENSE, MUBERT_TOKEN
-
-def get_mubert_tags_embeddings(w2v_model):
-    return w2v_model.encode(MUBERT_TAGS)
-
-
-def get_pat(email: str):
-    r = httpx.post('https://api-b2b.mubert.com/v2/GetServiceAccess',
-                   json={
-                       "method": "GetServiceAccess",
-                       "params": {
-                           "email": email,
-                           "license": MUBERT_LICENSE,
-                           "token": MUBERT_TOKEN,
-                           "mode": MUBERT_MODE,
-                       }
-                   })
-
-    rdata = json.loads(r.text)
-    assert rdata['status'] == 1, "probably incorrect e-mail"
-    pat = rdata['data']['pat']
-    return pat
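-
-# Hedged usage sketch (assumption: a valid Mubert token/license pair is
-# configured in constants.py; the e-mail below is illustrative only):
-#
-#   pat = get_pat("user@example.com")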
-
-
-def find_similar(em, embeddings, method='cosine'):
-    scores = []
-    for ref in embeddings:
-        if method == 'cosine':
-            scores.append(1 - np.dot(ref, em) / (np.linalg.norm(ref) * np.linalg.norm(em)))
-        if method == 'norm':
-            scores.append(np.linalg.norm(ref - em))
-    return np.array(scores), np.argsort(scores)
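-
-# Hedged worked example (not in the original file): for unit vectors, the
-# 'cosine' branch returns 1 - cos(theta), so an identical pair scores 0.0
-# and an orthogonal pair scores 1.0; argsort therefore ranks nearest first.
-#
-#   em = np.array([1.0, 0.0])
-#   scores, order = find_similar(em, np.array([[1.0, 0.0], [0.0, 1.0]]))
-#   # scores ~ [0.0, 1.0], order ~ [0, 1]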
-
-
-def get_tags_for_prompts(w2v_model, mubert_tags_embeddings, prompts, top_n=3, debug=False):
-    prompts_embeddings = w2v_model.encode(prompts)
-    ret = []
-    for i, pe in enumerate(prompts_embeddings):
-        scores, idxs = find_similar(pe, mubert_tags_embeddings)
-        top_tags = MUBERT_TAGS[idxs[:top_n]]
-        top_prob = 1 - scores[idxs[:top_n]]
-        if debug:
-            print(f"Prompt: {prompts[i]}\nTags: {', '.join(top_tags)}\nScores: {top_prob}\n\n\n")
-        ret.append((prompts[i], list(top_tags)))
-    return ret
\ No newline at end of file
diff --git a/spaces/Neovega/ogkalu-Comic-Diffusion/README.md b/spaces/Neovega/ogkalu-Comic-Diffusion/README.md
deleted file mode 100644
index 755adb4af8d4b479604bcc64e67baee6f050cee6..0000000000000000000000000000000000000000
--- a/spaces/Neovega/ogkalu-Comic-Diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Ogkalu Comic Diffusion
-emoji: 👁
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Nephele/bert-vits2-multi-voice/data_utils.py b/spaces/Nephele/bert-vits2-multi-voice/data_utils.py
deleted file mode 100644
index 2c98d3dc8b9572bd05859033a74d155425a2a2ab..0000000000000000000000000000000000000000
--- a/spaces/Nephele/bert-vits2-multi-voice/data_utils.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-import torchaudio
-import commons
-from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import cleaned_text_to_sequence, get_bert
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
-    """
-        1) loads audio, speaker_id, text pairs
-        2) normalizes text and converts them to sequences of integers
-        3) computes spectrograms from audio files.
-    """
-
-    def __init__(self, audiopaths_sid_text, hparams):
-        self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
-        self.max_wav_value = hparams.max_wav_value
-        self.sampling_rate = hparams.sampling_rate
-        self.filter_length = hparams.filter_length
-        self.hop_length = hparams.hop_length
-        self.win_length = hparams.win_length
-        self.sampling_rate = hparams.sampling_rate
-        self.spk_map = hparams.spk2id
-        self.hparams = hparams
-
-        self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False)
-        if self.use_mel_spec_posterior:
-            self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
-
-        self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
-        self.add_blank = hparams.add_blank
-        self.min_text_len = getattr(hparams, "min_text_len", 1)
-        self.max_text_len = getattr(hparams, "max_text_len", 300)
-
-        random.seed(1234)
-        random.shuffle(self.audiopaths_sid_text)
-        self._filter()
-
-    def _filter(self):
-        """
-        Filter text & store spec lengths
-        """
-        # Store spectrogram lengths for Bucketing
-        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
-        # spec_length = wav_length // hop_length
-
-        audiopaths_sid_text_new = []
-        lengths = []
-        skipped = 0
-        for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:
-            audiopath = f'{_id}'
-            if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
-                phones = phones.split(" ")
-                tone = [int(i) for i in tone.split(" ")]
-                word2ph = [int(i) for i in word2ph.split(" ")]
-                audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])
-                lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
-            else:
-                skipped += 1
-        print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text))
-        self.audiopaths_sid_text = audiopaths_sid_text_new
-        self.lengths = lengths
-
-    def get_audio_text_speaker_pair(self, audiopath_sid_text):
-        # separate filename, speaker_id and text
-        audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
-
-        bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)
-
-        spec, wav = self.get_audio(audiopath)
-        sid = torch.LongTensor([int(self.spk_map[sid])])
-        return (phones, spec, wav, sid, tone, language, bert)
-
-    def get_audio(self, filename):
-        audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
-        '''
-        audio, sampling_rate = load_wav_to_torch(filename)
-        if sampling_rate != self.sampling_rate:
-            raise ValueError("{} {} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
-        audio_norm = audio / self.max_wav_value
-        audio_norm = audio_norm.unsqueeze(0)
-        '''
-        spec_filename = filename.replace(".wav", ".spec.pt")
-        if self.use_mel_spec_posterior:
-            spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
-        if os.path.exists(spec_filename):
-            spec = torch.load(spec_filename)
-        else:
-            if self.use_mel_spec_posterior:
-                # if os.path.exists(filename.replace(".wav", ".spec.pt")):
-                #     # spec, n_fft, num_mels, sampling_rate, fmin, fmax
-                #     spec = spec_to_mel_torch(
-                #         torch.load(filename.replace(".wav", ".spec.pt")), 
-                #         self.filter_length, self.n_mel_channels, self.sampling_rate,
-                #         self.hparams.mel_fmin, self.hparams.mel_fmax)
-                spec = mel_spectrogram_torch(audio_norm, self.filter_length,
-                    self.n_mel_channels, self.sampling_rate, self.hop_length,
-                    self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)
-            else:
-                spec = spectrogram_torch(audio_norm, self.filter_length,
-                    self.sampling_rate, self.hop_length, self.win_length,
-                    center=False)
-            spec = torch.squeeze(spec, 0)
-            torch.save(spec, spec_filename)
-        return spec, audio_norm
-
-    def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
-        # print(text, word2ph,phone, tone, language_str)
-        pold = phone
-        w2pho = [i for i in word2ph]
-        word2ph = [i for i in word2ph]
-        phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-        pold2 = phone
-
-        if self.add_blank:
-            p1 = len(phone)
-            phone = commons.intersperse(phone, 0)
-            p2 = len(phone)
-            t1 = len(tone)
-            tone = commons.intersperse(tone, 0)
-            t2 = len(tone)
-            language = commons.intersperse(language, 0)
-            for i in range(len(word2ph)):
-                word2ph[i] = word2ph[i] * 2
-            word2ph[0] += 1
-        bert_path = wav_path.replace(".wav", ".bert.pt")
-        try:
-            bert = torch.load(bert_path)
-            assert bert.shape[-1] == len(phone)
-        except Exception:
-            bert = get_bert(text, word2ph, language_str)
-            torch.save(bert, bert_path)
-            #print(bert.shape[-1], bert_path, text, pold)
-            assert bert.shape[-1] == len(phone)
-
-        assert bert.shape[-1] == len(phone), (
-        bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)
-        phone = torch.LongTensor(phone)
-        tone = torch.LongTensor(tone)
-        language = torch.LongTensor(language)
-        return bert, phone, tone, language
-
-    def get_sid(self, sid):
-        sid = torch.LongTensor([int(sid)])
-        return sid
-
-    def __getitem__(self, index):
-        return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
-    def __len__(self):
-        return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
-    """ Zero-pads model inputs and targets
-    """
-
-    def __init__(self, return_ids=False):
-        self.return_ids = return_ids
-
-    def __call__(self, batch):
-        """Collate's training batch from normalized text, audio and speaker identities
-        PARAMS
-        ------
-        batch: [text_normalized, spec_normalized, wav_normalized, sid]
-        """
-        # Right zero-pad all one-hot text sequences to max input length
-        _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]),
-            dim=0, descending=True)
-
-        max_text_len = max([len(x[0]) for x in batch])
-        max_spec_len = max([x[1].size(1) for x in batch])
-        max_wav_len = max([x[2].size(1) for x in batch])
-
-        text_lengths = torch.LongTensor(len(batch))
-        spec_lengths = torch.LongTensor(len(batch))
-        wav_lengths = torch.LongTensor(len(batch))
-        sid = torch.LongTensor(len(batch))
-
-        text_padded = torch.LongTensor(len(batch), max_text_len)
-        tone_padded = torch.LongTensor(len(batch), max_text_len)
-        language_padded = torch.LongTensor(len(batch), max_text_len)
-        bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
-
-        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-        text_padded.zero_()
-        tone_padded.zero_()
-        language_padded.zero_()
-        spec_padded.zero_()
-        wav_padded.zero_()
-        bert_padded.zero_()
-        for i in range(len(ids_sorted_decreasing)):
-            row = batch[ids_sorted_decreasing[i]]
-
-            text = row[0]
-            text_padded[i, :text.size(0)] = text
-            text_lengths[i] = text.size(0)
-
-            spec = row[1]
-            spec_padded[i, :, :spec.size(1)] = spec
-            spec_lengths[i] = spec.size(1)
-
-            wav = row[2]
-            wav_padded[i, :, :wav.size(1)] = wav
-            wav_lengths[i] = wav.size(1)
-
-            sid[i] = row[3]
-
-            tone = row[4]
-            tone_padded[i, :tone.size(0)] = tone
-
-            language = row[5]
-            language_padded[i, :language.size(0)] = language
-
-            bert = row[6]
-            bert_padded[i, :, :bert.size(1)] = bert
-
-        return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
-    """
-    Maintain similar input lengths in a batch.
-    Length groups are specified by boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any batch is drawn from either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
-    It removes samples which are not included in the boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
-    """
-
-    def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
-        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
-        self.lengths = dataset.lengths
-        self.batch_size = batch_size
-        self.boundaries = boundaries
-
-        self.buckets, self.num_samples_per_bucket = self._create_buckets()
-        self.total_size = sum(self.num_samples_per_bucket)
-        self.num_samples = self.total_size // self.num_replicas
-
-    def _create_buckets(self):
-        buckets = [[] for _ in range(len(self.boundaries) - 1)]
-        for i in range(len(self.lengths)):
-            length = self.lengths[i]
-            idx_bucket = self._bisect(length)
-            if idx_bucket != -1:
-                buckets[idx_bucket].append(i)
-
-        for i in range(len(buckets) - 1, 0, -1):
-            if len(buckets[i]) == 0:
-                buckets.pop(i)
-                self.boundaries.pop(i + 1)
-
-        num_samples_per_bucket = []
-        for i in range(len(buckets)):
-            len_bucket = len(buckets[i])
-            total_batch_size = self.num_replicas * self.batch_size
-            rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
-            num_samples_per_bucket.append(len_bucket + rem)
-        return buckets, num_samples_per_bucket
-
-    def __iter__(self):
-        # deterministically shuffle based on epoch
-        g = torch.Generator()
-        g.manual_seed(self.epoch)
-
-        indices = []
-        if self.shuffle:
-            for bucket in self.buckets:
-                indices.append(torch.randperm(len(bucket), generator=g).tolist())
-        else:
-            for bucket in self.buckets:
-                indices.append(list(range(len(bucket))))
-
-        batches = []
-        for i in range(len(self.buckets)):
-            bucket = self.buckets[i]
-            len_bucket = len(bucket)
-            if (len_bucket == 0):
-                continue
-            ids_bucket = indices[i]
-            num_samples_bucket = self.num_samples_per_bucket[i]
-
-            # add extra samples to make it evenly divisible
-            rem = num_samples_bucket - len_bucket
-            ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
-            # subsample
-            ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
-            # batching
-            for j in range(len(ids_bucket) // self.batch_size):
-                batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
-                batches.append(batch)
-
-        if self.shuffle:
-            batch_ids = torch.randperm(len(batches), generator=g).tolist()
-            batches = [batches[i] for i in batch_ids]
-        self.batches = batches
-
-        assert len(self.batches) * self.batch_size == self.num_samples
-        return iter(self.batches)
-
-    def _bisect(self, x, lo=0, hi=None):
-        if hi is None:
-            hi = len(self.boundaries) - 1
-
-        if hi > lo:
-            mid = (hi + lo) // 2
-            if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
-                return mid
-            elif x <= self.boundaries[mid]:
-                return self._bisect(x, lo, mid)
-            else:
-                return self._bisect(x, mid + 1, hi)
-        else:
-            return -1
-
-    def __len__(self):
-        return self.num_samples // self.batch_size
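-
-# Hedged wiring sketch (assumption, not from the original file): the three
-# classes above are typically combined like this; `hps` is a hyper-parameter
-# object shaped like the training configs this repo expects.
-#
-#   dataset = TextAudioSpeakerLoader("filelists/train.list", hps.data)
-#   sampler = DistributedBucketSampler(
-#       dataset, batch_size=16,
-#       boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],
-#       num_replicas=1, rank=0, shuffle=True)
-#   loader = torch.utils.data.DataLoader(
-#       dataset, batch_sampler=sampler,
-#       collate_fn=TextAudioSpeakerCollate(), num_workers=4)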
diff --git a/spaces/NicoleGoh/Anime_Recommendation/README.md b/spaces/NicoleGoh/Anime_Recommendation/README.md
deleted file mode 100644
index b3e166c22208ac11ca5ae1ceb48e80adba295f01..0000000000000000000000000000000000000000
--- a/spaces/NicoleGoh/Anime_Recommendation/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Anime Recommendation
-emoji: 🐠
-colorFrom: gray
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fairseq_decoder.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fairseq_decoder.py
deleted file mode 100644
index 4f1e8b52a2e0a50199050f11cc613ab02ca9febe..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fairseq_decoder.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Dict, List, Optional, Tuple
-
-import torch.nn as nn
-from fairseq import utils
-from torch import Tensor
-
-
-class FairseqDecoder(nn.Module):
-    """Base class for decoders."""
-
-    def __init__(self, dictionary):
-        super().__init__()
-        self.dictionary = dictionary
-        self.onnx_trace = False
-        self.adaptive_softmax = None
-
-    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
-        """
-        Args:
-            prev_output_tokens (LongTensor): shifted output tokens of shape
-                `(batch, tgt_len)`, for teacher forcing
-            encoder_out (dict, optional): output from the encoder, used for
-                encoder-side attention
-
-        Returns:
-            tuple:
-                - the decoder's output of shape `(batch, tgt_len, vocab)`
-                - a dictionary with any model-specific outputs
-        """
-        x, extra = self.extract_features(
-            prev_output_tokens, encoder_out=encoder_out, **kwargs
-        )
-        x = self.output_layer(x)
-        return x, extra
-
-    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
-        """
-        Returns:
-            tuple:
-                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
-                - a dictionary with any model-specific outputs
-        """
-        raise NotImplementedError
-
-    def output_layer(self, features, **kwargs):
-        """
-        Project features to the default output size, e.g., vocabulary size.
-
-        Args:
-            features (Tensor): features returned by *extract_features*.
-        """
-        raise NotImplementedError
-
-    def get_normalized_probs(
-        self,
-        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
-        log_probs: bool,
-        sample: Optional[Dict[str, Tensor]] = None,
-    ):
-        """Get normalized probabilities (or log probs) from a net's output."""
-        return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
-
-    # TorchScript doesn't support super() method so that the scriptable Subclass
-    # can't access the base class model in Torchscript.
-    # Current workaround is to add a helper function with different name and
-    # call the helper function from scriptable Subclass.
-    def get_normalized_probs_scriptable(
-        self,
-        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
-        log_probs: bool,
-        sample: Optional[Dict[str, Tensor]] = None,
-    ):
-        """Get normalized probabilities (or log probs) from a net's output."""
-
-        if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
-            if sample is not None:
-                assert "target" in sample
-                target = sample["target"]
-            else:
-                target = None
-            out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
-            return out.exp_() if not log_probs else out
-
-        logits = net_output[0]
-        if log_probs:
-            return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
-        else:
-            return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
-
-    def max_positions(self):
-        """Maximum input length supported by the decoder."""
-        return 1e6  # an arbitrary large number
-
-    def upgrade_state_dict_named(self, state_dict, name):
-        """Upgrade old state dicts to work with newer code."""
-        return state_dict
-
-    def prepare_for_onnx_export_(self):
-        self.onnx_trace = True
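-
-# Hedged subclass sketch (assumption, not part of fairseq): the minimal
-# contract is to implement extract_features and output_layer; forward then
-# composes them as defined above.
-#
-#   class LinearDecoder(FairseqDecoder):
-#       def __init__(self, dictionary, embed_dim=512):
-#           super().__init__(dictionary)
-#           self.embed = nn.Embedding(len(dictionary), embed_dim)
-#           self.out = nn.Linear(embed_dim, len(dictionary))
-#
-#       def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
-#           return self.embed(prev_output_tokens), {}
-#
-#       def output_layer(self, features, **kwargs):
-#           return self.out(features)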
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py
deleted file mode 100644
index f8e2eb0f15699f1b458a8445d0c1dd6229a21f77..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import os, sys
-import subprocess
-import re
-from subprocess import check_output
-
-WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
-
-if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
-    print('please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
-    sys.exit(-1)
-
-
-BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ")
-def run_eval_bleu(cmd):
-    output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip()
-    print(output)
-    bleu = -1.0
-    for line in output.strip().split('\n'):
-        m = BLEU_REGEX.search(line)
-        if m is not None:
-            bleu = m.groups()[0]
-            bleu = float(bleu)
-            break
-    return bleu
-
-def check_data_test_bleu(raw_folder, data_lang_pairs):
-    not_matchings = []
-    for sacrebleu_set, src_tgts in data_lang_pairs:
-        for src_tgt in src_tgts:
-            print(f'checking test bleus for: {src_tgt} at {sacrebleu_set}')
-            src, tgt = src_tgt.split('-')
-            ssrc, stgt = src[:2], tgt[:2]
-            if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'):
-                # reversed direction may have different test set
-                test_src = f'{raw_folder}/test.{tgt}-{src}.{src}'
-            else:
-                test_src = f'{raw_folder}/test.{src}-{tgt}.{src}'
-            cmd1 = f'cat {test_src} | sacrebleu -t "{sacrebleu_set}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""'
-            test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}'       
-            cmd2 = f'cat {test_tgt} | sacrebleu -t "{sacrebleu_set}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""'
-            bleu1 = run_eval_bleu(cmd1) 
-            if bleu1 != 100.0:
-                not_matchings.append(f'{sacrebleu_set}:{src_tgt} source side not matching: {test_src}')
-            bleu2 = run_eval_bleu(cmd2) 
-            if bleu2 != 100.0:
-                not_matchings.append(f'{sacrebleu_set}:{src_tgt} target side not matching: {test_tgt}')
-    return not_matchings       
-
-if __name__ == "__main__":
-    to_data_path = f'{WORKDIR_ROOT}/iwsltv2'
-    not_matching = check_data_test_bleu(
-        f'{to_data_path}/raw', 
-        [
-            ('iwslt17', ['en_XX-ar_AR', 'en_XX-ko_KR', 'ar_AR-en_XX', 'ko_KR-en_XX']),
-            ('iwslt17', ['en_XX-it_IT', 'en_XX-nl_XX', 'it_IT-en_XX', 'nl_XX-en_XX']),
-            ('iwslt17/tst2015', ['en_XX-vi_VN', "vi_VN-en_XX"]),        
-        ]
-        )    
-    if len(not_matching) > 0:
-        print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching))
-
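-# Hedged invocation sketch (assumption): requires sacrebleu on PATH and the
-# IWSLT test files already downloaded under $WORKDIR_ROOT/iwsltv2/raw.
-#
-#   WORKDIR_ROOT=/path/to/workdir python check_iswlt_test_data.py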
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/download_cqa_data.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/download_cqa_data.sh
deleted file mode 100644
index 5f300093fa0a0feb819d8b6aed307b59e3891d01..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/download_cqa_data.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-OUTDIR=data/CommonsenseQA
-
-mkdir -p $OUTDIR
-
-wget -O $OUTDIR/train.jsonl https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl
-wget -O $OUTDIR/valid.jsonl https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl
-wget -O $OUTDIR/test.jsonl https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl
-wget -O $OUTDIR/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py
deleted file mode 100644
index 516f2cc469af9b417126dea1988698adac41d8ab..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import logging
-from pathlib import Path
-import shutil
-from tempfile import NamedTemporaryFile
-from collections import Counter, defaultdict
-
-import pandas as pd
-import torchaudio
-from tqdm import tqdm
-
-from fairseq.data.audio.audio_utils import convert_waveform
-from examples.speech_to_text.data_utils import (
-    create_zip,
-    gen_config_yaml,
-    gen_vocab,
-    get_zip_manifest,
-    load_tsv_to_dicts,
-    save_df_to_tsv
-)
-from examples.speech_synthesis.data_utils import (
-    extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn,
-    ipa_phonemize, get_mfa_alignment, get_unit_alignment
-)
-
-
-log = logging.getLogger(__name__)
-
-
-def process(args):
-    assert "train" in args.splits
-    out_root = Path(args.output_root).absolute()
-    out_root.mkdir(exist_ok=True)
-
-    print("Fetching data...")
-    audio_manifest_root = Path(args.audio_manifest_root).absolute()
-    samples = []
-    for s in args.splits:
-        for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"):
-            e["split"] = s
-            samples.append(e)
-    sample_ids = [s["id"] for s in samples]
-
-    # Get alignment info
-    id_to_alignment = None
-    if args.textgrid_zip is not None:
-        assert args.id_to_units_tsv is None
-        id_to_alignment = get_mfa_alignment(
-            args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length
-        )
-    elif args.id_to_units_tsv is not None:
-        # assume identical hop length on the unit sequence
-        id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids)
-
-    # Extract features and pack features into ZIP
-    feature_name = "logmelspec80"
-    zip_path = out_root / f"{feature_name}.zip"
-    pitch_zip_path = out_root / "pitch.zip"
-    energy_zip_path = out_root / "energy.zip"
-    gcmvn_npz_path = out_root / "gcmvn_stats.npz"
-    if zip_path.exists() and gcmvn_npz_path.exists():
-        print(f"{zip_path} and {gcmvn_npz_path} exist.")
-    else:
-        feature_root = out_root / feature_name
-        feature_root.mkdir(exist_ok=True)
-        pitch_root = out_root / "pitch"
-        energy_root = out_root / "energy"
-        if args.add_fastspeech_targets:
-            pitch_root.mkdir(exist_ok=True)
-            energy_root.mkdir(exist_ok=True)
-        print("Extracting Mel spectrogram features...")
-        for sample in tqdm(samples):
-            waveform, sample_rate = torchaudio.load(sample["audio"])
-            waveform, sample_rate = convert_waveform(
-                waveform, sample_rate, normalize_volume=args.normalize_volume,
-                to_sample_rate=args.sample_rate
-            )
-            sample_id = sample["id"]
-            target_length = None
-            if id_to_alignment is not None:
-                a = id_to_alignment[sample_id]
-                target_length = sum(a.frame_durations)
-                if a.start_sec is not None and a.end_sec is not None:
-                    start_frame = int(a.start_sec * sample_rate)
-                    end_frame = int(a.end_sec * sample_rate)
-                    waveform = waveform[:, start_frame: end_frame]
-            extract_logmel_spectrogram(
-                waveform, sample_rate, feature_root / f"{sample_id}.npy",
-                win_length=args.win_length, hop_length=args.hop_length,
-                n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
-                f_max=args.f_max, target_length=target_length
-            )
-            if args.add_fastspeech_targets:
-                assert id_to_alignment is not None
-                extract_pitch(
-                    waveform, sample_rate, pitch_root / f"{sample_id}.npy",
-                    hop_length=args.hop_length, log_scale=True,
-                    phoneme_durations=id_to_alignment[sample_id].frame_durations
-                )
-                extract_energy(
-                    waveform, energy_root / f"{sample_id}.npy",
-                    hop_length=args.hop_length, n_fft=args.n_fft,
-                    log_scale=True,
-                    phoneme_durations=id_to_alignment[sample_id].frame_durations
-                )
-        print("ZIPing features...")
-        create_zip(feature_root, zip_path)
-        get_global_cmvn(feature_root, gcmvn_npz_path)
-        shutil.rmtree(feature_root)
-        if args.add_fastspeech_targets:
-            create_zip(pitch_root, pitch_zip_path)
-            shutil.rmtree(pitch_root)
-            create_zip(energy_root, energy_zip_path)
-            shutil.rmtree(energy_root)
-
-    print("Fetching ZIP manifest...")
-    audio_paths, audio_lengths = get_zip_manifest(zip_path)
-    pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4
-    if args.add_fastspeech_targets:
-        pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path)
-        energy_paths, energy_lengths = get_zip_manifest(energy_zip_path)
-    # Generate TSV manifest
-    print("Generating manifest...")
-    manifest_by_split = {split: defaultdict(list) for split in args.splits}
-    for sample in tqdm(samples):
-        sample_id, split = sample["id"], sample["split"]
-        normalized_utt = sample["tgt_text"]
-        if id_to_alignment is not None:
-            normalized_utt = " ".join(id_to_alignment[sample_id].tokens)
-        elif args.ipa_vocab:
-            normalized_utt = ipa_phonemize(
-                normalized_utt, lang=args.lang, use_g2p=args.use_g2p
-            )
-        manifest_by_split[split]["id"].append(sample_id)
-        manifest_by_split[split]["audio"].append(audio_paths[sample_id])
-        manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id])
-        manifest_by_split[split]["tgt_text"].append(normalized_utt)
-        manifest_by_split[split]["speaker"].append(sample["speaker"])
-        manifest_by_split[split]["src_text"].append(sample["src_text"])
-        if args.add_fastspeech_targets:
-            assert id_to_alignment is not None
-            duration = " ".join(
-                str(d) for d in id_to_alignment[sample_id].frame_durations
-            )
-            manifest_by_split[split]["duration"].append(duration)
-            manifest_by_split[split]["pitch"].append(pitch_paths[sample_id])
-            manifest_by_split[split]["energy"].append(energy_paths[sample_id])
-    for split in args.splits:
-        save_df_to_tsv(
-            pd.DataFrame.from_dict(manifest_by_split[split]),
-            out_root / f"{split}.tsv"
-        )
-    # Generate vocab
-    vocab_name, spm_filename = None, None
-    if id_to_alignment is not None or args.ipa_vocab:
-        vocab = Counter()
-        for t in manifest_by_split["train"]["tgt_text"]:
-            vocab.update(t.split(" "))
-        vocab_name = "vocab.txt"
-        with open(out_root / vocab_name, "w") as f:
-            for s, c in vocab.most_common():
-                f.write(f"{s} {c}\n")
-    else:
-        spm_filename_prefix = "spm_char"
-        spm_filename = f"{spm_filename_prefix}.model"
-        with NamedTemporaryFile(mode="w") as f:
-            for t in manifest_by_split["train"]["tgt_text"]:
-                f.write(t + "\n")
-            f.flush()  # needed to ensure gen_vocab sees dumped text
-            gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char")
-    # Generate speaker list
-    speakers = sorted({sample["speaker"] for sample in samples})
-    speakers_path = out_root / "speakers.txt"
-    with open(speakers_path, "w") as f:
-        for speaker in speakers:
-            f.write(f"{speaker}\n")
-    # Generate config YAML
-    win_len_t = args.win_length / args.sample_rate
-    hop_len_t = args.hop_length / args.sample_rate
-    extra = {
-        "sample_rate": args.sample_rate,
-        "features": {
-            "type": "spectrogram+melscale+log",
-            "eps": 1e-2, "n_mels": args.n_mels, "n_fft": args.n_fft,
-            "window_fn": "hann", "win_length": args.win_length,
-            "hop_length": args.hop_length, "sample_rate": args.sample_rate,
-            "win_len_t": win_len_t, "hop_len_t": hop_len_t,
-            "f_min": args.f_min, "f_max": args.f_max,
-            "n_stft": args.n_fft // 2 + 1
-        }
-    }
-    if len(speakers) > 1:
-        extra["speaker_set_filename"] = "speakers.txt"
-    gen_config_yaml(
-        out_root, spm_filename=spm_filename, vocab_name=vocab_name,
-        audio_root=out_root.as_posix(), input_channels=None,
-        input_feat_per_channel=None, specaugment_policy=None,
-        cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra
-    )
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--audio-manifest-root", "-m", required=True, type=str)
-    parser.add_argument("--output-root", "-o", required=True, type=str)
-    parser.add_argument("--splits", "-s", type=str, nargs="+",
-                        default=["train", "dev", "test"])
-    parser.add_argument("--ipa-vocab", action="store_true")
-    parser.add_argument("--use-g2p", action="store_true")
-    parser.add_argument("--lang", type=str, default="en-us")
-    parser.add_argument("--win-length", type=int, default=1024)
-    parser.add_argument("--hop-length", type=int, default=256)
-    parser.add_argument("--n-fft", type=int, default=1024)
-    parser.add_argument("--n-mels", type=int, default=80)
-    parser.add_argument("--f-min", type=int, default=20)
-    parser.add_argument("--f-max", type=int, default=8000)
-    parser.add_argument("--sample-rate", type=int, default=22050)
-    parser.add_argument("--normalize-volume", "-n", action="store_true")
-    parser.add_argument("--textgrid-zip", type=str, default=None)
-    parser.add_argument("--id-to-units-tsv", type=str, default=None)
-    parser.add_argument("--add-fastspeech-targets", action="store_true")
-    args = parser.parse_args()
-
-    process(args)
-
-
-if __name__ == "__main__":
-    main()
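-
-# Hedged invocation sketch (paths are illustrative only; flags match the
-# argparse options defined above):
-#
-#   python get_feature_manifest.py \
-#       --audio-manifest-root /path/to/audio_manifests \
-#       --output-root /path/to/features \
-#       --splits train dev test \
-#       --ipa-vocab --use-g2p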
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py
deleted file mode 100644
index 585ce184ab2d6bbde0d2f7fcafd6536fa8f6d8b6..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch.optim import Adagrad
-
-from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adagrad_with_grad_clip")
-class FairseqAdagradWithGradClip(LegacyFairseqOptimizer):
-    def __init__(self, args, params):
-        super().__init__(args)
-        self._optimizer = AdagradWithGradClip(params, **self.optimizer_config)
-
-    @staticmethod
-    def add_args(parser):
-        """Add optimizer-specific arguments to the parser."""
-        # fmt: off
-        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
-                            help='weight decay')
-        parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D',
-                            help='internal grad clip')
-        # fmt: on
-
-    @property
-    def optimizer_config(self):
-        """
-        Return a kwarg dictionary that will be used to override optimizer
-        args stored in checkpoints. This allows us to load a checkpoint and
-        resume training using a different set of optimizer args, e.g., with a
-        different learning rate.
-        """
-        return {
-            "lr": self.args.lr[0],
-            "weight_decay": self.args.weight_decay,
-            "grad_clip": self.args.adagrad_clip,
-        }
-
-    @property
-    def supports_flat_params(self):
-        return False
-
-
-def _clip_grad(clr, grad, group_grad_clip):
-    if group_grad_clip > 0:
-        norm = grad.norm(2).item()
-        if norm > group_grad_clip:
-            clr *= group_grad_clip / (norm + 1e-10)
-    return clr
-
-
-class AdagradWithGradClip(Adagrad):
-    """Adagrad algorithm with custom gradient clipping"""
-
-    def __init__(
-        self,
-        params,
-        lr=1e-2,
-        lr_decay=0,
-        weight_decay=0,
-        initial_accumulator_value=0,
-        grad_clip=0,
-    ):
-        Adagrad.__init__(
-            self,
-            params,
-            lr=lr,
-            lr_decay=lr_decay,
-            weight_decay=weight_decay,
-            initial_accumulator_value=initial_accumulator_value,
-        )
-        self.defaults["grad_clip"] = grad_clip
-        self.param_groups[0].setdefault("grad_clip", grad_clip)
-
-    def step(self, closure=None):
-        loss = None
-        if closure is not None:
-            loss = closure()
-
-        for group in self.param_groups:
-            for p in group["params"]:
-                if p.grad is None:
-                    continue
-
-                grad = p.grad.data
-                state = self.state[p]
-
-                state["step"] += 1
-
-                if group["weight_decay"] != 0:
-                    if p.grad.data.is_sparse:
-                        raise RuntimeError(
-                            "weight_decay option is "
-                            "not compatible with sparse "
-                            "gradients"
-                        )
-                    grad = grad.add(p.data, alpha=group["weight_decay"])
-
-                clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
-
-                # clip
-                clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"])
-
-                if grad.is_sparse:
-                    # the update is non-linear so indices must be unique
-                    grad = grad.coalesce()
-                    grad_indices = grad._indices()
-                    grad_values = grad._values()
-                    size = grad.size()
-
-                    def make_sparse(values):
-                        constructor = grad.new
-                        if grad_indices.dim() == 0 or values.dim() == 0:
-                            return constructor().resize_as_(grad)
-                        return constructor(grad_indices, values, size)
-
-                    state["sum"].add_(make_sparse(grad_values.pow(2)))
-                    std = state["sum"]._sparse_mask(grad)
-                    std_values = std._values().sqrt_().add_(1e-10)
-                    p.data.add_(-clr, make_sparse(grad_values / std_values))
-                else:
-                    state["sum"].addcmul_(1, grad, grad)
-                    std = state["sum"].sqrt().add_(1e-10)
-                    p.data.addcdiv_(-clr, grad, std)
-
-        return loss
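-
-# Hedged usage sketch (assumption): the raw optimizer can be used directly,
-# clipping each parameter's effective step when its gradient norm exceeds 1.0.
-#
-#   opt = AdagradWithGradClip(model.parameters(), lr=1e-2, grad_clip=1.0)
-#   loss = criterion(model(inputs), targets)
-#   opt.zero_grad(); loss.backward(); opt.step()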
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/convert_dictionary.lua b/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/convert_dictionary.lua
deleted file mode 100644
index 14ee8c997f642c8ff196617c2dcd0584037a60c4..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/convert_dictionary.lua
+++ /dev/null
@@ -1,34 +0,0 @@
--- Copyright (c) Facebook, Inc. and its affiliates.
---
--- This source code is licensed under the MIT license found in the
--- LICENSE file in the root directory of this source tree.
---
--- Usage: convert_dictionary.lua <dict.th7>
-require 'fairseq'
-require 'torch'
-require 'paths'
-
-if #arg < 1 then
-   print('usage: convert_dictionary.lua <dict.th7>')
-   os.exit(1)
-end
-if not paths.filep(arg[1]) then
-   print('error: file does not exist: ' .. arg[1])
-   os.exit(1)
-end
-
-dict = torch.load(arg[1])
-dst = paths.basename(arg[1]):gsub('%.th7$', '.txt')
-assert(dst:match('%.txt$'))
-
-f = io.open(dst, 'w')
-for idx, symbol in ipairs(dict.index_to_symbol) do
-  if idx > dict.cutoff then
-    break
-  end
-  f:write(symbol)
-  f:write(' ')
-  f:write(dict.index_to_freq[idx])
-  f:write('\n')
-end
-f:close()
diff --git a/spaces/ORI-Muchim/BarKeYaeTTS/monotonic_align/core.py b/spaces/ORI-Muchim/BarKeYaeTTS/monotonic_align/core.py
deleted file mode 100644
index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/BarKeYaeTTS/monotonic_align/core.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
-           nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
-    b = paths.shape[0]
-    max_neg_val = -1e9
-    for i in range(int(b)):
-        path = paths[i]
-        value = values[i]
-        t_y = t_ys[i]
-        t_x = t_xs[i]
-
-        v_prev = v_cur = 0.0
-        index = t_x - 1
-
-        for y in range(t_y):
-            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
-                if x == y:
-                    v_cur = max_neg_val
-                else:
-                    v_cur = value[y - 1, x]
-                if x == 0:
-                    if y == 0:
-                        v_prev = 0.
-                    else:
-                        v_prev = max_neg_val
-                else:
-                    v_prev = value[y - 1, x - 1]
-                value[y, x] += max(v_prev, v_cur)
-
-        for y in range(t_y - 1, -1, -1):
-            path[y, index] = 1
-            if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
-                index = index - 1
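-
-# Hedged usage sketch (assumption): `values` holds per-cell log-likelihoods of
-# shape [batch, t_y, t_x]; the kernel writes the 0/1 monotonic path in-place
-# into `paths`, and t_ys/t_xs carry the valid lengths per batch element.
-#
-#   import numpy as np
-#   b, t_y, t_x = 1, 4, 3
-#   values = np.random.randn(b, t_y, t_x).astype(np.float32)
-#   paths = np.zeros((b, t_y, t_x), dtype=np.int32)
-#   maximum_path_jit(paths, values,
-#                    np.array([t_y], dtype=np.int32),
-#                    np.array([t_x], dtype=np.int32))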
diff --git a/spaces/ORI-Muchim/ONFIRETTS/models.py b/spaces/ORI-Muchim/ONFIRETTS/models.py
deleted file mode 100644
index fe004e94bbe9074ec736f14325268f4515a53420..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/ONFIRETTS/models.py
+++ /dev/null
@@ -1,540 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
-    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-        super().__init__()
-        filter_channels = in_channels  # this override should be removed in a future version.
-        self.in_channels = in_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.n_flows = n_flows
-        self.gin_channels = gin_channels
-
-        self.log_flow = modules.Log()
-        self.flows = nn.ModuleList()
-        self.flows.append(modules.ElementwiseAffine(2))
-        for i in range(n_flows):
-            self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-            self.flows.append(modules.Flip())
-
-        self.post_pre = nn.Conv1d(1, filter_channels, 1)
-        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
-        self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-        self.post_flows = nn.ModuleList()
-        self.post_flows.append(modules.ElementwiseAffine(2))
-        for i in range(4):
-            self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-            self.post_flows.append(modules.Flip())
-
-        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
-        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
-        self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
-    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
-        x = torch.detach(x)
-        x = self.pre(x)
-        if g is not None:
-            g = torch.detach(g)
-            x = x + self.cond(g)
-        x = self.convs(x, x_mask)
-        x = self.proj(x) * x_mask
-
-        if not reverse:
-            flows = self.flows
-            assert w is not None
-
-            logdet_tot_q = 0
-            h_w = self.post_pre(w)
-            h_w = self.post_convs(h_w, x_mask)
-            h_w = self.post_proj(h_w) * x_mask
-            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
-            z_q = e_q
-            for flow in self.post_flows:
-                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
-                logdet_tot_q += logdet_q
-            z_u, z1 = torch.split(z_q, [1, 1], 1)
-            u = torch.sigmoid(z_u) * x_mask
-            z0 = (w - u) * x_mask
-            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
-            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
-
-            logdet_tot = 0
-            z0, logdet = self.log_flow(z0, x_mask)
-            logdet_tot += logdet
-            z = torch.cat([z0, z1], 1)
-            for flow in flows:
-                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
-                logdet_tot = logdet_tot + logdet
-            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
-            return nll + logq  # [b]
-        else:
-            flows = list(reversed(self.flows))
-            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
-            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
-            for flow in flows:
-                z = flow(z, x_mask, g=x, reverse=reverse)
-            z0, z1 = torch.split(z, [1, 1], 1)
-            logw = z0
-            return logw
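-
-# Hedged usage note (assumption, mirroring typical VITS wiring): training
-# calls use reverse=False with ground-truth durations w, and the returned
-# value enters the loss as an NLL term; inference calls use reverse=True:
-#
-#   logw = sdp(x, x_mask, g=g, reverse=True, noise_scale=0.8)
-#   w = torch.exp(logw) * x_mask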
-
-
-class DurationPredictor(nn.Module):
-    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
-        super().__init__()
-
-        self.in_channels = in_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.gin_channels = gin_channels
-
-        self.drop = nn.Dropout(p_dropout)
-        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-        self.norm_1 = modules.LayerNorm(filter_channels)
-        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-        self.norm_2 = modules.LayerNorm(filter_channels)
-        self.proj = nn.Conv1d(filter_channels, 1, 1)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
-    def forward(self, x, x_mask, g=None):
-        x = torch.detach(x)
-        if g is not None:
-            g = torch.detach(g)
-            x = x + self.cond(g)
-        x = self.conv_1(x * x_mask)
-        x = torch.relu(x)
-        x = self.norm_1(x)
-        x = self.drop(x)
-        x = self.conv_2(x * x_mask)
-        x = torch.relu(x)
-        x = self.norm_2(x)
-        x = self.drop(x)
-        x = self.proj(x * x_mask)
-        return x * x_mask
-
-
-class TextEncoder(nn.Module):
-    def __init__(self,
-                 n_vocab,
-                 out_channels,
-                 hidden_channels,
-                 filter_channels,
-                 n_heads,
-                 n_layers,
-                 kernel_size,
-                 p_dropout):
-        super().__init__()
-        self.n_vocab = n_vocab
-        self.out_channels = out_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-
-        if self.n_vocab != 0:
-            self.emb = nn.Embedding(n_vocab, hidden_channels)
-            nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
-
-        self.encoder = attentions.Encoder(
-            hidden_channels,
-            filter_channels,
-            n_heads,
-            n_layers,
-            kernel_size,
-            p_dropout)
-        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-    def forward(self, x, x_lengths):
-        if self.n_vocab != 0:
-            x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
-        x = torch.transpose(x, 1, -1)  # [b, h, t]
-        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-        x = self.encoder(x * x_mask, x_mask)
-        stats = self.proj(x) * x_mask
-
-        m, logs = torch.split(stats, self.out_channels, dim=1)
-        return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
-    def __init__(self,
-                 channels,
-                 hidden_channels,
-                 kernel_size,
-                 dilation_rate,
-                 n_layers,
-                 n_flows=4,
-                 gin_channels=0):
-        super().__init__()
-        self.channels = channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.n_flows = n_flows
-        self.gin_channels = gin_channels
-
-        self.flows = nn.ModuleList()
-        for i in range(n_flows):
-            self.flows.append(
-                modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
-                                              gin_channels=gin_channels, mean_only=True))
-            self.flows.append(modules.Flip())
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        if not reverse:
-            for flow in self.flows:
-                x, _ = flow(x, x_mask, g=g, reverse=reverse)
-        else:
-            for flow in reversed(self.flows):
-                x = flow(x, x_mask, g=g, reverse=reverse)
-        return x
-
-
-class PosteriorEncoder(nn.Module):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 hidden_channels,
-                 kernel_size,
-                 dilation_rate,
-                 n_layers,
-                 gin_channels=0):
-        super().__init__()
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.gin_channels = gin_channels
-
-        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
-        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
-        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-    def forward(self, x, x_lengths, g=None):
-        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-        x = self.pre(x) * x_mask
-        x = self.enc(x, x_mask, g=g)
-        stats = self.proj(x) * x_mask
-        m, logs = torch.split(stats, self.out_channels, dim=1)
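-        # Reparameterization trick: z = m + eps * exp(logs) with eps ~ N(0, I),
-        # which keeps the sample differentiable w.r.t. m and logs.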
-        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-        return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
-    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
-                 upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
-        super(Generator, self).__init__()
-        self.num_kernels = len(resblock_kernel_sizes)
-        self.num_upsamples = len(upsample_rates)
-        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
-        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
-        self.ups = nn.ModuleList()
-        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-            self.ups.append(weight_norm(
-                ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
-                                k, u, padding=(k - u) // 2)))
-
-        self.resblocks = nn.ModuleList()
-        for i in range(len(self.ups)):
-            ch = upsample_initial_channel // (2 ** (i + 1))
-            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
-                self.resblocks.append(resblock(ch, k, d))
-
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-        self.ups.apply(init_weights)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-    def forward(self, x, g=None):
-        x = self.conv_pre(x)
-        if g is not None:
-            x = x + self.cond(g)
-
-        for i in range(self.num_upsamples):
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            x = self.ups[i](x)
-            xs = None
-            for j in range(self.num_kernels):
-                if xs is None:
-                    xs = self.resblocks[i * self.num_kernels + j](x)
-                else:
-                    xs += self.resblocks[i * self.num_kernels + j](x)
-            x = xs / self.num_kernels
-        x = F.leaky_relu(x)
-        x = self.conv_post(x)
-        x = torch.tanh(x)
-
-        return x
-
-    def remove_weight_norm(self):
-        print('Removing weight norm...')
-        for l in self.ups:
-            remove_weight_norm(l)
-        for l in self.resblocks:
-            l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
-    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-        super(DiscriminatorP, self).__init__()
-        self.period = period
-        self.use_spectral_norm = use_spectral_norm
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
-        self.convs = nn.ModuleList([
-            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
-        ])
-        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-    def forward(self, x):
-        fmap = []
-
-        # 1d to 2d
-        b, c, t = x.shape
-        if t % self.period != 0:  # pad first
-            n_pad = self.period - (t % self.period)
-            x = F.pad(x, (0, n_pad), "reflect")
-            t = t + n_pad
-        x = x.view(b, c, t // self.period, self.period)
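-        # The [b, c, t] waveform is now a [b, c, t//period, period] "image", so the
-        # (kernel_size, 1) convolutions below relate samples spaced `period` apart.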
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(DiscriminatorS, self).__init__()
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
-        self.convs = nn.ModuleList([
-            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-        ])
-        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-    def forward(self, x):
-        fmap = []
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(MultiPeriodDiscriminator, self).__init__()
-        periods = [2, 3, 5, 7, 11]
-
-        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-        discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
-        self.discriminators = nn.ModuleList(discs)
-
-    def forward(self, y, y_hat):
-        y_d_rs = []
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            y_d_rs.append(y_d_r)
-            y_d_gs.append(y_d_g)
-            fmap_rs.append(fmap_r)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SynthesizerTrn(nn.Module):
-    """
-  Synthesizer for Training
-  """
-
-    def __init__(self,
-                 n_vocab,
-                 spec_channels,
-                 segment_size,
-                 inter_channels,
-                 hidden_channels,
-                 filter_channels,
-                 n_heads,
-                 n_layers,
-                 kernel_size,
-                 p_dropout,
-                 resblock,
-                 resblock_kernel_sizes,
-                 resblock_dilation_sizes,
-                 upsample_rates,
-                 upsample_initial_channel,
-                 upsample_kernel_sizes,
-                 n_speakers=0,
-                 gin_channels=0,
-                 use_sdp=True,
-                 **kwargs):
-
-        super().__init__()
-        self.n_vocab = n_vocab
-        self.spec_channels = spec_channels
-        self.inter_channels = inter_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.resblock = resblock
-        self.resblock_kernel_sizes = resblock_kernel_sizes
-        self.resblock_dilation_sizes = resblock_dilation_sizes
-        self.upsample_rates = upsample_rates
-        self.upsample_initial_channel = upsample_initial_channel
-        self.upsample_kernel_sizes = upsample_kernel_sizes
-        self.segment_size = segment_size
-        self.n_speakers = n_speakers
-        self.gin_channels = gin_channels
-
-        self.use_sdp = use_sdp
-
-        self.enc_p = TextEncoder(n_vocab,
-                                 inter_channels,
-                                 hidden_channels,
-                                 filter_channels,
-                                 n_heads,
-                                 n_layers,
-                                 kernel_size,
-                                 p_dropout)
-        self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
-                             upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
-        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
-                                      gin_channels=gin_channels)
-        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
-        if use_sdp:
-            self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
-        else:
-            self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
-        if n_speakers > 1:
-            self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
-    def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
-        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-        if self.n_speakers > 1:
-            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
-        else:
-            g = None
-
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-        z_p = self.flow(z, y_mask, g=g)
-
-        with torch.no_grad():
-            # negative cross-entropy
-            s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
-            neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
-            neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
-                                     s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-            neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-            neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
-            neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
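-            # The four terms above are the expansion, summed over channels d, of
-            #   log N(z_p; m_p, exp(logs_p))
-            #     = sum_d [-0.5*log(2*pi) - logs_p - 0.5*(z_p - m_p)^2 * exp(-2*logs_p)],
-            # with the quadratic split into z_p^2, z_p*m_p and m_p^2 pieces so each
-            # reduces to a single matmul or sum.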
-
-            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-            attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
-        w = attn.sum(2)
-        if self.use_sdp:
-            l_length = self.dp(x, x_mask, w, g=g)
-            l_length = l_length / torch.sum(x_mask)
-        else:
-            logw_ = torch.log(w + 1e-6) * x_mask
-            logw = self.dp(x, x_mask, g=g)
-            l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask)  # for averaging
-
-        # expand prior
-        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
-        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
-        z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
-        o = self.dec(z_slice, g=g)
-        return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-    def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
-        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-        if self.n_speakers > 1:
-            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
-        else:
-            g = None
-
-        if self.use_sdp:
-            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
-        else:
-            logw = self.dp(x, x_mask, g=g)
-        w = torch.exp(logw) * x_mask * length_scale
-        w_ceil = torch.ceil(w)
-        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
-        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
-        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-        attn = commons.generate_path(w_ceil, attn_mask)
-
-        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
-        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
-
-        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-        z = self.flow(z_p, y_mask, g=g, reverse=True)
-        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
-        return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
-    def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
-        assert self.n_speakers > 1, "n_speakers must be larger than 1."
-        g_src = self.emb_g(sid_src).unsqueeze(-1)
-        g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
-        z_p = self.flow(z, y_mask, g=g_src)
-        z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
-        o_hat = self.dec(z_hat * y_mask, g=g_tgt)
-        return o_hat, y_mask, (z, z_p, z_hat)
diff --git a/spaces/ORI-Muchim/RaidenTTS/text/__init__.py b/spaces/ORI-Muchim/RaidenTTS/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/RaidenTTS/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
-  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-    Args:
-      text: string to convert to a sequence
-      symbols: list of symbols making up the vocabulary; each symbol maps to its index
-      cleaner_names: names of the cleaner functions to run the text through
-    Returns:
-      List of integers corresponding to the symbols in the text
-  '''
-  _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
-  sequence = []
-
-  clean_text = _clean_text(text, cleaner_names)
-  for symbol in clean_text:
-    if symbol not in _symbol_to_id:
-      continue
-    symbol_id = _symbol_to_id[symbol]
-    sequence += [symbol_id]
-  return sequence
-
-
-def _clean_text(text, cleaner_names):
-  for name in cleaner_names:
-    # Use a default of None so a missing cleaner reaches the explicit check below
-    # instead of raising AttributeError inside getattr.
-    cleaner = getattr(cleaners, name, None)
-    if cleaner is None:
-      raise Exception('Unknown cleaner: %s' % name)
-    text = cleaner(text)
-  return text
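-
-# Illustrative usage sketch (the symbol list and cleaner name below are
-# hypothetical placeholders):
-#
-#   symbols = ['_', ' ', 'a', 'b', 'c']
-#   ids = text_to_sequence('ab cba', symbols, ['basic_cleaners'])
-#   # unknown characters are silently skipped; each known symbol maps to its index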
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py
deleted file mode 100644
index 85c9f1a9df8a4038fbd4246239b699402e382309..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .distributed_sampler import (
-    InferenceSampler,
-    RandomSubsetTrainingSampler,
-    RepeatFactorTrainingSampler,
-    TrainingSampler,
-)
-
-from .grouped_batch_sampler import GroupedBatchSampler
-
-__all__ = [
-    "GroupedBatchSampler",
-    "TrainingSampler",
-    "RandomSubsetTrainingSampler",
-    "InferenceSampler",
-    "RepeatFactorTrainingSampler",
-]
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/resnet.py b/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/resnet.py
deleted file mode 100644
index 062346e3ba2fc4d6ae5636f228c5b7565bdb62b7..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/resnet.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import torch.nn as nn
-import torch
-
-class Nonlinearity(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x):
-        # swish
-        return x * torch.sigmoid(x)
-
-class ResConv1DBlock(nn.Module):
-    def __init__(self, n_in, n_state, dilation=1, activation='silu', norm=None, dropout=None):
-        super().__init__()
-        padding = dilation
-        self.norm = norm
-        if norm == "LN":
-            self.norm1 = nn.LayerNorm(n_in)
-            self.norm2 = nn.LayerNorm(n_in)
-        elif norm == "GN":
-            self.norm1 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
-            self.norm2 = nn.GroupNorm(num_groups=32, num_channels=n_in, eps=1e-6, affine=True)
-        elif norm == "BN":
-            self.norm1 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
-            self.norm2 = nn.BatchNorm1d(num_features=n_in, eps=1e-6, affine=True)
-        else:
-            self.norm1 = nn.Identity()
-            self.norm2 = nn.Identity()
-
-        if activation == "relu":
-            self.activation1 = nn.ReLU()
-            self.activation2 = nn.ReLU()
-            
-        elif activation == "silu":
-            self.activation1 = Nonlinearity()
-            self.activation2 = Nonlinearity()
-            
-        elif activation == "gelu":
-            self.activation1 = nn.GELU()
-            self.activation2 = nn.GELU()
-        else:
-            raise ValueError(f'Unsupported activation: {activation}')
-
-        self.conv1 = nn.Conv1d(n_in, n_state, 3, 1, padding, dilation)
-        self.conv2 = nn.Conv1d(n_state, n_in, 1, 1, 0)
-
-
-    def forward(self, x):
-        x_orig = x
-        if self.norm == "LN":
-            x = self.norm1(x.transpose(-2, -1))
-            x = self.activation1(x.transpose(-2, -1))
-        else:
-            x = self.norm1(x)
-            x = self.activation1(x)
-            
-        x = self.conv1(x)
-
-        if self.norm == "LN":
-            x = self.norm2(x.transpose(-2, -1))
-            x = self.activation2(x.transpose(-2, -1))
-        else:
-            x = self.norm2(x)
-            x = self.activation2(x)
-
-        x = self.conv2(x)
-        x = x + x_orig
-        return x
-
-class Resnet1D(nn.Module):
-    def __init__(self, n_in, n_depth, dilation_growth_rate=1, reverse_dilation=True, activation='relu', norm=None):
-        super().__init__()
-        
-        blocks = [ResConv1DBlock(n_in, n_in, dilation=dilation_growth_rate ** depth, activation=activation, norm=norm) for depth in range(n_depth)]
-        if reverse_dilation:
-            blocks = blocks[::-1]
-        
-        self.model = nn.Sequential(*blocks)
-
-    def forward(self, x):        
-        return self.model(x)
\ No newline at end of file
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/data/humanml/common/skeleton.py b/spaces/OpenMotionLab/MotionGPT/mGPT/data/humanml/common/skeleton.py
deleted file mode 100644
index b2ae85ad14df8c1a8d77e689b1cffbc6c814a979..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/data/humanml/common/skeleton.py
+++ /dev/null
@@ -1,199 +0,0 @@
-from .quaternion import *
-import scipy.ndimage.filters as filters
-
-class Skeleton(object):
-    def __init__(self, offset, kinematic_tree, device):
-        self.device = device
-        self._raw_offset_np = offset.numpy()
-        self._raw_offset = offset.clone().detach().to(device).float()
-        self._kinematic_tree = kinematic_tree
-        self._offset = None
-        self._parents = [0] * len(self._raw_offset)
-        self._parents[0] = -1
-        for chain in self._kinematic_tree:
-            for j in range(1, len(chain)):
-                self._parents[chain[j]] = chain[j-1]
-
-    def njoints(self):
-        return len(self._raw_offset)
-
-    def offset(self):
-        return self._offset
-
-    def set_offset(self, offsets):
-        self._offset = offsets.clone().detach().to(self.device).float()
-
-    def kinematic_tree(self):
-        return self._kinematic_tree
-
-    def parents(self):
-        return self._parents
-
-    # joints (batch_size, joints_num, 3)
-    def get_offsets_joints_batch(self, joints):
-        assert len(joints.shape) == 3
-        _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone()
-        for i in range(1, self._raw_offset.shape[0]):
-            _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i]
-
-        self._offset = _offsets.detach()
-        return _offsets
-
-    # joints (joints_num, 3)
-    def get_offsets_joints(self, joints):
-        assert len(joints.shape) == 2
-        _offsets = self._raw_offset.clone()
-        for i in range(1, self._raw_offset.shape[0]):
-            # print(joints.shape)
-            _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i]
-
-        self._offset = _offsets.detach()
-        return _offsets
-
-    # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder
-    # joints (batch_size, joints_num, 3)
-    def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False):
-        assert len(face_joint_idx) == 4
-        '''Get Forward Direction'''
-        l_hip, r_hip, sdr_r, sdr_l = face_joint_idx
-        across1 = joints[:, r_hip] - joints[:, l_hip]
-        across2 = joints[:, sdr_r] - joints[:, sdr_l]
-        across = across1 + across2
-        across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis]
-        # print(across1.shape, across2.shape)
-
-        # forward (batch_size, 3)
-        forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1)
-        if smooth_forward:
-            forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest')
-            # forward (batch_size, 3)
-        forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis]
-
-        '''Get Root Rotation'''
-        target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
-        root_quat = qbetween_np(forward, target)
-
-        '''Inverse Kinematics'''
-        # quat_params (batch_size, joints_num, 4)
-        # print(joints.shape[:-1])
-        quat_params = np.zeros(joints.shape[:-1] + (4,))
-        # print(quat_params.shape)
-        root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]])
-        quat_params[:, 0] = root_quat
-        # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]])
-        for chain in self._kinematic_tree:
-            R = root_quat
-            for j in range(len(chain) - 1):
-                # (batch, 3)
-                u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0)
-                # print(u.shape)
-                # (batch, 3)
-                v = joints[:, chain[j+1]] - joints[:, chain[j]]
-                v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis]
-                # print(u.shape, v.shape)
-                rot_u_v = qbetween_np(u, v)
-
-                R_loc = qmul_np(qinv_np(R), rot_u_v)
-
-                quat_params[:,chain[j + 1], :] = R_loc
-                R = qmul_np(R, R_loc)
-
-        return quat_params
-
-    # Be sure root joint is at the beginning of kinematic chains
-    def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True):
-        # quat_params (batch_size, joints_num, 4)
-        # joints (batch_size, joints_num, 3)
-        # root_pos (batch_size, 3)
-        if skel_joints is not None:
-            offsets = self.get_offsets_joints_batch(skel_joints)
-        if len(self._offset.shape) == 2:
-            offsets = self._offset.expand(quat_params.shape[0], -1, -1)
-        joints = torch.zeros(quat_params.shape[:-1] + (3,)).to(self.device)
-        joints[:, 0] = root_pos
-        for chain in self._kinematic_tree:
-            if do_root_R:
-                R = quat_params[:, 0]
-            else:
-                R = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach().to(self.device)
-            for i in range(1, len(chain)):
-                R = qmul(R, quat_params[:, chain[i]])
-                offset_vec = offsets[:, chain[i]]
-                joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]]
-        return joints
-
-    # Be sure root joint is at the beginning of kinematic chains
-    def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True):
-        # quat_params (batch_size, joints_num, 4)
-        # joints (batch_size, joints_num, 3)
-        # root_pos (batch_size, 3)
-        if skel_joints is not None:
-            skel_joints = torch.from_numpy(skel_joints)
-            offsets = self.get_offsets_joints_batch(skel_joints)
-        if len(self._offset.shape) == 2:
-            offsets = self._offset.expand(quat_params.shape[0], -1, -1)
-        offsets = offsets.numpy()
-        joints = np.zeros(quat_params.shape[:-1] + (3,))
-        joints[:, 0] = root_pos
-        for chain in self._kinematic_tree:
-            if do_root_R:
-                R = quat_params[:, 0]
-            else:
-                R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0)
-            for i in range(1, len(chain)):
-                R = qmul_np(R, quat_params[:, chain[i]])
-                offset_vec = offsets[:, chain[i]]
-                joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]]
-        return joints
-
-    def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True):
-        # cont6d_params (batch_size, joints_num, 6)
-        # joints (batch_size, joints_num, 3)
-        # root_pos (batch_size, 3)
-        if skel_joints is not None:
-            skel_joints = torch.from_numpy(skel_joints)
-            offsets = self.get_offsets_joints_batch(skel_joints)
-        if len(self._offset.shape) == 2:
-            offsets = self._offset.expand(cont6d_params.shape[0], -1, -1)
-        offsets = offsets.numpy()
-        joints = np.zeros(cont6d_params.shape[:-1] + (3,))
-        joints[:, 0] = root_pos
-        for chain in self._kinematic_tree:
-            if do_root_R:
-                matR = cont6d_to_matrix_np(cont6d_params[:, 0])
-            else:
-                matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0)
-            for i in range(1, len(chain)):
-                matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]]))
-                offset_vec = offsets[:, chain[i]][..., np.newaxis]
-                # print(matR.shape, offset_vec.shape)
-                joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]]
-        return joints
-
-    def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True):
-        # cont6d_params (batch_size, joints_num, 6)
-        # joints (batch_size, joints_num, 3)
-        # root_pos (batch_size, 3)
-        if skel_joints is not None:
-            # skel_joints = torch.from_numpy(skel_joints)
-            offsets = self.get_offsets_joints_batch(skel_joints)
-        if len(self._offset.shape) == 2:
-            offsets = self._offset.expand(cont6d_params.shape[0], -1, -1)
-        joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device)
-        joints[..., 0, :] = root_pos
-        for chain in self._kinematic_tree:
-            if do_root_R:
-                matR = cont6d_to_matrix(cont6d_params[:, 0])
-            else:
-                matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach().to(cont6d_params.device)
-            for i in range(1, len(chain)):
-                matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]]))
-                offset_vec = offsets[:, chain[i]].unsqueeze(-1)
-                # print(matR.shape, offset_vec.shape)
-                joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]]
-        return joints
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/builder.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/builder.py
deleted file mode 100644
index 1f5b971252bfc971c3ffbaa27746d69b1d3ea9fd..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/builder.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import warnings
-
-from annotator.uniformer.mmcv.cnn import MODELS as MMCV_MODELS
-from annotator.uniformer.mmcv.utils import Registry
-
-MODELS = Registry('models', parent=MMCV_MODELS)
-
-BACKBONES = MODELS
-NECKS = MODELS
-HEADS = MODELS
-LOSSES = MODELS
-SEGMENTORS = MODELS
-
-
-def build_backbone(cfg):
-    """Build backbone."""
-    return BACKBONES.build(cfg)
-
-
-def build_neck(cfg):
-    """Build neck."""
-    return NECKS.build(cfg)
-
-
-def build_head(cfg):
-    """Build head."""
-    return HEADS.build(cfg)
-
-
-def build_loss(cfg):
-    """Build loss."""
-    return LOSSES.build(cfg)
-
-
-def build_segmentor(cfg, train_cfg=None, test_cfg=None):
-    """Build segmentor."""
-    if train_cfg is not None or test_cfg is not None:
-        warnings.warn(
-            'train_cfg and test_cfg are deprecated, '
-            'please specify them in the model config instead', UserWarning)
-    assert cfg.get('train_cfg') is None or train_cfg is None, \
-        'train_cfg specified in both outer field and model field '
-    assert cfg.get('test_cfg') is None or test_cfg is None, \
-        'test_cfg specified in both outer field and model field '
-    return SEGMENTORS.build(
-        cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/ema_head.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/ema_head.py
deleted file mode 100644
index 12267cb40569d2b5a4a2955a6dc2671377ff5e0a..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/ema_head.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import math
-
-import torch
-import torch.distributed as dist
-import torch.nn as nn
-import torch.nn.functional as F
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-def reduce_mean(tensor):
-    """Reduce mean when distributed training."""
-    if not (dist.is_available() and dist.is_initialized()):
-        return tensor
-    tensor = tensor.clone()
-    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
-    return tensor
-
-
-class EMAModule(nn.Module):
-    """Expectation Maximization Attention Module used in EMANet.
-
-    Args:
-        channels (int): Channels of the whole module.
-        num_bases (int): Number of bases.
-        num_stages (int): Number of the EM iterations.
-        momentum (float): Momentum used when updating the bases.
-    """
-
-    def __init__(self, channels, num_bases, num_stages, momentum):
-        super(EMAModule, self).__init__()
-        assert num_stages >= 1, 'num_stages must be at least 1!'
-        self.num_bases = num_bases
-        self.num_stages = num_stages
-        self.momentum = momentum
-
-        bases = torch.zeros(1, channels, self.num_bases)
-        bases.normal_(0, math.sqrt(2. / self.num_bases))
-        # [1, channels, num_bases]
-        bases = F.normalize(bases, dim=1, p=2)
-        self.register_buffer('bases', bases)
-
-    def forward(self, feats):
-        """Forward function."""
-        batch_size, channels, height, width = feats.size()
-        # [batch_size, channels, height*width]
-        feats = feats.view(batch_size, channels, height * width)
-        # [batch_size, channels, num_bases]
-        bases = self.bases.repeat(batch_size, 1, 1)
-
-        with torch.no_grad():
-            for i in range(self.num_stages):
-                # [batch_size, height*width, num_bases]
-                attention = torch.einsum('bcn,bck->bnk', feats, bases)
-                attention = F.softmax(attention, dim=2)
-                # l1 norm
-                attention_normed = F.normalize(attention, dim=1, p=1)
-                # [batch_size, channels, num_bases]
-                bases = torch.einsum('bcn,bnk->bck', feats, attention_normed)
-                # l2 norm
-                bases = F.normalize(bases, dim=1, p=2)
-
-        feats_recon = torch.einsum('bck,bnk->bcn', bases, attention)
-        feats_recon = feats_recon.view(batch_size, channels, height, width)
-
-        if self.training:
-            bases = bases.mean(dim=0, keepdim=True)
-            bases = reduce_mean(bases)
-            # l2 norm
-            bases = F.normalize(bases, dim=1, p=2)
-            self.bases = (1 - self.momentum) * self.bases + self.momentum * bases
-
-        return feats_recon
-
-
-@HEADS.register_module()
-class EMAHead(BaseDecodeHead):
-    """Expectation Maximization Attention Networks for Semantic Segmentation.
-
-    This head is the implementation of `EMANet
-    <https://arxiv.org/abs/1907.13426>`_.
-
-    Args:
-        ema_channels (int): EMA module channels
-        num_bases (int): Number of bases.
-        num_stages (int): Number of the EM iterations.
-        concat_input (bool): Whether concat the input and output of convs
-            before classification layer. Default: True
-        momentum (float): Momentum to update the base. Default: 0.1.
-    """
-
-    def __init__(self,
-                 ema_channels,
-                 num_bases,
-                 num_stages,
-                 concat_input=True,
-                 momentum=0.1,
-                 **kwargs):
-        super(EMAHead, self).__init__(**kwargs)
-        self.ema_channels = ema_channels
-        self.num_bases = num_bases
-        self.num_stages = num_stages
-        self.concat_input = concat_input
-        self.momentum = momentum
-        self.ema_module = EMAModule(self.ema_channels, self.num_bases,
-                                    self.num_stages, self.momentum)
-
-        self.ema_in_conv = ConvModule(
-            self.in_channels,
-            self.ema_channels,
-            3,
-            padding=1,
-            conv_cfg=self.conv_cfg,
-            norm_cfg=self.norm_cfg,
-            act_cfg=self.act_cfg)
-        # project (0, inf) -> (-inf, inf)
-        self.ema_mid_conv = ConvModule(
-            self.ema_channels,
-            self.ema_channels,
-            1,
-            conv_cfg=self.conv_cfg,
-            norm_cfg=None,
-            act_cfg=None)
-        for param in self.ema_mid_conv.parameters():
-            param.requires_grad = False
-
-        self.ema_out_conv = ConvModule(
-            self.ema_channels,
-            self.ema_channels,
-            1,
-            conv_cfg=self.conv_cfg,
-            norm_cfg=self.norm_cfg,
-            act_cfg=None)
-        self.bottleneck = ConvModule(
-            self.ema_channels,
-            self.channels,
-            3,
-            padding=1,
-            conv_cfg=self.conv_cfg,
-            norm_cfg=self.norm_cfg,
-            act_cfg=self.act_cfg)
-        if self.concat_input:
-            self.conv_cat = ConvModule(
-                self.in_channels + self.channels,
-                self.channels,
-                kernel_size=3,
-                padding=1,
-                conv_cfg=self.conv_cfg,
-                norm_cfg=self.norm_cfg,
-                act_cfg=self.act_cfg)
-
-    def forward(self, inputs):
-        """Forward function."""
-        x = self._transform_inputs(inputs)
-        feats = self.ema_in_conv(x)
-        identity = feats
-        feats = self.ema_mid_conv(feats)
-        recon = self.ema_module(feats)
-        recon = F.relu(recon, inplace=True)
-        recon = self.ema_out_conv(recon)
-        output = F.relu(identity + recon, inplace=True)
-        output = self.bottleneck(output)
-        if self.concat_input:
-            output = self.conv_cat(torch.cat([x, output], dim=1))
-        output = self.cls_seg(output)
-        return output
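-
-# Minimal usage sketch for EMAModule (illustrative, not part of the original file):
-#   ema = EMAModule(channels=512, num_bases=64, num_stages=3, momentum=0.1)
-#   feats = torch.randn(2, 512, 32, 32)
-#   recon = ema(feats)  # [2, 512, 32, 32], reconstructed from the 64 learned bases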
diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/ema.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/ema.py
deleted file mode 100644
index c8c75af43565f6e140287644aaaefa97dd6e67c5..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/ema.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import torch
-from torch import nn
-
-
-class LitEma(nn.Module):
-    def __init__(self, model, decay=0.9999, use_num_updates=True):
-        super().__init__()
-        if decay < 0.0 or decay > 1.0:
-            raise ValueError('Decay must be between 0 and 1')
-
-        self.m_name2s_name = {}
-        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_updates
-                             else torch.tensor(-1, dtype=torch.int))
-
-        for name, p in model.named_parameters():
-            if p.requires_grad:
-                # remove '.' since that character is not allowed in buffer names
-                s_name = name.replace('.', '')
-                self.m_name2s_name.update({name: s_name})
-                self.register_buffer(s_name, p.clone().detach().data)
-
-        self.collected_params = []
-
-    def forward(self, model):
-        decay = self.decay
-
-        if self.num_updates >= 0:
-            self.num_updates += 1
-            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
-
-        one_minus_decay = 1.0 - decay
-
-        with torch.no_grad():
-            m_param = dict(model.named_parameters())
-            shadow_params = dict(self.named_buffers())
-
-            for key in m_param:
-                if m_param[key].requires_grad:
-                    sname = self.m_name2s_name[key]
-                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
-                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
-                else:
-                    assert key not in self.m_name2s_name
-
-    def copy_to(self, model):
-        m_param = dict(model.named_parameters())
-        shadow_params = dict(self.named_buffers())
-        for key in m_param:
-            if m_param[key].requires_grad:
-                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
-            else:
-                assert key not in self.m_name2s_name
-
-    def store(self, parameters):
-        """
-        Save the current parameters for restoring later.
-        Args:
-          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-            temporarily stored.
-        """
-        self.collected_params = [param.clone() for param in parameters]
-
-    def restore(self, parameters):
-        """
-        Restore the parameters stored with the `store` method.
-        Useful to validate the model with EMA parameters without affecting the
-        original optimization process. Store the parameters before the
-        `copy_to` method. After validation (or model saving), use this to
-        restore the former parameters.
-        Args:
-          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-            updated with the stored parameters.
-        """
-        for c_param, param in zip(self.collected_params, parameters):
-            param.data.copy_(c_param.data)
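-
-# Minimal usage sketch (illustrative; `MyModel`, `train_step`, `validate` and the
-# loader are hypothetical placeholders):
-#   model = MyModel()
-#   ema = LitEma(model, decay=0.9999)
-#   for batch in loader:
-#       train_step(model, batch)
-#       ema(model)                        # update shadow parameters
-#   ema.store(model.parameters())         # stash current weights
-#   ema.copy_to(model)                    # evaluate with EMA weights
-#   validate(model)
-#   ema.restore(model.parameters())       # put trained weights back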
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/vlist.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/vlist.go
deleted file mode 100644
index a7755e9db203e03f182dc861a1f6adccc5fba22c..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/vlist.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/configurator.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/configurator.py
deleted file mode 100644
index 1dc3be124f638b8859eb459bcb2d46696f62e2b7..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/autogpt/configurator.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""Configurator module."""
-import click
-from colorama import Back, Fore, Style
-
-from autogpt import utils
-from autogpt.config import Config
-from autogpt.logs import logger
-from autogpt.memory import get_supported_memory_backends
-
-CFG = Config()
-
-
-def create_config(
-    continuous: bool,
-    continuous_limit: int,
-    ai_settings_file: str,
-    skip_reprompt: bool,
-    speak: bool,
-    debug: bool,
-    gpt3only: bool,
-    gpt4only: bool,
-    memory_type: str,
-    browser_name: str,
-    allow_downloads: bool,
-    skip_news: bool,
-) -> None:
-    """Updates the config object with the given arguments.
-
-    Args:
-        continuous (bool): Whether to run in continuous mode
-        continuous_limit (int): The number of times to run in continuous mode
-        ai_settings_file (str): The path to the ai_settings.yaml file
-        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
-        speak (bool): Whether to enable speak mode
-        debug (bool): Whether to enable debug mode
-        gpt3only (bool): Whether to enable GPT3.5 only mode
-        gpt4only (bool): Whether to enable GPT4 only mode
-        memory_type (str): The type of memory backend to use
-        browser_name (str): The name of the browser to use when using selenium to scrape the web
-        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
-        skip_news (bool): Whether to suppress the output of the latest news on startup
-    """
-    CFG.set_debug_mode(False)
-    CFG.set_continuous_mode(False)
-    CFG.set_speak_mode(False)
-
-    if debug:
-        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_debug_mode(True)
-
-    if continuous:
-        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
-        logger.typewriter_log(
-            "WARNING: ",
-            Fore.RED,
-            "Continuous mode is not recommended. It is potentially dangerous and may"
-            " cause your AI to run forever or carry out actions you would not usually"
-            " authorise. Use at your own risk.",
-        )
-        CFG.set_continuous_mode(True)
-
-        if continuous_limit:
-            logger.typewriter_log(
-                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
-            )
-            CFG.set_continuous_limit(continuous_limit)
-
-    # Check if continuous limit is used without continuous mode
-    if continuous_limit and not continuous:
-        raise click.UsageError("--continuous-limit can only be used with --continuous")
-
-    if speak:
-        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_speak_mode(True)
-
-    if gpt3only:
-        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_smart_llm_model(CFG.fast_llm_model)
-
-    if gpt4only:
-        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_fast_llm_model(CFG.smart_llm_model)
-
-    if memory_type:
-        supported_memory = get_supported_memory_backends()
-        chosen = memory_type
-        if chosen not in supported_memory:
-            logger.typewriter_log(
-                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
-                Fore.RED,
-                f"{supported_memory}",
-            )
-            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
-        else:
-            CFG.memory_backend = chosen
-
-    if skip_reprompt:
-        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
-        CFG.skip_reprompt = True
-
-    if ai_settings_file:
-        file = ai_settings_file
-
-        # Validate file
-        (validated, message) = utils.validate_yaml_file(file)
-        if not validated:
-            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
-            logger.double_check()
-            exit(1)
-
-        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
-        CFG.ai_settings_file = file
-        CFG.skip_reprompt = True
-
-    if allow_downloads:
-        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
-        logger.typewriter_log(
-            "WARNING: ",
-            Fore.YELLOW,
-            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
-            + "It is recommended that you monitor any files it downloads carefully.",
-        )
-        logger.typewriter_log(
-            "WARNING: ",
-            Fore.YELLOW,
-            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
-        )
-        CFG.allow_downloads = True
-
-    if skip_news:
-        CFG.skip_news = True
-
-    if browser_name:
-        CFG.selenium_web_browser = browser_name
diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/workspace.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/workspace.py
deleted file mode 100644
index 6fb0e3113eb2c1338edf7f86c6e162fc27c61e50..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/autogpt/workspace.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-import os
-from pathlib import Path
-
-from autogpt.config import Config
-
-CFG = Config()
-
-# Set a dedicated folder for file I/O
-WORKSPACE_PATH = Path(os.getcwd()) / "auto_gpt_workspace"
-
-# Create the directory if it doesn't exist
-if not os.path.exists(WORKSPACE_PATH):
-    os.makedirs(WORKSPACE_PATH)
-
-
-def path_in_workspace(relative_path: str | Path) -> Path:
-    """Get full path for item in workspace
-
-    Args:
-        relative_path (str | Path): Path to translate into the workspace
-
-    Returns:
-        Path: Absolute path for the given path in the workspace
-    """
-    return safe_path_join(WORKSPACE_PATH, relative_path)
-
-
-def safe_path_join(base: Path, *paths: str | Path) -> Path:
-    """Join one or more path components, asserting the resulting path is within the workspace.
-
-    Args:
-        base (Path): The base path
-        *paths (str): The paths to join to the base path
-
-    Returns:
-        Path: The joined path
-    """
-    joined_path = base.joinpath(*paths).resolve()
-
-    if CFG.restrict_to_workspace and not joined_path.is_relative_to(base):
-        raise ValueError(
-            f"Attempted to access path '{joined_path}' outside of workspace '{base}'."
-        )
-
-    return joined_path
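-
-# Illustrative usage sketch (not part of the original file):
-#   path_in_workspace("notes.txt")       # -> <cwd>/auto_gpt_workspace/notes.txt
-#   path_in_workspace("../secrets.txt")  # raises ValueError while
-#                                        # CFG.restrict_to_workspace is enabled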
diff --git a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_text.py b/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_text.py
deleted file mode 100644
index fea5ebfc05d466c7cb5711b5ac10e2ea102ddc45..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_text.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Generated by CodiumAI
-
-import requests
-
-from autogpt.commands.web_requests import scrape_text
-
-"""
-Code Analysis
-
-Objective:
-The objective of the "scrape_text" function is to scrape the text content from
-a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
-
-Inputs:
-- url: a string representing the URL of the webpage to be scraped.
-
-Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
-2. Check if the response contains an HTTP error. If it does, return an error message.
-3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
-4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
-5. Split the text into lines and then into chunks, removing any extra whitespace.
-6. Join the chunks into a single string with newline characters between them.
-7. Return the cleaned text.
-
-Outputs:
-- A string representing the cleaned text content of the webpage.
-
-Additional aspects:
-- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
-- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
-- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
-"""
-
-
-class TestScrapeText:
-    # Tests that scrape_text() returns the expected text when given a valid URL.
-    def test_scrape_text_with_valid_url(self, mocker):
-        # Mock the requests.get() method to return a response with expected text
-        expected_text = "This is some sample text"
-        mock_response = mocker.Mock()
-        mock_response.status_code = 200
-        mock_response.text = f"<html><body><div><p style='color: blue;'>{expected_text}</p></div></body></html>"
-        mocker.patch("requests.Session.get", return_value=mock_response)
-
-        # Call the function with a valid URL and assert that it returns the expected text
-        url = "http://www.example.com"
-        assert scrape_text(url) == expected_text
-
-    # Tests that the function returns an error message when an invalid or unreachable url is provided.
-    def test_invalid_url(self, mocker):
-        # Mock the requests.get() method to raise an exception
-        mocker.patch(
-            "requests.Session.get", side_effect=requests.exceptions.RequestException
-        )
-
-        # Call the function with an invalid URL and assert that it returns an error message
-        url = "http://www.invalidurl.com"
-        error_message = scrape_text(url)
-        assert "Error:" in error_message
-
-    # Tests that the function returns an empty string when the html page contains no text to be scraped.
-    def test_no_text(self, mocker):
-        # Mock the requests.get() method to return a response with no text
-        mock_response = mocker.Mock()
-        mock_response.status_code = 200
-        mock_response.text = "<html><body></body></html>"
-        mocker.patch("requests.Session.get", return_value=mock_response)
-
-        # Call the function with a valid URL and assert that it returns an empty string
-        url = "http://www.example.com"
-        assert scrape_text(url) == ""
-
-    # Tests that the function returns an error message when the response status code is an http error (>=400).
-    def test_http_error(self, mocker):
-        # Mock the requests.get() method to return a response with a 404 status code
-        mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
-
-        # Call the function with a URL
-        result = scrape_text("https://www.example.com")
-
-        # Check that the function returns an error message
-        assert result == "Error: HTTP 404 error"
-
-    # Tests that scrape_text() properly handles HTML tags.
-    def test_scrape_text_with_html_tags(self, mocker):
-        # Create a mock response object with HTML containing tags
-        html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"
-        mock_response = mocker.Mock()
-        mock_response.status_code = 200
-        mock_response.text = html
-        mocker.patch("requests.Session.get", return_value=mock_response)
-
-        # Call the function with a URL
-        result = scrape_text("https://www.example.com")
-
-        # Check that the function properly handles HTML tags
-        assert result == "This is bold text."
diff --git a/spaces/ProteinDesignLab/protpardelle/models.py b/spaces/ProteinDesignLab/protpardelle/models.py
deleted file mode 100644
index 4536510aea1f38cecddee1a6b8b2cc81161e657f..0000000000000000000000000000000000000000
--- a/spaces/ProteinDesignLab/protpardelle/models.py
+++ /dev/null
@@ -1,778 +0,0 @@
-"""
-https://github.com/ProteinDesignLab/protpardelle
-License: MIT
-Author: Alex Chu
-
-Top-level model definitions.
-Typically these are initialized with config rather than arguments.
-"""
-import argparse
-from functools import partial
-import os
-from typing import Callable, List, Optional
-
-from einops import rearrange, repeat
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torchtyping import TensorType
-
-from core import protein_mpnn
-from core import residue_constants
-from core import utils
-import diffusion
-import evaluation
-import modules
-
-
-class MiniMPNN(nn.Module):
-    """Wrapper for ProteinMPNN network to predict sequence from structure."""
-
-    def __init__(self, config: argparse.Namespace):
-        super().__init__()
-        self.config = config
-        self.model_config = cfg = config.model.mpnn_model
-        self.n_tokens = config.data.n_aatype_tokens
-        self.seq_emb_dim = cfg.n_channel
-        time_cond_dim = cfg.n_channel * cfg.noise_cond_mult
-
-        self.noise_block = modules.NoiseConditioningBlock(cfg.n_channel, time_cond_dim)
-        self.token_embedding = nn.Linear(self.n_tokens, self.seq_emb_dim)
-        self.mpnn_net = modules.NoiseConditionalProteinMPNN(
-            n_channel=cfg.n_channel,
-            n_layers=cfg.n_layers,
-            n_neighbors=cfg.n_neighbors,
-            time_cond_dim=time_cond_dim,
-            vocab_size=config.data.n_aatype_tokens,
-            input_S_is_embeddings=True,
-        )
-        self.proj_out = nn.Linear(cfg.n_channel, self.n_tokens)
-
-    def forward(
-        self,
-        denoised_coords: TensorType["b n a x", float],
-        coords_noise_level: TensorType["b", float],
-        seq_mask: TensorType["b n", float],
-        residue_index: TensorType["b n", int],
-        seq_self_cond: Optional[TensorType["b n t", float]] = None,  # logprobs
-        return_embeddings: bool = False,
-    ):
-        coords_noise_level_scaled = 0.25 * torch.log(coords_noise_level)
-        noise_cond = self.noise_block(coords_noise_level_scaled)
-
-        b, n, _, _ = denoised_coords.shape
-        if seq_self_cond is None or not self.model_config.use_self_conditioning:
-            seq_emb_in = torch.zeros(b, n, self.seq_emb_dim).to(denoised_coords)
-        else:
-            seq_emb_in = self.token_embedding(seq_self_cond.exp())
-
-        node_embs, encoder_embs = self.mpnn_net(
-            denoised_coords, seq_emb_in, seq_mask, residue_index, noise_cond
-        )
-
-        logits = self.proj_out(node_embs)
-        pred_logprobs = F.log_softmax(logits, -1)
-
-        if return_embeddings:
-            return pred_logprobs, node_embs, encoder_embs
-        return pred_logprobs
-
-
-class CoordinateDenoiser(nn.Module):
-    """Wrapper for U-ViT module to denoise structure coordinates."""
-
-    def __init__(self, config: argparse.Namespace):
-        super().__init__()
-        self.config = config
-
-        # Configuration
-        self.sigma_data = config.data.sigma_data
-        m_cfg = config.model.struct_model
-        nc = m_cfg.n_channel
-        bb_atoms = ["N", "CA", "C", "O"]
-        n_atoms = config.model.struct_model.n_atoms
-        self.use_conv = len(m_cfg.uvit.n_filt_per_layer) > 0
-        if self.use_conv and n_atoms == 37:
-            n_atoms += 1  # make it an even number
-        self.n_atoms = n_atoms
-        self.bb_idxs = [residue_constants.atom_order[a] for a in bb_atoms]
-        n_xyz = 9 if config.model.crop_conditional else 6
-        nc_in = n_xyz * n_atoms  # xyz + selfcond xyz + maybe cropcond xyz
-
-        # Neural networks
-        n_noise_channel = nc * m_cfg.noise_cond_mult
-        self.net = modules.TimeCondUViT(
-            seq_len=config.data.fixed_size,
-            patch_size=m_cfg.uvit.patch_size,
-            dim=nc,
-            depth=m_cfg.uvit.n_layers,
-            n_filt_per_layer=m_cfg.uvit.n_filt_per_layer,
-            heads=m_cfg.uvit.n_heads,
-            dim_head=m_cfg.uvit.dim_head,
-            conv_skip_connection=m_cfg.uvit.conv_skip_connection,
-            n_atoms=n_atoms,
-            channels_per_atom=n_xyz,
-            time_cond_dim=n_noise_channel,
-            position_embedding_type=m_cfg.uvit.position_embedding_type,
-        )
-        self.noise_block = modules.NoiseConditioningBlock(nc, n_noise_channel)
-
-    def forward(
-        self,
-        noisy_coords: TensorType["b n a x", float],
-        noise_level: TensorType["b", float],
-        seq_mask: TensorType["b n", float],
-        residue_index: Optional[TensorType["b n", int]] = None,
-        struct_self_cond: Optional[TensorType["b n a x", float]] = None,
-        struct_crop_cond: Optional[TensorType["b n a x", float]] = None,
-    ):
-        # Prep inputs and time conditioning
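-        # EDM-style preconditioning of the input (Karras et al., 2022):
-        # c_in = 1 / sqrt(sigma^2 + sigma_data^2), c_noise = ln(sigma) / 4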
-        actual_var_data = self.sigma_data**2
-        var_noisy_coords = noise_level**2 + actual_var_data
-        emb = noisy_coords / utils.expand(var_noisy_coords.sqrt(), noisy_coords)
-        struct_noise_scaled = 0.25 * torch.log(noise_level)
-        noise_cond = self.noise_block(struct_noise_scaled)
-
-        # Prepare self- and crop-conditioning and concatenate along channels
-        if struct_self_cond is None:
-            struct_self_cond = torch.zeros_like(noisy_coords)
-        if self.config.model.crop_conditional:
-            if struct_crop_cond is None:
-                struct_crop_cond = torch.zeros_like(noisy_coords)
-            else:
-                struct_crop_cond = struct_crop_cond / self.sigma_data
-            emb = torch.cat([emb, struct_self_cond, struct_crop_cond], -1)
-        else:
-            emb = torch.cat([emb, struct_self_cond], -1)
-
-        # Run neural network
-        emb = self.net(emb, noise_cond, seq_mask=seq_mask, residue_index=residue_index)
-
-        # Preconditioning from Karras et al.
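-        # D(x) = c_skip * x + c_out * F(x), with
-        #     c_out  = sigma * sigma_data / sqrt(sigma^2 + sigma_data^2)
-        #     c_skip = sigma_data^2 / (sigma^2 + sigma_data^2)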
-        out_scale = noise_level * actual_var_data**0.5 / torch.sqrt(var_noisy_coords)
-        skip_scale = actual_var_data / var_noisy_coords
-        emb = emb * utils.expand(out_scale, emb)
-        skip_info = noisy_coords * utils.expand(skip_scale, noisy_coords)
-        denoised_coords_x0 = emb + skip_info
-
-        # Don't use atom mask; denoise all atoms
-        denoised_coords_x0 *= utils.expand(seq_mask, denoised_coords_x0)
-        return denoised_coords_x0
-
-
-class Protpardelle(nn.Module):
-    """All-atom protein diffusion-based generative model.
-
-    This class wraps a structure denoising network and a sequence prediction network
-    to do structure/sequence co-design (for all-atom generation), or backbone generation.
-
-    It can be trained for one of four main tasks. To produce the all-atom (co-design)
-    Protpardelle model, we will typically pretrain an 'allatom' model, then use this
-    to train a 'seqdes' model. A 'seqdes' model can be trained with either a backbone
-    or allatom denoiser. The two can be combined to yield all-atom (co-design) Protpardelle
-    without further training.
-        'backbone': train only a backbone coords denoiser.
-        'seqdes': train only a mini-MPNN, using a pretrained coords denoiser.
-        'allatom': train only an allatom coords denoiser (cannot do all-atom generation
-            by itself).
-        'codesign': train both an allatom denoiser and mini-MPNN at once.
-
-    """
-
-    def __init__(self, config: argparse.Namespace, device: str = "cpu"):
-        super().__init__()
-        self.config = config
-        self.device = device
-        self.task = config.model.task
-        self.n_tokens = config.data.n_aatype_tokens
-
-        self.use_mpnn_model = self.task in ["seqdes", "codesign"]
-
-        # Modules
-        self.all_modules = {}
-        self.struct_model = CoordinateDenoiser(config)
-        self.all_modules["struct_model"] = self.struct_model
-        self.bb_idxs = self.struct_model.bb_idxs
-        self.n_atoms = self.struct_model.n_atoms
-
-        if self.use_mpnn_model:
-            self.mpnn_model = MiniMPNN(config)
-            self.all_modules["mpnn_model"] = self.mpnn_model
-
-        # Load any pretrained modules
-        for module_name in self.config.model.pretrained_modules:
-            self.load_pretrained_module(module_name)
-
-        # Diffusion-related
-        self.sigma_data = self.struct_model.sigma_data
-        self.training_noise_schedule = partial(
-            diffusion.noise_schedule,
-            sigma_data=self.sigma_data,
-            **vars(config.diffusion.training),
-        )
-        self.sampling_noise_schedule_default = self.make_sampling_noise_schedule()
-
-    def load_pretrained_module(self, module_name: str, ckpt_path: Optional[str] = None):
-        """Load pretrained weights for a given module name."""
-        assert module_name in ["struct_model", "mpnn_model"], module_name
-
-        # Load pretrained checkpoint
-        if ckpt_path is None:
-            ckpt_path = getattr(self.config.model, f"{module_name}_checkpoint")
-            ckpt_path = os.path.join(self.config.train.home_dir, ckpt_path)
-        ckpt_dict = torch.load(ckpt_path, map_location=self.device)
-        model_state_dict = ckpt_dict["model_state_dict"]
-
-        # Get only submodule state_dict
-        submodule_state_dict = {
-            sk[len(module_name) + 1 :]: sv
-            for sk, sv in model_state_dict.items()
-            if sk.startswith(module_name)
-        }
-
-        # Load into module
-        module = dict(self.named_modules())[module_name]
-        module.load_state_dict(submodule_state_dict)
-
-        # Freeze unneeded modules
-        if module_name == "struct_model":
-            self.struct_model = module
-            if self.task == "seqdes":
-                for p in module.parameters():
-                    p.requires_grad = False
-        if module_name == "mpnn_model":
-            self.mpnn_model = module
-            if self.task not in ["codesign", "seqdes"]:
-                for p in module.parameters():
-                    p.requires_grad = False
-
-        return module
-
-    def load_minimpnn(self, mpnn_ckpt_path: Optional[str] = None):
-        """Convert an allatom model to a codesign model."""
-        if mpnn_ckpt_path is None:
-            mpnn_ckpt_path = "checkpoints/minimpnn_state_dict.pth"
-        self.mpnn_model = MiniMPNN(self.config).to(self.device)
-        self.load_pretrained_module("mpnn_model", ckpt_path=mpnn_ckpt_path)
-        self.use_mpnn_model = True
-        return
-
-    def remove_minimpnn(self):
-        """Revert a codesign model to an allatom model to a codesign model."""
-        self.use_mpnn_model = False
-        self.mpnn_model = None
-        self.all_modules["mpnn_model"] = None
-
-    def make_sampling_noise_schedule(self, **noise_kwargs):
-        """Make the default sampling noise schedule function."""
-        noise_schedule_kwargs = vars(self.config.diffusion.sampling)
-        if len(noise_kwargs) > 0:
-            noise_schedule_kwargs.update(noise_kwargs)
-        return partial(diffusion.noise_schedule, **noise_schedule_kwargs)
-
-    def forward(
-        self,
-        *,
-        noisy_coords: TensorType["b n a x", float],
-        noise_level: TensorType["b", float],
-        seq_mask: TensorType["b n", float],
-        residue_index: TensorType["b n", int],
-        struct_self_cond: Optional[TensorType["b n a x", float]] = None,
-        struct_crop_cond: Optional[TensorType["b n a x", float]] = None,
-        seq_self_cond: Optional[TensorType["b n t", float]] = None,  # logprobs
-        run_struct_model: bool = True,
-        run_mpnn_model: bool = True,
-    ):
-        """Main forward function for denoising/co-design.
-
-        Arguments:
-            noisy_coords: noisy array of xyz coordinates.
-            noise_level: std of noise for each example in the batch.
-            seq_mask: mask indicating which indexes contain data.
-            residue_index: residue ordering. This is used by proteinMPNN, but currently
-                only used by the diffusion model when the 'absolute_residx' or
-                'relative' position_embedding_type is specified.
-            struct_self_cond: denoised coordinates from the previous step, scaled
-                down by sigma_data.
-            struct_crop_cond: unnoised conditioning coordinates, unscaled (they are
-                scaled down by sigma_data inside the denoiser).
-            seq_self_cond: mpnn-predicted sequence logprobs from the previous step.
-            run_struct_model: flag to optionally not run structure denoiser.
-            run_mpnn_model: flag to optionally not run mini-mpnn.
-        """
-
-        # Coordinate denoiser
-        denoised_x0 = noisy_coords
-        if run_struct_model:
-            denoised_x0 = self.struct_model(
-                noisy_coords,
-                noise_level,
-                seq_mask,
-                residue_index=residue_index,
-                struct_self_cond=struct_self_cond,
-                struct_crop_cond=struct_crop_cond,
-            )
-
-        # Mini-MPNN
-        aatype_logprobs = None
-        if self.use_mpnn_model and run_mpnn_model:
-            aatype_logprobs = self.mpnn_model(
-                denoised_x0.detach(),
-                noise_level,
-                seq_mask,
-                residue_index,
-                seq_self_cond=seq_self_cond,
-                return_embeddings=False,
-            )
-            aatype_logprobs = aatype_logprobs * seq_mask[..., None]
-
-        # Process outputs
-        if aatype_logprobs is None:
-            aatype_logprobs = repeat(seq_mask, "b n -> b n t", t=self.n_tokens)
-            aatype_logprobs = torch.ones_like(aatype_logprobs)
-            aatype_logprobs = F.log_softmax(aatype_logprobs, -1)
-        struct_self_cond_out = denoised_x0.detach() / self.sigma_data
-        seq_self_cond_out = aatype_logprobs.detach()
-
-        return denoised_x0, aatype_logprobs, struct_self_cond_out, seq_self_cond_out
-
-    def make_seq_mask_for_sampling(
-        self,
-        prot_lens: Optional[TensorType["b", int]] = None,
-        n_samples: int = 1,
-        min_len: int = 50,
-        max_len: Optional[int] = None,
-    ):
-        """Makes a sequence mask of varying protein lengths (only input required
-        to begin sampling).
-        """
-        if max_len is None:
-            max_len = self.config.data.fixed_size
-        if prot_lens is None:
-            possible_lens = np.arange(min_len, max_len)
-            prot_lens = torch.Tensor(np.random.choice(possible_lens, n_samples))
-        else:
-            n_samples = len(prot_lens)
-            max_len = max(prot_lens)
-        mask = repeat(torch.arange(max_len), "n -> b n", b=n_samples)
-        mask = (mask < prot_lens[:, None]).float().to(self.device)
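-        # e.g. prot_lens=tensor([3, 5]) -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]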
-        return mask
-
-    def sample(
-        self,
-        *,
-        seq_mask: TensorType["b n", float] = None,
-        n_samples: int = 1,
-        min_len: int = 50,
-        max_len: int = 512,
-        residue_index: TensorType["b n", int] = None,
-        gt_coords: TensorType["b n a x", float] = None,
-        gt_coords_traj: List[TensorType["b n a x", float]] = None,
-        gt_cond_atom_mask: TensorType["b n a", float] = None,
-        gt_aatype: TensorType["b n", int] = None,
-        gt_cond_seq_mask: TensorType["b n", float] = None,
-        apply_cond_proportion: float = 1.0,
-        n_steps: int = 200,
-        step_scale: float = 1.2,
-        s_churn: float = 50.0,
-        noise_scale: float = 1.0,
-        s_t_min: float = 0.01,
-        s_t_max: float = 50.0,
-        temperature: float = 1.0,
-        top_p: float = 1.0,
-        disallow_aas: List[int] = [4, 20],  # cys, unk
-        sidechain_mode: bool = False,
-        skip_mpnn_proportion: float = 0.7,
-        anneal_seq_resampling_rate: Optional[str] = None,  # linear, cosine
-        use_fullmpnn: bool = False,
-        use_fullmpnn_for_final: bool = True,
-        use_reconstruction_guidance: bool = False,
-        use_classifier_free_guidance: bool = False,  # defaults to replacement guidance if these are all false
-        guidance_scale: float = 1.0,
-        noise_schedule: Optional[Callable] = None,
-        tqdm_pbar: Optional[Callable] = None,
-        return_last: bool = True,
-        return_aux: bool = False,
-    ):
-        """Sampling function for backbone or all-atom diffusion. All arguments are optional.
-
-        Arguments:
-            seq_mask: mask defining the number and lengths of proteins to be sampled.
-            n_samples: number of samples to draw (if seq_mask not provided).
-            min_len: minimum length of proteins to be sampled (if seq_mask not provided).
-            max_len: maximum length of proteins to be sampled (if seq_mask not provided).
-            residue_index: residue index of proteins to be sampled.
-            gt_coords: conditioning information for coords.
-            gt_coords_traj: conditioning information for coords specified for each timestep
-                (if gt_coords is not provided).
-            gt_cond_atom_mask: mask identifying atoms to apply gt_coords.
-            gt_aatype: conditioning information for sequence.
-            gt_cond_seq_mask: sequence positions to apply gt_aatype.
-            apply_cond_proportion: the proportion of timesteps to apply the conditioning.
-                e.g. if 0.5, then the first 50% of steps use conditioning, and the last 50%
-                are unconditional.
-            n_steps: number of denoising steps (ODE discretizations).
-            step_scale: scale to apply to the score.
-            s_churn: gamma = s_churn / n_steps describes the additional noise to add
-                relatively at each denoising step. Use 0.0 for deterministic sampling or
-                0.2 * n_steps as a rough default for stochastic sampling.
-            noise_scale: scale to apply to gamma.
-            s_t_min: don't apply s_churn below this noise level.
-            s_t_max: don't apply s_churn above this noise level.
-            temperature: scale to apply to aatype logits.
-            top_p: don't sample tokens which fall outside this proportion of the total probability.
-            disallow_aas: don't sample these token indices.
-            sidechain_mode: whether to do all-atom sampling (False for backbone-only).
-            skip_mpnn_proportion: proportion of timesteps from the start to skip running
-                mini-MPNN.
-            anneal_seq_resampling_rate: whether and how to decay the probability of
-                running mini-MPNN. None, 'linear', or 'cosine'
-            use_fullmpnn: use "full" ProteinMPNN at each step.
-            use_fullmpnn_for_final: use "full" ProteinMPNN at the final step.
-            use_reconstruction_guidance: use reconstruction guidance on the conditioning.
-            use_classifier_free_guidance: use classifier-free guidance on the conditioning.
-            guidance_scale: weight for reconstruction/classifier-free guidance.
-            noise_schedule: specify the noise level timesteps for sampling.
-            tqdm_pbar: progress bar in interactive contexts.
-            return_last: return only the sampled structure and sequence.
-            return_aux: return a dict of everything associated with the sampling run.
-        """
-
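-        # One step of the Karras et al. stochastic sampler: optionally "churn" xt
-        # up to a higher noise level t_hat, form the score estimate
-        # (xt - x0_pred) / sigma, apply any guidance, then take an Euler step
-        # from sigma to sigma_next.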
-        def ode_step(sigma_in, sigma_next, xt_in, x0_pred, gamma, guidance_in=None):
-            if gamma > 0:
-                t_hat = sigma_in + gamma * sigma_in
-                sigma_delta = torch.sqrt(t_hat**2 - sigma_in**2)
-                noisier_x = xt_in + utils.expand(
-                    sigma_delta, xt_in
-                ) * noise_scale * torch.randn_like(xt_in).to(xt_in)
-                xt_in = noisier_x * utils.expand(seq_mask, noisier_x)
-                sigma_in = t_hat
-
-            mask = (sigma_in > 0).float()
-            score = (xt_in - x0_pred) / utils.expand(sigma_in.clamp(min=1e-6), xt_in)
-            score = score * utils.expand(mask, score)
-            if use_reconstruction_guidance:
-                guidance, guidance_mask = guidance_in
-                guidance = guidance * guidance_mask[..., None]
-                guidance_std = guidance[guidance_mask.bool()].var().sqrt()
-                score_std = score[guidance_mask.bool()].var().sqrt()
-                score = score + guidance * guidance_scale
-            if use_classifier_free_guidance:
-                # guidance_in is the unconditional x0 (x0_pred is the conditional x0)
-                # guidance_scale = 1 + w from Ho paper
-                # ==0: use only unconditional score; <1: interpolate the scores;
-                # ==1: use only conditional score; >1: skew towards conditional score
-                uncond_x0 = guidance_in
-                uncond_score = (xt_in - uncond_x0) / utils.expand(
-                    sigma_in.clamp(min=1e-6), xt_in
-                )
-                uncond_score = uncond_score * utils.expand(mask, uncond_score)
-                score = guidance_scale * score + (1 - guidance_scale) * uncond_score
-            step = score * step_scale * utils.expand(sigma_next - sigma_in, score)
-            new_xt = xt_in + step
-            return new_xt
-
-        def sample_aatype(logprobs):
-            # Top-p truncation
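-            # (nucleus sampling: drop tokens outside the smallest prefix of the
-            # sorted distribution whose cumulative probability exceeds top_p,
-            # always keeping the most likely token)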
-            probs = F.softmax(logprobs.clone(), dim=-1)
-            sorted_prob, sorted_idxs = torch.sort(probs, descending=True)
-            cumsum_prob = torch.cumsum(sorted_prob, dim=-1)
-            sorted_indices_to_remove = cumsum_prob > top_p
-            sorted_indices_to_remove[..., 0] = 0
-            sorted_prob[sorted_indices_to_remove] = 0
-            orig_probs = torch.scatter(
-                torch.zeros_like(sorted_prob),
-                dim=-1,
-                index=sorted_idxs,
-                src=sorted_prob,
-            )
-
-            # Apply temperature and disallowed AAs and sample
-            assert temperature >= 0.0
-            scaled_logits = orig_probs.clamp(min=1e-9).log() / (temperature + 1e-4)
-            if disallow_aas:
-                unwanted_mask = torch.zeros(scaled_logits.shape[-1]).to(scaled_logits)
-                unwanted_mask[disallow_aas] = 1
-                scaled_logits -= unwanted_mask * 1e10
-            orig_probs = F.softmax(scaled_logits, dim=-1)
-            categorical = torch.distributions.Categorical(probs=orig_probs)
-            samp_aatype = categorical.sample()
-            return samp_aatype
-
-        def design_with_fullmpnn(batched_coords, seq_mask):
-            seq_lens = seq_mask.sum(-1).long()
-            designed_seqs = [
-                evaluation.design_sequence(c[: seq_lens[i]], model=fullmpnn_model)[0]
-                for i, c in enumerate(batched_coords)
-            ]
-            designed_aatypes, _ = utils.batched_seq_to_aatype_and_mask(
-                designed_seqs, max_len=seq_mask.shape[-1]
-            )
-            return designed_aatypes
-
-        # Initialize masks/features
-        if seq_mask is None:  # Sample random lengths
-            assert gt_aatype is None  # Don't condition on aatype without seq_mask
-            seq_mask = self.make_seq_mask_for_sampling(
-                n_samples=n_samples,
-                min_len=min_len,
-                max_len=max_len,
-            )
-        if residue_index is None:
-            residue_index = torch.arange(seq_mask.shape[-1])
-            residue_index = repeat(residue_index, "n -> b n", b=seq_mask.shape[0])
-            residue_index = residue_index.to(seq_mask) * seq_mask
-        if use_fullmpnn or use_fullmpnn_for_final:
-            fullmpnn_model = protein_mpnn.get_mpnn_model(
-                path_to_model_weights=self.config.train.home_dir
-                + "/ProteinMPNN/vanilla_model_weights",
-                device=self.device,
-            )
-
-        # Initialize noise schedule/parameters
-        to_batch_size = lambda x: x * torch.ones(seq_mask.shape[0]).to(self.device)
-        s_t_min = s_t_min * self.sigma_data
-        s_t_max = s_t_max * self.sigma_data
-        if noise_schedule is None:
-            noise_schedule = self.sampling_noise_schedule_default
-        sigma = noise_schedule(1)
-        timesteps = torch.linspace(1, 0, n_steps + 1)
-
-        # Set up conditioning/guidance information
-        crop_cond_coords = None
-        if gt_coords is None:
-            coords_shape = seq_mask.shape + (self.n_atoms, 3)
-            xt = torch.randn(*coords_shape).to(self.device) * sigma
-            xt *= utils.expand(seq_mask, xt)
-        else:
-            assert gt_coords_traj is None
-            noise_levels = [to_batch_size(noise_schedule(t)) for t in timesteps]
-            gt_coords_traj = [
-                diffusion.noise_coords(gt_coords, nl) for nl in noise_levels
-            ]
-            xt = gt_coords_traj[0]
-            if gt_cond_atom_mask is not None:
-                crop_cond_coords = gt_coords * gt_cond_atom_mask[..., None]
-        gt_atom_mask = None
-        if gt_aatype is not None:
-            gt_atom_mask = utils.atom37_mask_from_aatype(gt_aatype, seq_mask)
-        fake_logits = repeat(seq_mask, "b n -> b n t", t=self.n_tokens)
-        s_hat = (sample_aatype(fake_logits) * seq_mask).long()
-
-        # Initialize superposition for all-atom sampling
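-        # Each residue keeps a 73-slot "superposition" over all possible sidechain
-        # atoms; at every step only the 37-atom subset implied by the current
-        # sequence s_hat is denoised, and the result is written back into the
-        # 73-slot state below.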
-        if sidechain_mode:
-            b, n = seq_mask.shape[:2]
-
-            # Latest predicted x0 for sidechain superpositions
-            atom73_state_0 = torch.zeros(b, n, 73, 3).to(xt)
-
-            # Current state xt for sidechain superpositions (denoised to different levels)
-            atom73_state_t = torch.randn(b, n, 73, 3).to(xt) * sigma
-
-            # Noise level of xt
-            sigma73_last = torch.ones(b, n, 73).to(xt) * sigma
-
-            # Seqhat and mask used to choose sidechains for euler step (b, n)
-            s_hat = (seq_mask * 7).long()
-            mask37 = utils.atom37_mask_from_aatype(s_hat, seq_mask).bool()
-            mask73 = utils.atom73_mask_from_aatype(s_hat, seq_mask).bool()
-            begin_mpnn_step = int(n_steps * skip_mpnn_proportion)
-
-        # Prepare to run sampling trajectory
-        sigma = to_batch_size(sigma)
-        x0 = None
-        x0_prev = None
-        x_self_cond = None
-        s_logprobs = None
-        s_self_cond = None
-        if tqdm_pbar is None:
-            tqdm_pbar = lambda x: x
-        torch.set_grad_enabled(False)
-
-        # *t_traj is the denoising trajectory; *0_traj is the evolution of predicted clean data
-        # s0 are aatype probs of shape (b n t); s_hat are discrete aatype of shape (b n)
-        xt_traj, x0_traj, st_traj, s0_traj = [], [], [], []
-
-        # Sampling trajectory
-        for i, t in tqdm_pbar(enumerate(iter(timesteps[1:]))):
-            # Set up noise levels
-            sigma_next = noise_schedule(t)
-            if i == n_steps - 1:
-                sigma_next *= 0
-            gamma = (
-                s_churn / n_steps
-                if (sigma_next >= s_t_min and sigma_next <= s_t_max)
-                else 0.0
-            )
-            sigma_next = to_batch_size(sigma_next)
-
-            if sidechain_mode:
-                # Fill in noise for masked positions since xt is initialized to zeros at each step
-                dummy_fill_noise = torch.randn_like(xt) * utils.expand(sigma, xt)
-                zero_atom_mask = utils.atom37_mask_from_aatype(s_hat, seq_mask)
-                dummy_fill_mask = 1 - zero_atom_mask[..., None]
-                xt = xt * zero_atom_mask[..., None] + dummy_fill_noise * dummy_fill_mask
-            else:  # backbone only
-                bb_seq = (seq_mask * residue_constants.restype_order["G"]).long()
-                bb_atom_mask = utils.atom37_mask_from_aatype(bb_seq, seq_mask)
-                xt *= bb_atom_mask[..., None]
-
-            # Enable grad for reconstruction guidance
-            if use_reconstruction_guidance:
-                torch.set_grad_enabled(True)
-                xt.requires_grad = True
-
-            # Run denoising network
-            run_mpnn = not sidechain_mode or i > begin_mpnn_step
-            x0, s_logprobs, x_self_cond, s_self_cond = self.forward(
-                noisy_coords=xt,
-                noise_level=sigma,
-                seq_mask=seq_mask,
-                residue_index=residue_index,
-                struct_self_cond=x_self_cond,
-                struct_crop_cond=crop_cond_coords,
-                seq_self_cond=s_self_cond,
-                run_mpnn_model=run_mpnn,
-            )
-
-            # Compute additional stuff for guidance
-            if use_reconstruction_guidance:
-                loss = (x0 - gt_coords).pow(2).sum(-1)
-                loss = loss * gt_cond_atom_mask
-                loss = loss.sum() / gt_cond_atom_mask.sum().clamp(min=1)
-                xt.retain_grad()
-                loss.backward()
-                guidance = xt.grad.clone()
-                xt.grad *= 0
-                torch.set_grad_enabled(False)
-            if use_classifier_free_guidance:
-                assert not use_reconstruction_guidance
-                uncond_x0, _, _, _ = self.forward(
-                    noisy_coords=xt,
-                    noise_level=sigma,
-                    seq_mask=seq_mask,
-                    residue_index=residue_index,
-                    struct_self_cond=x_self_cond,
-                    seq_self_cond=s_self_cond,
-                    run_mpnn_model=run_mpnn,
-                )
-
-            # Structure denoising step
-            if not sidechain_mode:  # backbone
-                if sigma[0] > 0:
-                    xt = ode_step(sigma, sigma_next, xt, x0, gamma)
-                else:
-                    xt = x0
-            else:  # allatom
-                # Write x0 into atom73_state_0 for atoms corresponding to old seqhat
-                atom73_state_0[mask73] = x0[mask37]
-
-                # Determine sequence resampling probability
-                if anneal_seq_resampling_rate is not None:
-                    step_time = 1 - (i - begin_mpnn_step) / max(
-                        1, n_steps - begin_mpnn_step
-                    )
-                    if anneal_seq_resampling_rate == "linear":
-                        resampling_rate = step_time
-                    elif anneal_seq_resampling_rate == "cosine":
-                        k = 2
-                        resampling_rate = (
-                            1 + np.cos(2 * np.pi * (step_time - 0.5))
-                        ) / k
-                    resample_this_step = np.random.uniform() < resampling_rate
-
-                # Resample sequence or design with full ProteinMPNN
-                if i == n_steps - 1 and use_fullmpnn_for_final:
-                    s_hat = design_with_fullmpnn(x0, seq_mask).to(x0.device)
-                elif anneal_seq_resampling_rate is None or resample_this_step:
-                    if run_mpnn and use_fullmpnn:
-                        s_hat = design_with_fullmpnn(x0, seq_mask).to(x0.device)
-                    else:
-                        s_hat = sample_aatype(s_logprobs)
-
-                # Overwrite s_hat with any conditioning information
-                if (i + 1) / n_steps <= apply_cond_proportion:
-                    if gt_cond_seq_mask is not None and gt_aatype is not None:
-                        s_hat = (
-                            1 - gt_cond_seq_mask
-                        ) * s_hat + gt_cond_seq_mask * gt_aatype
-                        s_hat = s_hat.long()
-
-                # Set masks for collapsing superposition using new sequence
-                mask37 = utils.atom37_mask_from_aatype(s_hat, seq_mask).bool()
-                mask73 = utils.atom73_mask_from_aatype(s_hat, seq_mask).bool()
-
-                # Determine prev noise levels for atoms corresponding to new sequence
-                step_sigma_prev = (
-                    torch.ones(*xt.shape[:-1]).to(xt) * sigma[..., None, None]
-                )
-                step_sigma_prev[mask37] = sigma73_last[mask73]  # b, n, 37
-                step_sigma_next = sigma_next[..., None, None]  # b, 1, 1
-
-                # Denoising step on atoms corresponding to new sequence
-                b, n = mask37.shape[:2]
-                step_xt = torch.zeros(b, n, 37, 3).to(xt)
-                step_x0 = torch.zeros(b, n, 37, 3).to(xt)
-                step_xt[mask37] = atom73_state_t[mask73]
-                step_x0[mask37] = atom73_state_0[mask73]
-
-                guidance_in = None
-                if (i + 1) / n_steps <= apply_cond_proportion:
-                    if use_reconstruction_guidance:
-                        guidance_in = (guidance, mask37.float())
-                    elif use_classifier_free_guidance:
-                        guidance_in = uncond_x0
-
-                step_xt = ode_step(
-                    step_sigma_prev,
-                    step_sigma_next,
-                    step_xt,
-                    step_x0,
-                    gamma,
-                    guidance_in=guidance_in,
-                )
-                xt = step_xt
-
-                # Write new xt into atom73_state_t for atoms corresponding to new seqhat and update sigma_last
-                atom73_state_t[mask73] = step_xt[mask37]
-                sigma73_last[mask73] = step_sigma_next[0].item()
-
-            # Replacement guidance if conditioning information provided
-            if (i + 1) / n_steps <= apply_cond_proportion:
-                if gt_coords_traj is not None:
-                    if gt_cond_atom_mask is None:
-                        xt = gt_coords_traj[i + 1]
-                    else:
-                        xt = (1 - gt_cond_atom_mask)[
-                            ..., None
-                        ] * xt + gt_cond_atom_mask[..., None] * gt_coords_traj[i + 1]
-
-            sigma = sigma_next
-
-            # Logging
-            xt_scale = self.sigma_data / utils.expand(
-                torch.sqrt(sigma_next**2 + self.sigma_data**2), xt
-            )
-            scaled_xt = xt * xt_scale
-            xt_traj.append(scaled_xt.cpu())
-            x0_traj.append(x0.cpu())
-            st_traj.append(s_hat.cpu())
-            s0_traj.append(s_logprobs.cpu())
-
-        if return_last:
-            return xt, s_hat, seq_mask
-        elif return_aux:
-            return {
-                "x": xt,
-                "s": s_hat,
-                "seq_mask": seq_mask,
-                "xt_traj": xt_traj,
-                "x0_traj": x0_traj,
-                "st_traj": st_traj,
-                "s0_traj": s0_traj,
-            }
-        else:
-            return xt_traj, x0_traj, st_traj, s0_traj, seq_mask
diff --git a/spaces/RMXK/RVC_HFF/julius/bands.py b/spaces/RMXK/RVC_HFF/julius/bands.py
deleted file mode 100644
index ef2162440b69e960770aa7bf81b9aaec48a63243..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/julius/bands.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
-# Author: adefossez, 2020
-"""
-Decomposition of a signal over frequency bands in the waveform domain.
-"""
-from typing import Optional, Sequence
-import torch
-
-from .core import mel_frequencies
-from .lowpass import LowPassFilters
-from .utils import simple_repr
-
-
-class SplitBands(torch.nn.Module):
-    """
-    Decomposes a signal over the given frequency bands in the waveform domain using
-    a cascade of low pass filters as implemented by `julius.lowpass.LowPassFilters`.
-    You can either specify explicitely the frequency cutoffs, or just the number of bands,
-    in which case the frequency cutoffs will be spread out evenly in mel scale.
-
-    Args:
-        sample_rate (float): Sample rate of the input signal in Hz.
-        n_bands (int or None): number of bands, when not giving them explicitly with `cutoffs`.
-            In that case, the cutoff frequencies will be evenly spaced in mel-space.
-        cutoffs (list[float] or None): list of frequency cutoffs in Hz.
-        pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
-            the output will have the same length as the input.
-        zeros (float): Number of zero crossings to keep. See `LowPassFilters` for more information.
-        fft (bool or None): See `LowPassFilters` for more info.
-
-    .. note::
-        The sum of all the bands will always be the input signal.
-
-    .. warning::
-        Unlike `julius.lowpass.LowPassFilters`, the cutoff frequencies must be provided in Hz along
-        with the sample rate.
-
-    Shape:
-
-        - Input: `[*, T]`
-        - Output: `[B, *, T']`, with `T'=T` if `pad` is True.
-            If `n_bands` was provided, `B = n_bands` otherwise `B = len(cutoffs) + 1`
-
-    >>> bands = SplitBands(sample_rate=128, n_bands=10)
-    >>> x = torch.randn(6, 4, 1024)
-    >>> list(bands(x).shape)
-    [10, 6, 4, 1024]
-    """
-
-    def __init__(self, sample_rate: float, n_bands: Optional[int] = None,
-                 cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
-                 zeros: float = 8, fft: Optional[bool] = None):
-        super().__init__()
-        if (cutoffs is None) + (n_bands is None) != 1:
-            raise ValueError("You must provide either n_bands, or cutoffs, but not boths.")
-
-        self.sample_rate = sample_rate
-        self.n_bands = n_bands
-        self._cutoffs = list(cutoffs) if cutoffs is not None else None
-        self.pad = pad
-        self.zeros = zeros
-        self.fft = fft
-
-        if cutoffs is None:
-            if n_bands is None:
-                raise ValueError("You must provide one of n_bands or cutoffs.")
-            if not n_bands >= 1:
-                raise ValueError(f"n_bands must be greater than one (got {n_bands})")
-            cutoffs = mel_frequencies(n_bands + 1, 0, sample_rate / 2)[1:-1]
-        else:
-            if max(cutoffs) > 0.5 * sample_rate:
-                raise ValueError("A cutoff above sample_rate/2 does not make sense.")
-        if len(cutoffs) > 0:
-            self.lowpass = LowPassFilters(
-                [c / sample_rate for c in cutoffs], pad=pad, zeros=zeros, fft=fft)
-        else:
-            # Here I cannot make both TorchScript and MyPy happy.
-            # I miss the good old times, before all this madness was created.
-            self.lowpass = None  # type: ignore
-
-    def forward(self, input):
-        if self.lowpass is None:
-            return input[None]
-        lows = self.lowpass(input)
-        low = lows[0]
-        bands = [low]
-        for low_and_band in lows[1:]:
-            # Get a bandpass filter by subtracting lowpasses
-            band = low_and_band - low
-            bands.append(band)
-            low = low_and_band
-        # Last band is whatever is left in the signal
-        bands.append(input - low)
-        return torch.stack(bands)
-
-    @property
-    def cutoffs(self):
-        if self._cutoffs is not None:
-            return self._cutoffs
-        elif self.lowpass is not None:
-            return [c * self.sample_rate for c in self.lowpass.cutoffs]
-        else:
-            return []
-
-    def __repr__(self):
-        return simple_repr(self, overrides={"cutoffs": self._cutoffs})
-
-
-def split_bands(signal: torch.Tensor, sample_rate: float, n_bands: Optional[int] = None,
-                cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
-                zeros: float = 8, fft: Optional[bool] = None):
-    """
-    Functional version of `SplitBands`, refer to this class for more information.
-
-    >>> x = torch.randn(6, 4, 1024)
-    >>> list(split_bands(x, sample_rate=64, cutoffs=[12, 24]).shape)
-    [3, 6, 4, 1024]
-    """
-    return SplitBands(sample_rate, n_bands, cutoffs, pad, zeros, fft).to(signal)(signal)
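-
-
-# Sanity-check sketch (hypothetical values): the bands telescope as
-# (low_0, low_1 - low_0, ..., x - low_last), so they sum back to the input:
-#     x = torch.randn(2, 1024)
-#     bands = split_bands(x, sample_rate=64, n_bands=4)
-#     assert torch.allclose(bands.sum(0), x, atol=1e-5)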
diff --git a/spaces/Raghav001/Experiment/app.py b/spaces/Raghav001/Experiment/app.py
deleted file mode 100644
index 75b9acedaf6070a1f78b6492ecbe23cc038b2c9c..0000000000000000000000000000000000000000
--- a/spaces/Raghav001/Experiment/app.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import requests
-import json
-import gradio as gr
-# from concurrent.futures import ThreadPoolExecutor
-import pdfplumber
-import pandas as pd
-import langchain
-import time
-from cnocr import CnOcr
-
-# from langchain.document_loaders import PyPDFLoader
-from langchain.document_loaders import UnstructuredWordDocumentLoader
-from langchain.document_loaders import UnstructuredPowerPointLoader
-# from langchain.document_loaders.image import UnstructuredImageLoader
-
-
-
-
-from sentence_transformers import SentenceTransformer, models, util
-word_embedding_model = models.Transformer('sentence-transformers/all-MiniLM-L6-v2', do_lower_case=True)
-pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
-embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model])
-ocr = CnOcr()
-# chat_url = 'https://Raghav001-API.hf.space/sale'
-chat_url = 'https://Raghav001-API.hf.space/chatpdf'
-headers = {
-    'Content-Type': 'application/json',
-}
-# thread_pool_executor = ThreadPoolExecutor(max_workers=4)
-history_max_len = 500
-all_max_len = 3000
-
-
-def get_emb(text):
-    emb_url = 'https://Raghav001-API.hf.space/embeddings'
-    data = {"content": text}
-    try:
-        result = requests.post(url=emb_url,
-                               data=json.dumps(data),
-                               headers=headers
-                               )
-        return result.json()['data'][0]['embedding']
-    except Exception as e:
-        # `result` may be unbound if the request itself failed, so log the exception instead
-        print('embedding request failed:', e, 'data:', data)
-
-
-def doc_emb(doc: str):
-    texts = doc.split('\n')
-    # futures = []
-    emb_list = embedder.encode(texts)
-    # for text in texts:
-    #     futures.append(thread_pool_executor.submit(get_emb, text))
-    # for f in futures:
-    #     emb_list.append(f.result())
-    print('\n'.join(texts))
-    gr.Textbox.update(value="")
-    return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update(
-        value="Success! Let's talk"), gr.Chatbot.update(visible=True)
-
-
-def get_response(msg, bot, doc_text_list, doc_embeddings):
-    # future = thread_pool_executor.submit(get_emb, msg)
-    gr.Textbox.update(value="")
-    now_len = len(msg)
-    req_json = {'question': msg}
-    his_bg = -1
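-    # Walk the chat history backwards, keeping the most recent turns that fit in history_max_len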
-    for i in range(len(bot) - 1, -1, -1):
-        if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len:
-            break
-        now_len += len(bot[i][0]) + len(bot[i][1])
-        his_bg = i
-    req_json['history'] = [] if his_bg == -1 else bot[his_bg:]
-    # query_embedding = future.result()
-    query_embedding = embedder.encode([msg])
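-    # Rank every document chunk by cosine similarity to the query embedding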
-    cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0]
-    score_index = [[score, index] for score, index in zip(cos_scores, [i for i in range(len(cos_scores))])]
-    score_index.sort(key=lambda x: x[0], reverse=True)
-    print('score_index:\n', score_index)
-    index_set, sub_doc_list = set(), []
-    for s_i in score_index:
-        doc = doc_text_list[s_i[1]]
-        if now_len + len(doc) > all_max_len:
-            break
-        index_set.add(s_i[1])
-        now_len += len(doc)
-        # The chunk may have been split mid-paragraph, so also pull in the
-        # neighboring chunks for context
-        if s_i[1] > 0 and s_i[1] - 1 not in index_set:
-            doc = doc_text_list[s_i[1] - 1]
-            if now_len + len(doc) > all_max_len:
-                break
-            index_set.add(s_i[1] - 1)
-            now_len += len(doc)
-        if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set:
-            doc = doc_text_list[s_i[1] + 1]
-            if now_len + len(doc) > all_max_len:
-                break
-            index_set.add(s_i[1] + 1)
-            now_len += len(doc)
-
-    index_list = list(index_set)
-    index_list.sort()
-    for i in index_list:
-        sub_doc_list.append(doc_text_list[i])
-    req_json['doc'] = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list)
-    data = {"content": json.dumps(req_json)}
-    print('data:\n', req_json)
-    result = requests.post(url=chat_url,
-                           data=json.dumps(data),
-                           headers=headers
-                           )
-    res = result.json()['content']
-    bot.append([msg, res])
-    return bot[max(0, len(bot) - 3):]
-
-
-def up_file(fls):
-    doc_text_list = []
-
-    
-    names = []
-    for i in fls:
-        names.append(str(i.name))
-    print(names)
-
-    
-    pdf = []
-    docs = []
-    pptx = []
-
-    for i in names:
-        
-        if i[-3:] == "pdf":
-            pdf.append(i)
-        elif i[-4:] == "docx":
-            docs.append(i)
-        else:
-            pptx.append(i)
-
-
-    # PDF extraction
-    for idx, file in enumerate(pdf):
-        # Use a distinct name so the handle does not shadow the `pdf` list being iterated
-        with pdfplumber.open(file) as pdf_doc:
-            for i in range(len(pdf_doc.pages)):
-                # Read page i+1 of the PDF document
-                page = pdf_doc.pages[i]
-                res_list = page.extract_text().split('\n')[:-1]
-
-                for j in range(len(page.images)):
-                    # Get the binary stream of the image
-                    img = page.images[j]
-                    file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j))
-                    with open(file_name, mode='wb') as f:
-                        f.write(img['stream'].get_data())
-                    try:
-                        res = ocr.ocr(file_name)
-                        # res = PyPDFLoader(file_name)
-                    except Exception as e:
-                        res = []
-                    if len(res) > 0:
-                        res_list.append(' '.join([re['text'] for re in res]))
-
-                tables = page.extract_tables()
-                for table in tables:
-                    # The first row of the table is used as the header
-                    df = pd.DataFrame(table[1:], columns=table[0])
-                    try:
-                        records = json.loads(df.to_json(orient="records", force_ascii=False))
-                        for rec in records:
-                            res_list.append(json.dumps(rec, ensure_ascii=False))
-                    except Exception as e:
-                        res_list.append(str(df))
-
-                doc_text_list += res_list
-
-    # PPTX extraction
-    for i in pptx:
-        loader = UnstructuredPowerPointLoader(i)
-        data = loader.load()
-        # content = str(data).split("'")
-        # cnt = content[1]
-        # # c = cnt.split('\\n\\n')
-        # # final = "".join(c)
-        # c = cnt.replace('\\n\\n',"").replace("<PAGE BREAK>","").replace("\t","")
-        doc_text_list.append(data)
-
-    
-
-    # DOCX extraction
-    for i in docs:
-        loader = UnstructuredWordDocumentLoader(i)
-        data = loader.load()
-        # content = str(data).split("'")
-        # cnt = content[1]
-        # # c = cnt.split('\\n\\n')
-        # # final = "".join(c)
-        # c = cnt.replace('\\n\\n',"").replace("<PAGE BREAK>","").replace("\t","")
-        doc_text_list.append(data)
-
-    # #Image Extraction
-    # for i in jpg:
-    #     loader = UnstructuredImageLoader(i)
-    #     data = loader.load()
-    #     # content = str(data).split("'")
-    #     # cnt = content[1]
-    #     # # c = cnt.split('\\n\\n')
-    #     # # final = "".join(c)
-    #     # c = cnt.replace('\\n\\n',"").replace("<PAGE BREAK>","").replace("\t","")
-    #     doc_text_list.append(data)
-                
-    doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0]
-    # print(doc_text_list)
-    return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update(
-        visible=True), gr.Markdown.update(
-        value="Processing")
-
-
-
-
-
-with gr.Blocks(css=".gradio-container {background: url('file= https://th.bing.com/th/id/OIP.VixxfZq3hIYiX_DGd3knTwHaEK?pid=ImgDet&rs=1')}") as demo:
-    with gr.Row():
-        with gr.Column():
-            file = gr.File(file_types=['.pptx','.docx','.pdf'], label='Click to upload Document', file_count='multiple')
-            doc_bu = gr.Button(value='Submit', visible=False)
-
-            
-            txt = gr.Textbox(label='result', visible=False)
-            
-            
-            doc_text_state = gr.State([])
-            doc_emb_state = gr.State([])
-        with gr.Column():
-            md = gr.Markdown("Please upload a document")
-            chat_bot = gr.Chatbot(visible=False)
-            msg_txt = gr.Textbox(visible = False)
-            chat_bu = gr.Button(value='Clear', visible=False)
-
-    file.change(up_file, [file], [txt, doc_bu, md]) #hiding the text
-    doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot])
-    msg_txt.submit(get_response, [msg_txt, chat_bot, doc_text_state, doc_emb_state], [chat_bot], queue=False)
-    chat_bu.click(lambda: None, None, chat_bot, queue=False)
-
-if __name__ == "__main__":
-    demo.queue().launch(show_api=False)
-    # demo.queue().launch(share=False, server_name='172.22.2.54', server_port=9191)
\ No newline at end of file
diff --git a/spaces/Rardilit/Rardilit-Panther_v1_test1/README.md b/spaces/Rardilit/Rardilit-Panther_v1_test1/README.md
deleted file mode 100644
index 698cd87dd2addfd81bd5346248ada6b23940b971..0000000000000000000000000000000000000000
--- a/spaces/Rardilit/Rardilit-Panther_v1_test1/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Rardilit-Panther V1 Test1
-emoji: 🌍
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.32.0
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_spinners.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_spinners.py
deleted file mode 100644
index d0bb1fe751677f0ee83fc6bb876ed72443fdcde7..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_spinners.py
+++ /dev/null
@@ -1,482 +0,0 @@
-"""
-Spinners are from:
-* cli-spinners:
-    MIT License
-    Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-    of this software and associated documentation files (the "Software"), to deal
-    in the Software without restriction, including without limitation the rights to
-    use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-    the Software, and to permit persons to whom the Software is furnished to do so,
-    subject to the following conditions:
-    The above copyright notice and this permission notice shall be included
-    in all copies or substantial portions of the Software.
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
-    INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-    PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
-    FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-    ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-    IN THE SOFTWARE.
-"""
-
-SPINNERS = {
-    "dots": {
-        "interval": 80,
-        "frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏",
-    },
-    "dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"},
-    "dots3": {
-        "interval": 80,
-        "frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓",
-    },
-    "dots4": {
-        "interval": 80,
-        "frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆",
-    },
-    "dots5": {
-        "interval": 80,
-        "frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋",
-    },
-    "dots6": {
-        "interval": 80,
-        "frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁",
-    },
-    "dots7": {
-        "interval": 80,
-        "frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈",
-    },
-    "dots8": {
-        "interval": 80,
-        "frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈",
-    },
-    "dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"},
-    "dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"},
-    "dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"},
-    "dots12": {
-        "interval": 80,
-        "frames": [
-            "⢀⠀",
-            "⡀⠀",
-            "⠄⠀",
-            "⢂⠀",
-            "⡂⠀",
-            "⠅⠀",
-            "⢃⠀",
-            "⡃⠀",
-            "⠍⠀",
-            "⢋⠀",
-            "⡋⠀",
-            "⠍⠁",
-            "⢋⠁",
-            "⡋⠁",
-            "⠍⠉",
-            "⠋⠉",
-            "⠋⠉",
-            "⠉⠙",
-            "⠉⠙",
-            "⠉⠩",
-            "⠈⢙",
-            "⠈⡙",
-            "⢈⠩",
-            "⡀⢙",
-            "⠄⡙",
-            "⢂⠩",
-            "⡂⢘",
-            "⠅⡘",
-            "⢃⠨",
-            "⡃⢐",
-            "⠍⡐",
-            "⢋⠠",
-            "⡋⢀",
-            "⠍⡁",
-            "⢋⠁",
-            "⡋⠁",
-            "⠍⠉",
-            "⠋⠉",
-            "⠋⠉",
-            "⠉⠙",
-            "⠉⠙",
-            "⠉⠩",
-            "⠈⢙",
-            "⠈⡙",
-            "⠈⠩",
-            "⠀⢙",
-            "⠀⡙",
-            "⠀⠩",
-            "⠀⢘",
-            "⠀⡘",
-            "⠀⠨",
-            "⠀⢐",
-            "⠀⡐",
-            "⠀⠠",
-            "⠀⢀",
-            "⠀⡀",
-        ],
-    },
-    "dots8Bit": {
-        "interval": 80,
-        "frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙"
-        "⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻"
-        "⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕"
-        "⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷"
-        "⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿",
-    },
-    "line": {"interval": 130, "frames": ["-", "\\", "|", "/"]},
-    "line2": {"interval": 100, "frames": "⠂-–—–-"},
-    "pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"},
-    "simpleDots": {"interval": 400, "frames": [".  ", ".. ", "...", "   "]},
-    "simpleDotsScrolling": {
-        "interval": 200,
-        "frames": [".  ", ".. ", "...", " ..", "  .", "   "],
-    },
-    "star": {"interval": 70, "frames": "✶✸✹✺✹✷"},
-    "star2": {"interval": 80, "frames": "+x*"},
-    "flip": {
-        "interval": 70,
-        "frames": "___-``'´-___",
-    },
-    "hamburger": {"interval": 100, "frames": "☱☲☴"},
-    "growVertical": {
-        "interval": 120,
-        "frames": "▁▃▄▅▆▇▆▅▄▃",
-    },
-    "growHorizontal": {
-        "interval": 120,
-        "frames": "▏▎▍▌▋▊▉▊▋▌▍▎",
-    },
-    "balloon": {"interval": 140, "frames": " .oO@* "},
-    "balloon2": {"interval": 120, "frames": ".oO°Oo."},
-    "noise": {"interval": 100, "frames": "▓▒░"},
-    "bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"},
-    "boxBounce": {"interval": 120, "frames": "▖▘▝▗"},
-    "boxBounce2": {"interval": 100, "frames": "▌▀▐▄"},
-    "triangle": {"interval": 50, "frames": "◢◣◤◥"},
-    "arc": {"interval": 100, "frames": "◜◠◝◞◡◟"},
-    "circle": {"interval": 120, "frames": "◡⊙◠"},
-    "squareCorners": {"interval": 180, "frames": "◰◳◲◱"},
-    "circleQuarters": {"interval": 120, "frames": "◴◷◶◵"},
-    "circleHalves": {"interval": 50, "frames": "◐◓◑◒"},
-    "squish": {"interval": 100, "frames": "╫╪"},
-    "toggle": {"interval": 250, "frames": "⊶⊷"},
-    "toggle2": {"interval": 80, "frames": "▫▪"},
-    "toggle3": {"interval": 120, "frames": "□■"},
-    "toggle4": {"interval": 100, "frames": "■□▪▫"},
-    "toggle5": {"interval": 100, "frames": "▮▯"},
-    "toggle6": {"interval": 300, "frames": "ဝ၀"},
-    "toggle7": {"interval": 80, "frames": "⦾⦿"},
-    "toggle8": {"interval": 100, "frames": "◍◌"},
-    "toggle9": {"interval": 100, "frames": "◉◎"},
-    "toggle10": {"interval": 100, "frames": "㊂㊀㊁"},
-    "toggle11": {"interval": 50, "frames": "⧇⧆"},
-    "toggle12": {"interval": 120, "frames": "☗☖"},
-    "toggle13": {"interval": 80, "frames": "=*-"},
-    "arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"},
-    "arrow2": {
-        "interval": 80,
-        "frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "],
-    },
-    "arrow3": {
-        "interval": 120,
-        "frames": ["▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"],
-    },
-    "bouncingBar": {
-        "interval": 80,
-        "frames": [
-            "[    ]",
-            "[=   ]",
-            "[==  ]",
-            "[=== ]",
-            "[ ===]",
-            "[  ==]",
-            "[   =]",
-            "[    ]",
-            "[   =]",
-            "[  ==]",
-            "[ ===]",
-            "[====]",
-            "[=== ]",
-            "[==  ]",
-            "[=   ]",
-        ],
-    },
-    "bouncingBall": {
-        "interval": 80,
-        "frames": [
-            "( ●    )",
-            "(  ●   )",
-            "(   ●  )",
-            "(    ● )",
-            "(     ●)",
-            "(    ● )",
-            "(   ●  )",
-            "(  ●   )",
-            "( ●    )",
-            "(●     )",
-        ],
-    },
-    "smiley": {"interval": 200, "frames": ["😄 ", "😝 "]},
-    "monkey": {"interval": 300, "frames": ["🙈 ", "🙈 ", "🙉 ", "🙊 "]},
-    "hearts": {"interval": 100, "frames": ["💛 ", "💙 ", "💜 ", "💚 ", "❤️ "]},
-    "clock": {
-        "interval": 100,
-        "frames": [
-            "🕛 ",
-            "🕐 ",
-            "🕑 ",
-            "🕒 ",
-            "🕓 ",
-            "🕔 ",
-            "🕕 ",
-            "🕖 ",
-            "🕗 ",
-            "🕘 ",
-            "🕙 ",
-            "🕚 ",
-        ],
-    },
-    "earth": {"interval": 180, "frames": ["🌍 ", "🌎 ", "🌏 "]},
-    "material": {
-        "interval": 17,
-        "frames": [
-            "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "███████▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "████████▁▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "██████████▁▁▁▁▁▁▁▁▁▁",
-            "███████████▁▁▁▁▁▁▁▁▁",
-            "█████████████▁▁▁▁▁▁▁",
-            "██████████████▁▁▁▁▁▁",
-            "██████████████▁▁▁▁▁▁",
-            "▁██████████████▁▁▁▁▁",
-            "▁██████████████▁▁▁▁▁",
-            "▁██████████████▁▁▁▁▁",
-            "▁▁██████████████▁▁▁▁",
-            "▁▁▁██████████████▁▁▁",
-            "▁▁▁▁█████████████▁▁▁",
-            "▁▁▁▁██████████████▁▁",
-            "▁▁▁▁██████████████▁▁",
-            "▁▁▁▁▁██████████████▁",
-            "▁▁▁▁▁██████████████▁",
-            "▁▁▁▁▁██████████████▁",
-            "▁▁▁▁▁▁██████████████",
-            "▁▁▁▁▁▁██████████████",
-            "▁▁▁▁▁▁▁█████████████",
-            "▁▁▁▁▁▁▁█████████████",
-            "▁▁▁▁▁▁▁▁████████████",
-            "▁▁▁▁▁▁▁▁████████████",
-            "▁▁▁▁▁▁▁▁▁███████████",
-            "▁▁▁▁▁▁▁▁▁███████████",
-            "▁▁▁▁▁▁▁▁▁▁██████████",
-            "▁▁▁▁▁▁▁▁▁▁██████████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁████████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁██████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
-            "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
-            "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
-            "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
-            "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
-            "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
-            "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "██████▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "████████▁▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "█████████▁▁▁▁▁▁▁▁▁▁▁",
-            "███████████▁▁▁▁▁▁▁▁▁",
-            "████████████▁▁▁▁▁▁▁▁",
-            "████████████▁▁▁▁▁▁▁▁",
-            "██████████████▁▁▁▁▁▁",
-            "██████████████▁▁▁▁▁▁",
-            "▁██████████████▁▁▁▁▁",
-            "▁██████████████▁▁▁▁▁",
-            "▁▁▁█████████████▁▁▁▁",
-            "▁▁▁▁▁████████████▁▁▁",
-            "▁▁▁▁▁████████████▁▁▁",
-            "▁▁▁▁▁▁███████████▁▁▁",
-            "▁▁▁▁▁▁▁▁█████████▁▁▁",
-            "▁▁▁▁▁▁▁▁█████████▁▁▁",
-            "▁▁▁▁▁▁▁▁▁█████████▁▁",
-            "▁▁▁▁▁▁▁▁▁█████████▁▁",
-            "▁▁▁▁▁▁▁▁▁▁█████████▁",
-            "▁▁▁▁▁▁▁▁▁▁▁████████▁",
-            "▁▁▁▁▁▁▁▁▁▁▁████████▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-            "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
-        ],
-    },
-    "moon": {
-        "interval": 80,
-        "frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "],
-    },
-    "runner": {"interval": 140, "frames": ["🚶 ", "🏃 "]},
-    "pong": {
-        "interval": 80,
-        "frames": [
-            "▐⠂       ▌",
-            "▐⠈       ▌",
-            "▐ ⠂      ▌",
-            "▐ ⠠      ▌",
-            "▐  ⡀     ▌",
-            "▐  ⠠     ▌",
-            "▐   ⠂    ▌",
-            "▐   ⠈    ▌",
-            "▐    ⠂   ▌",
-            "▐    ⠠   ▌",
-            "▐     ⡀  ▌",
-            "▐     ⠠  ▌",
-            "▐      ⠂ ▌",
-            "▐      ⠈ ▌",
-            "▐       ⠂▌",
-            "▐       ⠠▌",
-            "▐       ⡀▌",
-            "▐      ⠠ ▌",
-            "▐      ⠂ ▌",
-            "▐     ⠈  ▌",
-            "▐     ⠂  ▌",
-            "▐    ⠠   ▌",
-            "▐    ⡀   ▌",
-            "▐   ⠠    ▌",
-            "▐   ⠂    ▌",
-            "▐  ⠈     ▌",
-            "▐  ⠂     ▌",
-            "▐ ⠠      ▌",
-            "▐ ⡀      ▌",
-            "▐⠠       ▌",
-        ],
-    },
-    "shark": {
-        "interval": 120,
-        "frames": [
-            "▐|\\____________▌",
-            "▐_|\\___________▌",
-            "▐__|\\__________▌",
-            "▐___|\\_________▌",
-            "▐____|\\________▌",
-            "▐_____|\\_______▌",
-            "▐______|\\______▌",
-            "▐_______|\\_____▌",
-            "▐________|\\____▌",
-            "▐_________|\\___▌",
-            "▐__________|\\__▌",
-            "▐___________|\\_▌",
-            "▐____________|\\▌",
-            "▐____________/|▌",
-            "▐___________/|_▌",
-            "▐__________/|__▌",
-            "▐_________/|___▌",
-            "▐________/|____▌",
-            "▐_______/|_____▌",
-            "▐______/|______▌",
-            "▐_____/|_______▌",
-            "▐____/|________▌",
-            "▐___/|_________▌",
-            "▐__/|__________▌",
-            "▐_/|___________▌",
-            "▐/|____________▌",
-        ],
-    },
-    "dqpb": {"interval": 100, "frames": "dqpb"},
-    "weather": {
-        "interval": 100,
-        "frames": [
-            "☀️ ",
-            "☀️ ",
-            "☀️ ",
-            "🌤 ",
-            "⛅️ ",
-            "🌥 ",
-            "☁️ ",
-            "🌧 ",
-            "🌨 ",
-            "🌧 ",
-            "🌨 ",
-            "🌧 ",
-            "🌨 ",
-            "⛈ ",
-            "🌨 ",
-            "🌧 ",
-            "🌨 ",
-            "☁️ ",
-            "🌥 ",
-            "⛅️ ",
-            "🌤 ",
-            "☀️ ",
-            "☀️ ",
-        ],
-    },
-    "christmas": {"interval": 400, "frames": "🌲🎄"},
-    "grenade": {
-        "interval": 80,
-        "frames": [
-            "،   ",
-            "′   ",
-            " ´ ",
-            " ‾ ",
-            "  ⸌",
-            "  ⸊",
-            "  |",
-            "  ⁎",
-            "  ⁕",
-            " ෴ ",
-            "  ⁓",
-            "   ",
-            "   ",
-            "   ",
-        ],
-    },
-    "point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]},
-    "layer": {"interval": 150, "frames": "-=≡"},
-    "betaWave": {
-        "interval": 80,
-        "frames": [
-            "ρββββββ",
-            "βρβββββ",
-            "ββρββββ",
-            "βββρβββ",
-            "ββββρββ",
-            "βββββρβ",
-            "ββββββρ",
-        ],
-    },
-    "aesthetic": {
-        "interval": 80,
-        "frames": [
-            "▰▱▱▱▱▱▱",
-            "▰▰▱▱▱▱▱",
-            "▰▰▰▱▱▱▱",
-            "▰▰▰▰▱▱▱",
-            "▰▰▰▰▰▱▱",
-            "▰▰▰▰▰▰▱",
-            "▰▰▰▰▰▰▰",
-            "▰▱▱▱▱▱▱",
-        ],
-    },
-}
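Each entry above pairs a frame `interval` in milliseconds with a sequence of `frames` (a list of strings, or a plain string iterated character by character). A minimal rendering loop for such a spec might look like the sketch below; the `spinner` dict is a stand-in for any entry above, not part of the original file:

```python
import itertools
import sys
import time

# Stand-in for one entry of the table above: interval in ms, frames to cycle.
spinner = {"interval": 80, "frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "]}

for frame in itertools.islice(itertools.cycle(spinner["frames"]), 40):
    sys.stdout.write("\r" + frame)  # carriage return redraws the same line
    sys.stdout.flush()
    time.sleep(spinner["interval"] / 1000)
sys.stdout.write("\n")
```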
diff --git a/spaces/Rbrq/DeticChatGPT/tools/preprocess_imagenet22k.py b/spaces/Rbrq/DeticChatGPT/tools/preprocess_imagenet22k.py
deleted file mode 100644
index 6dda56c222a30c7be23fafbdab4be3fe611597e2..0000000000000000000000000000000000000000
--- a/spaces/Rbrq/DeticChatGPT/tools/preprocess_imagenet22k.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import os
-import numpy as np
-import sys
-
-sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
-sys.path.insert(0, 'third_party/Deformable-DETR')
-from detic.data.tar_dataset import _TarDataset, DiskTarDataset
-import pickle
-import io
-import gzip
-import time
-
-
-class _RawTarDataset(object):
-
-    def __init__(self, filename, indexname, preload=False):
-        self.filename = filename
-        self.names = []
-        self.offsets = []
-
-        for l in open(indexname):
-            ll = l.split()
-            a, b, c = ll[:3]
-            offset = int(b[:-1])
-            if l.endswith('** Block of NULs **\n'):
-                self.offsets.append(offset)
-                break
-            else:
-                if c.endswith('JPEG'):
-                    self.names.append(c)
-                    self.offsets.append(offset)
-                else:
-                    # ignore directories
-                    pass
-        if preload:
-            self.data = np.memmap(filename, mode='r', dtype='uint8')
-        else:
-            self.data = None
-
-    def __len__(self):
-        return len(self.names)
-
-    def __getitem__(self, idx):
-        if self.data is None:
-            self.data = np.memmap(self.filename, mode='r', dtype='uint8')
-        ofs = self.offsets[idx] * 512
-        fsize = 512 * (self.offsets[idx + 1] - self.offsets[idx])
-        data = self.data[ofs:ofs + fsize]
-
-        if data[:13].tobytes() == b'././@LongLink':  # compare as bytes; a str comparison is always False in Python 3
-            data = data[3 * 512:]
-        else:
-            data = data[512:]
-
-        # just to make it more fun a few JPEGs are GZIP compressed...
-        # catch this case
-        if tuple(data[:2]) == (0x1f, 0x8b):
-            s = io.BytesIO(data.tobytes())  # gzip needs a binary stream, not StringIO
-            g = gzip.GzipFile(None, 'r', 0, s)
-            sdata = g.read()
-        else:
-            sdata = data.tobytes()
-        return sdata
-
-
-
-def preprocess():
-    # Follow https://github.com/Alibaba-MIIL/ImageNet21K/blob/main/dataset_preprocessing/processing_script.sh
-    # Expect 12358684 samples with 11221 classes
-    # ImageNet folder has 21841 classes (synsets)
-
-    i22kdir = '/datasets01/imagenet-22k/062717/'
-    i22ktarlogs = '/checkpoint/imisra/datasets/imagenet-22k/tarindex'
-    class_names_file = '/checkpoint/imisra/datasets/imagenet-22k/words.txt'
-
-    output_dir = '/checkpoint/zhouxy/Datasets/ImageNet/metadata-22k/'
-    i22knpytarlogs = '/checkpoint/zhouxy/Datasets/ImageNet/metadata-22k/tarindex_npy'
-    print('Listing dir')
-    log_files = os.listdir(i22ktarlogs)
-    log_files = [x for x in log_files if x.endswith(".tarlog")]
-    log_files.sort()
-    chunk_datasets = []
-    dataset_lens = []
-    min_count = 0
-    create_npy_tarlogs = True
-    print('Creating folders')
-    if create_npy_tarlogs:
-        os.makedirs(i22knpytarlogs, exist_ok=True)
-        for log_file in log_files:
-            syn = log_file.replace(".tarlog", "")
-            dataset = _RawTarDataset(os.path.join(i22kdir, syn + ".tar"),
-                                    os.path.join(i22ktarlogs, syn + ".tarlog"),
-                                    preload=False)
-            names = np.array(dataset.names)
-            offsets = np.array(dataset.offsets, dtype=np.int64)
-            np.save(os.path.join(i22knpytarlogs, f"{syn}_names.npy"), names)
-            np.save(os.path.join(i22knpytarlogs, f"{syn}_offsets.npy"), offsets)
-
-    os.makedirs(output_dir, exist_ok=True)
-
-    start_time = time.time()
-    for log_file in log_files:
-        syn = log_file.replace(".tarlog", "")
-        dataset = _TarDataset(os.path.join(i22kdir, syn + ".tar"), i22knpytarlogs)
-        # dataset = _RawTarDataset(os.path.join(i22kdir, syn + ".tar"),
-        #                             os.path.join(i22ktarlogs, syn + ".tarlog"),
-        #                             preload=False)
-        dataset_lens.append(len(dataset))
-    end_time = time.time()
-    print(f"Time {end_time - start_time}")
-
-
-    dataset_lens = np.array(dataset_lens)
-    dataset_valid = dataset_lens > min_count
-
-    syn2class = {}
-    with open(class_names_file) as fh:
-        for line in fh:
-            line = line.strip().split("\t")
-            syn2class[line[0]] = line[1]
-
-    tarlog_files = []
-    class_names = []
-    tar_files = []
-    for k in range(len(dataset_valid)):
-        if not dataset_valid[k]:
-            continue
-        syn = log_files[k].replace(".tarlog", "")
-        tarlog_files.append(os.path.join(i22ktarlogs, syn + ".tarlog"))
-        tar_files.append(os.path.join(i22kdir, syn + ".tar"))
-        class_names.append(syn2class[syn])
-
-    tarlog_files = np.array(tarlog_files)
-    tar_files = np.array(tar_files)
-    class_names = np.array(class_names)
-    print(f"Have {len(class_names)} classes and {dataset_lens[dataset_valid].sum()} samples")
-
-    np.save(os.path.join(output_dir, "tarlog_files.npy"), tarlog_files)
-    np.save(os.path.join(output_dir, "tar_files.npy"), tar_files)
-    np.save(os.path.join(output_dir, "class_names.npy"), class_names)
-    np.save(os.path.join(output_dir, "tar_files.npy"), tar_files)
-
-
-if __name__ == "__main__":
-    preprocess()
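The index arithmetic in `_RawTarDataset.__getitem__` relies on tar's fixed 512-byte block layout: the recorded offsets count blocks, and each member's payload is preceded by one header block (three when a GNU `././@LongLink` entry carries a long file name). A standalone sketch of that read path, under the same assumptions:

```python
import numpy as np

def read_member(tar_path, block_offsets, idx):
    """Read one member's raw bytes from an uncompressed tar by block offset."""
    data = np.memmap(tar_path, mode="r", dtype="uint8")
    start = block_offsets[idx] * 512
    size = 512 * (block_offsets[idx + 1] - block_offsets[idx])
    raw = data[start:start + size]
    # GNU long-name entries add two extra 512-byte blocks before the payload.
    header = 3 * 512 if raw[:13].tobytes() == b"././@LongLink" else 512
    return raw[header:].tobytes()
```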
diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/demo/demo.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/demo/demo.py
deleted file mode 100644
index dceb13523faec756063b40fd586bcd81f483e274..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/demo/demo.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import os
-import sys
-
-ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-sys.path.insert(0, ROOT_DIR)
-
-from src.ASpanFormer.aspanformer import ASpanFormer
-from src.config.default import get_cfg_defaults
-from src.utils.misc import lower_config
-import demo_utils
-
-import cv2
-import torch
-import numpy as np
-
-import argparse
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--config_path",
-    type=str,
-    default="../configs/aspan/outdoor/aspan_test.py",
-    help="path for config file.",
-)
-parser.add_argument(
-    "--img0_path",
-    type=str,
-    default="../assets/phototourism_sample_images/piazza_san_marco_06795901_3725050516.jpg",
-    help="path for image0.",
-)
-parser.add_argument(
-    "--img1_path",
-    type=str,
-    default="../assets/phototourism_sample_images/piazza_san_marco_15148634_5228701572.jpg",
-    help="path for image1.",
-)
-parser.add_argument(
-    "--weights_path",
-    type=str,
-    default="../weights/outdoor.ckpt",
-    help="path for model weights.",
-)
-parser.add_argument(
-    "--long_dim0", type=int, default=1024, help="resize for longest dim of image0."
-)
-parser.add_argument(
-    "--long_dim1", type=int, default=1024, help="resize for longest dim of image1."
-)
-
-args = parser.parse_args()
-
-
-if __name__ == "__main__":
-    config = get_cfg_defaults()
-    config.merge_from_file(args.config_path)
-    _config = lower_config(config)
-    matcher = ASpanFormer(config=_config["aspan"])
-    state_dict = torch.load(args.weights_path, map_location="cpu")["state_dict"]
-    matcher.load_state_dict(state_dict, strict=False)
-    matcher.cuda(), matcher.eval()
-
-    img0, img1 = cv2.imread(args.img0_path), cv2.imread(args.img1_path)
-    img0_g, img1_g = cv2.imread(args.img0_path, 0), cv2.imread(args.img1_path, 0)
-    img0, img1 = demo_utils.resize(img0, args.long_dim0), demo_utils.resize(
-        img1, args.long_dim1
-    )
-    img0_g, img1_g = demo_utils.resize(img0_g, args.long_dim0), demo_utils.resize(
-        img1_g, args.long_dim1
-    )
-    data = {
-        "image0": torch.from_numpy(img0_g / 255.0)[None, None].cuda().float(),
-        "image1": torch.from_numpy(img1_g / 255.0)[None, None].cuda().float(),
-    }
-    with torch.no_grad():
-        matcher(data, online_resize=True)
-        corr0, corr1 = data["mkpts0_f"].cpu().numpy(), data["mkpts1_f"].cpu().numpy()
-
-    F_hat, mask_F = cv2.findFundamentalMat(
-        corr0, corr1, method=cv2.FM_RANSAC, ransacReprojThreshold=1
-    )
-    if mask_F is not None:
-        mask_F = mask_F[:, 0].astype(bool)
-    else:
-        mask_F = np.zeros_like(corr0[:, 0]).astype(bool)
-
-    # visualize match
-    display = demo_utils.draw_match(img0, img1, corr0, corr1)
-    display_ransac = demo_utils.draw_match(img0, img1, corr0[mask_F], corr1[mask_F])
-    cv2.imwrite("match.png", display)
-    cv2.imwrite("match_ransac.png", display_ransac)
-    print(len(corr1), len(corr1[mask_F]))
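`demo_utils.resize` is not shown in this diff; a typical longest-dimension resize consistent with how it is called above would be the following (an assumption about its behavior, not the actual helper):

```python
import cv2

def resize_long_dim(img, long_dim):
    """Scale img so that max(h, w) == long_dim, keeping the aspect ratio."""
    h, w = img.shape[:2]
    scale = long_dim / max(h, w)
    return cv2.resize(img, (int(round(w * scale)), int(round(h * scale))))
```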
diff --git a/spaces/Redgon/bingo/src/lib/hooks/use-bing.ts b/spaces/Redgon/bingo/src/lib/hooks/use-bing.ts
deleted file mode 100644
index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/lib/hooks/use-bing.ts
+++ /dev/null
@@ -1,173 +0,0 @@
-'use client'
-
-import { useState, useCallback, useEffect, useMemo } from 'react'
-import { useAtom, useAtomValue } from 'jotai'
-import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state'
-import { setConversationMessages } from './chat-history'
-import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types'
-import { nanoid } from '../utils'
-import { TTS } from '../bots/bing/tts'
-
-export function useBing(botId: BotId = 'bing') {
-  const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId])
-  const [enableTTS] = useAtom(voiceAtom)
-  const speaker = useMemo(() => new TTS(), [])
-  const [hash, setHash] = useAtom(hashAtom)
-  const bingConversationStyle = useAtomValue(bingConversationStyleAtom)
-  const [chatState, setChatState] = useAtom(chatAtom)
-  const [input, setInput] = useState('')
-  const [attachmentList, setAttachmentList] = useState<FileItem[]>([])
-
-  const updateMessage = useCallback(
-    (messageId: string, updater: (message: ChatMessageModel) => void) => {
-      setChatState((draft) => {
-        const message = draft.messages.find((m) => m.id === messageId)
-        if (message) {
-          updater(message)
-        }
-      })
-    },
-    [setChatState],
-  )
-
-  const sendMessage = useCallback(
-    async (input: string, options = {}) => {
-      const botMessageId = nanoid()
-      const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined
-      setChatState((draft) => {
-        const text = imageUrl ? `${input}\n\n![image](${imageUrl})` : input
-        draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' })
-        setAttachmentList([])
-      })
-      const abortController = new AbortController()
-      setChatState((draft) => {
-        draft.generatingMessageId = botMessageId
-        draft.abortController = abortController
-      })
-      speaker.reset()
-      await chatState.bot.sendMessage({
-        prompt: input,
-        imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl,
-        options: {
-          ...options,
-          bingConversationStyle,
-        },
-        signal: abortController.signal,
-        onEvent(event) {
-          if (event.type === 'UPDATE_ANSWER') {
-            updateMessage(botMessageId, (message) => {
-              if (event.data.text.length > message.text.length) {
-                message.text = event.data.text
-              }
-
-              if (event.data.spokenText && enableTTS) {
-                speaker.speak(event.data.spokenText)
-              }
-
-              message.throttling = event.data.throttling || message.throttling
-              message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions
-              message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses
-            })
-          } else if (event.type === 'ERROR') {
-            updateMessage(botMessageId, (message) => {
-              message.error = event.error
-            })
-            setChatState((draft) => {
-              draft.abortController = undefined
-              draft.generatingMessageId = ''
-            })
-          } else if (event.type === 'DONE') {
-            setChatState((draft) => {
-              draft.abortController = undefined
-              draft.generatingMessageId = ''
-            })
-          }
-        },
-      })
-    },
-    [botId, attachmentList, chatState.bot, setChatState, updateMessage],
-  )
-
-  const uploadImage = useCallback(async (imgUrl: string) => {
-    setAttachmentList([{ url: imgUrl, status: 'loading' }])
-    const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle)
-    if (response?.blobId) {
-      setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }])
-    } else {
-      setAttachmentList([{ url: imgUrl, status: 'error' }])
-    }
-  }, [chatState.bot])
-
-  const resetConversation = useCallback(() => {
-    chatState.bot.resetConversation()
-    speaker.abort()
-    setChatState((draft) => {
-      draft.abortController = undefined
-      draft.generatingMessageId = ''
-      draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }]
-      draft.conversationId = nanoid()
-    })
-  }, [chatState.bot, setChatState])
-
-  const stopGenerating = useCallback(() => {
-    chatState.abortController?.abort()
-    if (chatState.generatingMessageId) {
-      updateMessage(chatState.generatingMessageId, (message) => {
-        if (!message.text && !message.error) {
-          message.text = 'Cancelled'
-        }
-      })
-    }
-    setChatState((draft) => {
-      draft.generatingMessageId = ''
-    })
-  }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage])
-
-  useEffect(() => {
-    if (chatState.messages.length) {
-      setConversationMessages(botId, chatState.conversationId, chatState.messages)
-    }
-  }, [botId, chatState.conversationId, chatState.messages])
-
-  useEffect(() => {
-    if (hash === 'reset') {
-      resetConversation()
-      setHash('')
-    }
-  }, [hash, setHash])
-
-  const chat = useMemo(
-    () => ({
-      botId,
-      bot: chatState.bot,
-      isSpeaking: speaker.isSpeaking,
-      messages: chatState.messages,
-      sendMessage,
-      setInput,
-      input,
-      resetConversation,
-      generating: !!chatState.generatingMessageId,
-      stopGenerating,
-      uploadImage,
-      setAttachmentList,
-      attachmentList,
-    }),
-    [
-      botId,
-      bingConversationStyle,
-      chatState.bot,
-      chatState.generatingMessageId,
-      chatState.messages,
-      speaker.isSpeaking,
-      setInput,
-      input,
-      setAttachmentList,
-      attachmentList,
-      resetConversation,
-      sendMessage,
-      stopGenerating,
-    ],
-  )
-
-  return chat
-}
diff --git a/spaces/Renxd/devast/app.py b/spaces/Renxd/devast/app.py
deleted file mode 100644
index a4491fa68b763a8a344f905b856e79f8ff7aabf7..0000000000000000000000000000000000000000
--- a/spaces/Renxd/devast/app.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import streamlit as st
-
-x = st.slider('Select a value')
-st.write(x, 'squared is', x * x)
\ No newline at end of file
diff --git a/spaces/Robert001/UniControl-Demo/annotator/openpose/util.py b/spaces/Robert001/UniControl-Demo/annotator/openpose/util.py
deleted file mode 100644
index 0a144ff7f2f6f0a2d544786cbe37fbd00b5cbd7d..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/openpose/util.py
+++ /dev/null
@@ -1,175 +0,0 @@
-'''
- * Copyright (c) 2023 Salesforce, Inc.
- * All rights reserved.
- * SPDX-License-Identifier: Apache License 2.0
- * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
- * By Can Qin
- * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
- * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
-'''
-
-
-import math
-import numpy as np
-import matplotlib
-import cv2
-
-
-def padRightDownCorner(img, stride, padValue):
-    h = img.shape[0]
-    w = img.shape[1]
-
-    pad = 4 * [None]
-    pad[0] = 0 # up
-    pad[1] = 0 # left
-    pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
-    pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
-
-    img_padded = img
-    pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
-    img_padded = np.concatenate((pad_up, img_padded), axis=0)
-    pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
-    img_padded = np.concatenate((pad_left, img_padded), axis=1)
-    pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
-    img_padded = np.concatenate((img_padded, pad_down), axis=0)
-    pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
-    img_padded = np.concatenate((img_padded, pad_right), axis=1)
-
-    return img_padded, pad
-
-# transfer caffe model weights to pytorch, matching layer names
-def transfer(model, model_weights):
-    transferred_model_weights = {}
-    for weights_name in model.state_dict().keys():
-        transferred_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
-    return transferred_model_weights
-
-# draw the body keypoints and limbs
-def draw_bodypose(canvas, candidate, subset):
-    stickwidth = 4
-    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
-               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
-               [1, 16], [16, 18], [3, 17], [6, 18]]
-
-    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
-              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
-              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
-    for i in range(18):
-        for n in range(len(subset)):
-            index = int(subset[n][i])
-            if index == -1:
-                continue
-            x, y = candidate[index][0:2]
-            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
-    for i in range(17):
-        for n in range(len(subset)):
-            index = subset[n][np.array(limbSeq[i]) - 1]
-            if -1 in index:
-                continue
-            cur_canvas = canvas.copy()
-            Y = candidate[index.astype(int), 0]
-            X = candidate[index.astype(int), 1]
-            mX = np.mean(X)
-            mY = np.mean(Y)
-            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
-            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
-            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
-            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
-            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
-    # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
-    # plt.imshow(canvas[:, :, [2, 1, 0]])
-    return canvas
-
-
-# images drawn directly by opencv do not look good.
-def draw_handpose(canvas, all_hand_peaks, show_number=False):
-    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
-             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
-
-    for peaks in all_hand_peaks:
-        for ie, e in enumerate(edges):
-            if np.sum(np.all(peaks[e], axis=1)==0)==0:
-                x1, y1 = peaks[e[0]]
-                x2, y2 = peaks[e[1]]
-                cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
-
-        for i, keypoint in enumerate(peaks):
-            x, y = keypoint
-            cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
-            if show_number:
-                cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
-    return canvas
-
-# detect hand according to body pose keypoints
-# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
-def handDetect(candidate, subset, oriImg):
-    # right hand: wrist 4, elbow 3, shoulder 2
-    # left hand: wrist 7, elbow 6, shoulder 5
-    ratioWristElbow = 0.33
-    detect_result = []
-    image_height, image_width = oriImg.shape[0:2]
-    for person in subset.astype(int):
-        # if any of three not detected
-        has_left = np.sum(person[[5, 6, 7]] == -1) == 0
-        has_right = np.sum(person[[2, 3, 4]] == -1) == 0
-        if not (has_left or has_right):
-            continue
-        hands = []
-        #left hand
-        if has_left:
-            left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
-            x1, y1 = candidate[left_shoulder_index][:2]
-            x2, y2 = candidate[left_elbow_index][:2]
-            x3, y3 = candidate[left_wrist_index][:2]
-            hands.append([x1, y1, x2, y2, x3, y3, True])
-        # right hand
-        if has_right:
-            right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
-            x1, y1 = candidate[right_shoulder_index][:2]
-            x2, y2 = candidate[right_elbow_index][:2]
-            x3, y3 = candidate[right_wrist_index][:2]
-            hands.append([x1, y1, x2, y2, x3, y3, False])
-
-        for x1, y1, x2, y2, x3, y3, is_left in hands:
-            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
-            # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
-            # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
-            # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
-            # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
-            # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
-            x = x3 + ratioWristElbow * (x3 - x2)
-            y = y3 + ratioWristElbow * (y3 - y2)
-            distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
-            distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
-            width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
-            # x-y refers to the center --> offset to topLeft point
-            # handRectangle.x -= handRectangle.width / 2.f;
-            # handRectangle.y -= handRectangle.height / 2.f;
-            x -= width / 2
-            y -= width / 2  # width = height
-            # clip boxes that overflow the image
-            if x < 0: x = 0
-            if y < 0: y = 0
-            width1 = width
-            width2 = width
-            if x + width > image_width: width1 = image_width - x
-            if y + width > image_height: width2 = image_height - y
-            width = min(width1, width2)
-            # keep only hand boxes of at least 20 pixels
-            if width >= 20:
-                detect_result.append([int(x), int(y), int(width), is_left])
-
-    '''
-    return value: [[x, y, w, True if left hand else False]].
-    width = height since the network requires a square input.
-    x, y are the coordinates of the top-left corner.
-    '''
-    return detect_result
-
-# get max index of 2d array
-def npmax(array):
-    arrayindex = array.argmax(1)
-    arrayvalue = array.max(1)
-    i = arrayvalue.argmax()
-    j = arrayindex[i]
-    return i, j
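As a quick check of `padRightDownCorner`'s contract, both spatial dimensions come back padded up to multiples of `stride`, with the pad amounts reported as `[up, left, down, right]` (a usage sketch, not part of the original file):

```python
import numpy as np

img = np.zeros((37, 50, 3), dtype=np.uint8)
img_padded, pad = padRightDownCorner(img, stride=8, padValue=128)

# 37 -> 40 and 50 -> 56, the next multiples of 8
assert img_padded.shape[0] % 8 == 0 and img_padded.shape[1] % 8 == 0
print(img_padded.shape, pad)  # (40, 56, 3) and [0, 0, 3, 6]
```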
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/ann_r50-d8.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
deleted file mode 100644
index a2cb653827e44e6015b3b83bc578003e614a6aa1..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
-    type='EncoderDecoder',
-    pretrained='open-mmlab://resnet50_v1c',
-    backbone=dict(
-        type='ResNetV1c',
-        depth=50,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        dilations=(1, 1, 2, 4),
-        strides=(1, 2, 1, 1),
-        norm_cfg=norm_cfg,
-        norm_eval=False,
-        style='pytorch',
-        contract_dilation=True),
-    decode_head=dict(
-        type='ANNHead',
-        in_channels=[1024, 2048],
-        in_index=[2, 3],
-        channels=512,
-        project_channels=256,
-        query_scales=(1, ),
-        key_pool_scales=(1, 3, 6, 8),
-        dropout_ratio=0.1,
-        num_classes=19,
-        norm_cfg=norm_cfg,
-        align_corners=False,
-        loss_decode=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-    auxiliary_head=dict(
-        type='FCNHead',
-        in_channels=1024,
-        in_index=2,
-        channels=256,
-        num_convs=1,
-        concat_input=False,
-        dropout_ratio=0.1,
-        num_classes=19,
-        norm_cfg=norm_cfg,
-        align_corners=False,
-        loss_decode=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-    # model training and testing settings
-    train_cfg=dict(),
-    test_cfg=dict(mode='whole'))
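Configs like this one are consumed by an mmseg-style model builder. A hedged sketch of the usual flow follows; the API names are taken from upstream mmsegmentation and assumed to match this vendored copy, and the config path is illustrative:

```python
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile("configs/_base_/models/ann_r50-d8.py")
model = build_segmentor(cfg.model)  # EncoderDecoder: ResNetV1c backbone + ANNHead (+ FCN aux head)
print(type(model).__name__)
```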
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py
deleted file mode 100644
index abfbe2624eecb73b029e9bcb7e2283bbf2a744ea..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .coarse_mask_head import CoarseMaskHead
-from .fcn_mask_head import FCNMaskHead
-from .feature_relay_head import FeatureRelayHead
-from .fused_semantic_head import FusedSemanticHead
-from .global_context_head import GlobalContextHead
-from .grid_head import GridHead
-from .htc_mask_head import HTCMaskHead
-from .mask_point_head import MaskPointHead
-from .maskiou_head import MaskIoUHead
-from .scnet_mask_head import SCNetMaskHead
-from .scnet_semantic_head import SCNetSemanticHead
-
-__all__ = [
-    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
-    'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
-    'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead'
-]
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py
deleted file mode 100644
index 374b7a878f1972c183941af28ba1df216ac1a60f..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import torch
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
-    'furthest_point_sampling_forward',
-    'furthest_point_sampling_with_dist_forward'
-])
-
-
-class FurthestPointSampling(Function):
-    """Uses iterative furthest point sampling to select a set of features whose
-    corresponding points have the furthest distance."""
-
-    @staticmethod
-    def forward(ctx, points_xyz: torch.Tensor,
-                num_points: int) -> torch.Tensor:
-        """
-        Args:
-            points_xyz (Tensor): (B, N, 3) where N > num_points.
-            num_points (int): Number of points in the sampled set.
-
-        Returns:
-             Tensor: (B, num_points) indices of the sampled points.
-        """
-        assert points_xyz.is_contiguous()
-
-        B, N = points_xyz.size()[:2]
-        output = torch.cuda.IntTensor(B, num_points)
-        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
-
-        ext_module.furthest_point_sampling_forward(
-            points_xyz,
-            temp,
-            output,
-            b=B,
-            n=N,
-            m=num_points,
-        )
-        if torch.__version__ != 'parrots':
-            ctx.mark_non_differentiable(output)
-        return output
-
-    @staticmethod
-    def backward(xyz, a=None):
-        return None, None
-
-
-class FurthestPointSamplingWithDist(Function):
-    """Uses iterative furthest point sampling to select a set of features whose
-    corresponding points have the furthest distance."""
-
-    @staticmethod
-    def forward(ctx, points_dist: torch.Tensor,
-                num_points: int) -> torch.Tensor:
-        """
-        Args:
-            points_dist (Tensor): (B, N, N) Distance between each point pair.
-            num_points (int): Number of points in the sampled set.
-
-        Returns:
-             Tensor: (B, num_points) indices of the sampled points.
-        """
-        assert points_dist.is_contiguous()
-
-        B, N, _ = points_dist.size()
-        output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
-        temp = points_dist.new_zeros([B, N]).fill_(1e10)
-
-        ext_module.furthest_point_sampling_with_dist_forward(
-            points_dist, temp, output, b=B, n=N, m=num_points)
-        if torch.__version__ != 'parrots':
-            ctx.mark_non_differentiable(output)
-        return output
-
-    @staticmethod
-    def backward(xyz, a=None):
-        return None, None
-
-
-furthest_point_sample = FurthestPointSampling.apply
-furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply
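Both ops are forward-only (`backward` returns `None`s), and `furthest_point_sample` is CUDA-only as written, since its output is allocated with `torch.cuda.IntTensor`. A usage sketch under those assumptions:

```python
import torch

points = torch.rand(2, 1024, 3, device="cuda").contiguous()  # (B, N, 3)
idx = furthest_point_sample(points, 128)                     # (B, 128) int32 indices
# gather the sampled coordinates back out of the point cloud
gathered = torch.gather(points, 1, idx.long().unsqueeze(-1).expand(-1, -1, 3))
print(gathered.shape)  # torch.Size([2, 128, 3])
```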
diff --git a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_pndm.py b/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_pndm.py
deleted file mode 100644
index b43d88bbab7745e3e8579cc66f2ee2ed246e52d7..0000000000000000000000000000000000000000
--- a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_pndm.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
-
-import math
-from typing import Optional, Tuple, Union
-
-import numpy as np
-import torch
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from .scheduling_utils import SchedulerMixin, SchedulerOutput
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
-    """
-    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
-    (1-beta) over time from t = [0,1].
-
-    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
-    to that part of the diffusion process.
-
-
-    Args:
-        num_diffusion_timesteps (`int`): the number of betas to produce.
-        max_beta (`float`): the maximum beta to use; use values lower than 1 to
-                     prevent singularities.
-
-    Returns:
-        betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
-    """
-
-    def alpha_bar(time_step):
-        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
-
-    betas = []
-    for i in range(num_diffusion_timesteps):
-        t1 = i / num_diffusion_timesteps
-        t2 = (i + 1) / num_diffusion_timesteps
-        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
-    return np.array(betas, dtype=np.float32)
-
-
-class PNDMScheduler(SchedulerMixin, ConfigMixin):
-    """
-    Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques,
-    namely the Runge-Kutta method and a linear multi-step method.
-
-    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
-    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
-    [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functions.
-
-    For more details, see the original paper: https://arxiv.org/abs/2202.09778
-
-    Args:
-        num_train_timesteps (`int`): number of diffusion steps used to train the model.
-        beta_start (`float`): the starting `beta` value of inference.
-        beta_end (`float`): the final `beta` value.
-        beta_schedule (`str`):
-            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
-            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-        trained_betas (`np.ndarray`, optional): TODO
-        tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
-        skip_prk_steps (`bool`):
-            allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
-            before plms steps; defaults to `False`.
-
-    """
-
-    @register_to_config
-    def __init__(
-        self,
-        num_train_timesteps: int = 1000,
-        beta_start: float = 0.0001,
-        beta_end: float = 0.02,
-        beta_schedule: str = "linear",
-        trained_betas: Optional[np.ndarray] = None,
-        tensor_format: str = "pt",
-        skip_prk_steps: bool = False,
-    ):
-        if trained_betas is not None:
-            self.betas = np.asarray(trained_betas)
-        if beta_schedule == "linear":
-            self.betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
-        elif beta_schedule == "scaled_linear":
-            # this schedule is very specific to the latent diffusion model.
-            self.betas = np.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=np.float32) ** 2
-        elif beta_schedule == "squaredcos_cap_v2":
-            # Glide cosine schedule
-            self.betas = betas_for_alpha_bar(num_train_timesteps)
-        else:
-            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
-        self.alphas = 1.0 - self.betas
-        self.alphas_cumprod = np.cumprod(self.alphas, axis=0)
-
-        self.one = np.array(1.0)
-
-        # For now we only support F-PNDM, i.e. the runge-kutta method
-        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
-        # mainly at formula (9), (12), (13) and the Algorithm 2.
-        self.pndm_order = 4
-
-        # running values
-        self.cur_model_output = 0
-        self.counter = 0
-        self.cur_sample = None
-        self.ets = []
-
-        # setable values
-        self.num_inference_steps = None
-        self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy()
-        self._offset = 0
-        self.prk_timesteps = None
-        self.plms_timesteps = None
-        self.timesteps = None
-
-        self.tensor_format = tensor_format
-        self.set_format(tensor_format=tensor_format)
-
-    def set_timesteps(self, num_inference_steps: int, offset: int = 0) -> torch.FloatTensor:
-        """
-        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
-        Args:
-            num_inference_steps (`int`):
-                the number of diffusion steps used when generating samples with a pre-trained model.
-            offset (`int`): TODO
-        """
-        self.num_inference_steps = num_inference_steps
-        self._timesteps = list(
-            range(0, self.config.num_train_timesteps, self.config.num_train_timesteps // num_inference_steps)
-        )
-        self._offset = offset
-        self._timesteps = np.array([t + self._offset for t in self._timesteps])
-
-        if self.config.skip_prk_steps:
-            # for some models like stable diffusion the prk steps can/should be skipped to
-            # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation
-            # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51
-            self.prk_timesteps = np.array([])
-            self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[
-                ::-1
-            ].copy()
-        else:
-            prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile(
-                np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order
-            )
-            self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy()
-            self.plms_timesteps = self._timesteps[:-3][
-                ::-1
-            ].copy()  # we copy to avoid having negative strides which are not supported by torch.from_numpy
-
-        self.timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64)
-
-        self.ets = []
-        self.counter = 0
-        self.set_format(tensor_format=self.tensor_format)
-
-    def step(
-        self,
-        model_output: Union[torch.FloatTensor, np.ndarray],
-        timestep: int,
-        sample: Union[torch.FloatTensor, np.ndarray],
-        return_dict: bool = True,
-    ) -> Union[SchedulerOutput, Tuple]:
-        """
-        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
-        process from the learned model outputs (most often the predicted noise).
-
-        This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`.
-
-        Args:
-            model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
-            timestep (`int`): current discrete timestep in the diffusion chain.
-            sample (`torch.FloatTensor` or `np.ndarray`):
-                current instance of sample being created by diffusion process.
-            return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
-        Returns:
-            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
-            [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
-            returning a tuple, the first element is the sample tensor.
-
-        """
-        if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps:
-            return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict)
-        else:
-            return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict)
-
-    def step_prk(
-        self,
-        model_output: Union[torch.FloatTensor, np.ndarray],
-        timestep: int,
-        sample: Union[torch.FloatTensor, np.ndarray],
-        return_dict: bool = True,
-    ) -> Union[SchedulerOutput, Tuple]:
-        """
-        Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
-        solution to the differential equation.
-
-        Args:
-            model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
-            timestep (`int`): current discrete timestep in the diffusion chain.
-            sample (`torch.FloatTensor` or `np.ndarray`):
-                current instance of sample being created by diffusion process.
-            return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
-        Returns:
-            [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
-            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
-
-        """
-        if self.num_inference_steps is None:
-            raise ValueError(
-                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
-            )
-
-        diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2
-        prev_timestep = max(timestep - diff_to_prev, self.prk_timesteps[-1])
-        timestep = self.prk_timesteps[self.counter // 4 * 4]
-
-        if self.counter % 4 == 0:
-            self.cur_model_output += 1 / 6 * model_output
-            self.ets.append(model_output)
-            self.cur_sample = sample
-        elif (self.counter - 1) % 4 == 0:
-            self.cur_model_output += 1 / 3 * model_output
-        elif (self.counter - 2) % 4 == 0:
-            self.cur_model_output += 1 / 3 * model_output
-        elif (self.counter - 3) % 4 == 0:
-            model_output = self.cur_model_output + 1 / 6 * model_output
-            self.cur_model_output = 0
-
-        # cur_sample should not be `None`
-        cur_sample = self.cur_sample if self.cur_sample is not None else sample
-
-        prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output)
-        self.counter += 1
-
-        if not return_dict:
-            return (prev_sample,)
-
-        return SchedulerOutput(prev_sample=prev_sample)
-
-    def step_plms(
-        self,
-        model_output: Union[torch.FloatTensor, np.ndarray],
-        timestep: int,
-        sample: Union[torch.FloatTensor, np.ndarray],
-        return_dict: bool = True,
-    ) -> Union[SchedulerOutput, Tuple]:
-        """
-        Step function propagating the sample with the linear multi-step method. This needs only one forward pass per
-        step, reusing multiple previous model outputs to approximate the solution.
-
-        Args:
-            model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
-            timestep (`int`): current discrete timestep in the diffusion chain.
-            sample (`torch.FloatTensor` or `np.ndarray`):
-                current instance of sample being created by diffusion process.
-            return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
-        Returns:
-            [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
-            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
-
-        """
-        if self.num_inference_steps is None:
-            raise ValueError(
-                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
-            )
-
-        if not self.config.skip_prk_steps and len(self.ets) < 3:
-            raise ValueError(
-                f"{self.__class__} can only be run AFTER scheduler has been run "
-                "in 'prk' mode for at least 12 iterations "
-                "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py "
-                "for more information."
-            )
-
-        prev_timestep = max(timestep - self.config.num_train_timesteps // self.num_inference_steps, 0)
-
-        if self.counter != 1:
-            self.ets.append(model_output)
-        else:
-            prev_timestep = timestep
-            timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
-
-        if len(self.ets) == 1 and self.counter == 0:
-            model_output = model_output
-            self.cur_sample = sample
-        elif len(self.ets) == 1 and self.counter == 1:
-            model_output = (model_output + self.ets[-1]) / 2
-            sample = self.cur_sample
-            self.cur_sample = None
-        elif len(self.ets) == 2:
-            model_output = (3 * self.ets[-1] - self.ets[-2]) / 2
-        elif len(self.ets) == 3:
-            model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
-        else:
-            model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
-
-        prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output)
-        self.counter += 1
-
-        if not return_dict:
-            return (prev_sample,)
-
-        return SchedulerOutput(prev_sample=prev_sample)
-
-    def _get_prev_sample(self, sample, timestep, timestep_prev, model_output):
-        # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
-        # this function computes x_(t−δ) using the formula of (9)
-        # Note that x_t needs to be added to both sides of the equation
-
-        # Notation (<variable name> -> <name in paper>
-        # alpha_prod_t -> α_t
-        # alpha_prod_t_prev -> α_(t−δ)
-        # beta_prod_t -> (1 - α_t)
-        # beta_prod_t_prev -> (1 - α_(t−δ))
-        # sample -> x_t
-        # model_output -> e_θ(x_t, t)
-        # prev_sample -> x_(t−δ)
-        alpha_prod_t = self.alphas_cumprod[timestep + 1 - self._offset]
-        alpha_prod_t_prev = self.alphas_cumprod[timestep_prev + 1 - self._offset]
-        beta_prod_t = 1 - alpha_prod_t
-        beta_prod_t_prev = 1 - alpha_prod_t_prev
-
-        # corresponds to (α_(t−δ) - α_t) divided by
-        # denominator of x_t in formula (9) and plus 1
-        # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
-        # sqrt(α_(t−δ)) / sqrt(α_t)
-        sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)
-
-        # corresponds to denominator of e_θ(x_t, t) in formula (9)
-        model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
-            alpha_prod_t * beta_prod_t * alpha_prod_t_prev
-        ) ** (0.5)
-
-        # full formula (9)
-        prev_sample = (
-            sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
-        )
-
-        return prev_sample
-
-    def add_noise(
-        self,
-        original_samples: Union[torch.FloatTensor, np.ndarray],
-        noise: Union[torch.FloatTensor, np.ndarray],
-        timesteps: Union[torch.IntTensor, np.ndarray],
-    ) -> torch.Tensor:
-        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
-        timesteps = timesteps.to(self.alphas_cumprod.device)
-        sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
-        sqrt_alpha_prod = self.match_shape(sqrt_alpha_prod, original_samples)
-        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
-        sqrt_one_minus_alpha_prod = self.match_shape(sqrt_one_minus_alpha_prod, original_samples)
-
-        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
-        return noisy_samples
-
-    def __len__(self):
-        return self.config.num_train_timesteps
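For orientation, a minimal denoising loop against this scheduler might look like the sketch below; the `unet` call is a placeholder for any ε-prediction model, not something defined in this file:

```python
import torch

scheduler = PNDMScheduler(skip_prk_steps=True)  # PLMS-only stepping, no warm-up PRK passes
scheduler.set_timesteps(50)

sample = torch.randn(1, 4, 64, 64)  # assumed latent shape
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = unet(sample, t)  # placeholder model call
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```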
diff --git a/spaces/Sambhavnoobcoder/StyleForge/app.py b/spaces/Sambhavnoobcoder/StyleForge/app.py
deleted file mode 100644
index 65a5344a6a05bce2ddc33c137c0bb6f24316666a..0000000000000000000000000000000000000000
--- a/spaces/Sambhavnoobcoder/StyleForge/app.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import gradio as gr
-import torch
-from transformers import logging
-import random
-from PIL import Image
-from Utils import MingleModel
-
-logging.set_verbosity_error()
-
-
-def get_concat_h(images):
-    widths, heights = zip(*(i.size for i in images))
-
-    total_width = sum(widths)
-    max_height = max(heights)
-
-    dst = Image.new('RGB', (total_width, max_height))
-    x_offset = 0
-    for im in images:
-      dst.paste(im, (x_offset,0))
-      x_offset += im.size[0]
-    return dst
-
-
-mingle_model = MingleModel()
-
-
-def mingle_prompts(first_prompt, second_prompt):
-    imgs = []
-    text_input1 = mingle_model.do_tokenizer(first_prompt)
-    text_input2 = mingle_model.do_tokenizer(second_prompt)
-    with torch.no_grad():
-        text_embeddings1 = mingle_model.get_text_encoder(text_input1)
-        text_embeddings2 = mingle_model.get_text_encoder(text_input2)
-
-    rand_generator = random.randint(1, 2048)
-    # Mix them together
-    # mix_factors = [0.1, 0.3, 0.5, 0.7, 0.9]
-    mix_factors = [0.5]
-    for mix_factor in mix_factors:
-        mixed_embeddings = (text_embeddings1 * mix_factor + text_embeddings2 * (1 - mix_factor))
-
-        # Generate!
-        steps = 20
-        guidance_scale = 8.0
-        img = mingle_model.generate_with_embs(mixed_embeddings, rand_generator, num_inference_steps=steps,
-                                 guidance_scale=guidance_scale)
-        imgs.append(img)
-
-    return get_concat_h(imgs)
-
-
-with gr.Blocks() as demo:
-    gr.Markdown(
-        '''
-        <h1 style="text-align: center;"> Fashion Generator GAN</h1>
-        ''')
-
-    gr.Markdown(
-        '''
-        <h3 style="text-align: center;"> Note : the gan is extremely resource extensive, so it running the inference on cpu takes long time . kindly wait patiently while the model generates the output. </h3>
-        ''')
-    
-    gr.Markdown(
-        '''
-        <p style="text-align: center;">generated an image as an average of 2 prompts inserted !!</p>
-        ''')
-
-    first_prompt = gr.Textbox(label="first_prompt")
-    second_prompt = gr.Textbox(label="second_prompt")
-    greet_btn = gr.Button("Submit")
-
-    gr.Markdown("# Output Results")
-    output = gr.Image(shape=(512,512))
-
-    greet_btn.click(fn=mingle_prompts, inputs=[first_prompt, second_prompt], outputs=[output])
-
-demo.launch()
-
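The app blends the two prompt embeddings linearly (`e1 * mix_factor + e2 * (1 - mix_factor)`). Spherical interpolation is a common alternative that preserves embedding magnitudes better; the sketch below is an illustration of that technique, not part of the app:

```python
import torch

def slerp(a, b, t, eps=1e-7):
    """Spherical interpolation between two embedding tensors of the same shape."""
    a_flat, b_flat = a.flatten(), b.flatten()
    # angle between the two embeddings, clamped for numerical safety
    omega = torch.acos(
        torch.dot(a_flat / a_flat.norm(), b_flat / b_flat.norm()).clamp(-1 + eps, 1 - eps)
    )
    return (torch.sin((1 - t) * omega) * a + torch.sin(t * omega) * b) / torch.sin(omega)
```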
diff --git a/spaces/SarthakSidhant/Go-Cattle/diseases/neoplasia (cancer).md b/spaces/SarthakSidhant/Go-Cattle/diseases/neoplasia (cancer).md
deleted file mode 100644
index 7241f22936f87586ccdca96966c0ea92e07d3c3b..0000000000000000000000000000000000000000
--- a/spaces/SarthakSidhant/Go-Cattle/diseases/neoplasia (cancer).md	
+++ /dev/null
@@ -1,43 +0,0 @@
-## Neoplasia (cancer)
-
-**Information** : Neoplasia is a general term for any abnormal growth of tissue. Cancer is a specific type of neoplasia that is characterized by uncontrolled cell growth and division.
-
-**Symptoms**
-
-The symptoms of neoplasia in cattle can vary depending on the type of cancer and the location of the tumor. Some common symptoms include:
-
-* Lumps or masses on the body
-* Weight loss
-* Loss of appetite
-* Coughing or difficulty breathing
-* Bleeding
-* Lameness
-* Depression
-
-**Remedies**
-
-The treatment of neoplasia in cattle depends on the type of cancer and the severity of the disease. Some common treatments include:
-
-* Surgery to remove the tumor
-* Chemotherapy to kill cancer cells
-* Radiation therapy to destroy cancer cells
-* Immunotherapy to boost the immune system's ability to fight cancer
-
-**Causes**
-
-The exact causes of neoplasia in cattle are not fully understood. However, there are a number of factors that are thought to increase the risk of cancer, including:
-
-* Age
-* Genetics
-* Exposure to environmental toxins
-* Viral infections
-* Immune system disorders
-
-**Prevention**
-
-There is no sure way to prevent neoplasia in cattle. However, there are some preventive measures that can be taken to reduce the risk, such as:
-
-* Vaccinating cattle against viral infections that can increase the risk of cancer
-* Avoiding exposure to environmental toxins
-* Practicing good herd management
-* Screening cattle for cancer at an early age
diff --git a/spaces/SeViLA/SeViLA/lavis/common/registry.py b/spaces/SeViLA/SeViLA/lavis/common/registry.py
deleted file mode 100644
index 9039d8aaa580f19cc0d43ed9330bd90055045867..0000000000000000000000000000000000000000
--- a/spaces/SeViLA/SeViLA/lavis/common/registry.py
+++ /dev/null
@@ -1,329 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-
-class Registry:
-    mapping = {
-        "builder_name_mapping": {},
-        "task_name_mapping": {},
-        "processor_name_mapping": {},
-        "model_name_mapping": {},
-        "lr_scheduler_name_mapping": {},
-        "runner_name_mapping": {},
-        "state": {},
-        "paths": {},
-    }
-
-    @classmethod
-    def register_builder(cls, name):
-        r"""Register a dataset builder to registry with key 'name'
-
-        Args:
-            name: Key with which the builder will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-            from lavis.datasets.base_dataset_builder import BaseDatasetBuilder
-        """
-
-        def wrap(builder_cls):
-            from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
-
-            assert issubclass(
-                builder_cls, BaseDatasetBuilder
-            ), "All builders must inherit BaseDatasetBuilder class, found {}".format(
-                builder_cls
-            )
-            if name in cls.mapping["builder_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["builder_name_mapping"][name]
-                    )
-                )
-            cls.mapping["builder_name_mapping"][name] = builder_cls
-            return builder_cls
-
-        return wrap
-
-    @classmethod
-    def register_task(cls, name):
-        r"""Register a task to registry with key 'name'
-
-        Args:
-            name: Key with which the task will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-
-        def wrap(task_cls):
-            from lavis.tasks.base_task import BaseTask
-
-            assert issubclass(
-                task_cls, BaseTask
-            ), "All tasks must inherit BaseTask class"
-            if name in cls.mapping["task_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["task_name_mapping"][name]
-                    )
-                )
-            cls.mapping["task_name_mapping"][name] = task_cls
-            return task_cls
-
-        return wrap
-
-    @classmethod
-    def register_model(cls, name):
-        r"""Register a task to registry with key 'name'
-
-        Args:
-            name: Key with which the task will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-
-        def wrap(model_cls):
-            from lavis.models import BaseModel
-
-            assert issubclass(
-                model_cls, BaseModel
-            ), "All models must inherit BaseModel class"
-            if name in cls.mapping["model_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["model_name_mapping"][name]
-                    )
-                )
-            cls.mapping["model_name_mapping"][name] = model_cls
-            return model_cls
-
-        return wrap
-
-    @classmethod
-    def register_processor(cls, name):
-        r"""Register a processor to registry with key 'name'
-
-        Args:
-            name: Key with which the processor will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-
-        def wrap(processor_cls):
-            from lavis.processors import BaseProcessor
-
-            assert issubclass(
-                processor_cls, BaseProcessor
-            ), "All processors must inherit BaseProcessor class"
-            if name in cls.mapping["processor_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["processor_name_mapping"][name]
-                    )
-                )
-            cls.mapping["processor_name_mapping"][name] = processor_cls
-            return processor_cls
-
-        return wrap
-
-    @classmethod
-    def register_lr_scheduler(cls, name):
-        r"""Register a model to registry with key 'name'
-
-        Args:
-            name: Key with which the task will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-
-        def wrap(lr_sched_cls):
-            if name in cls.mapping["lr_scheduler_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["lr_scheduler_name_mapping"][name]
-                    )
-                )
-            cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls
-            return lr_sched_cls
-
-        return wrap
-
-    @classmethod
-    def register_runner(cls, name):
-        r"""Register a model to registry with key 'name'
-
-        Args:
-            name: Key with which the task will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-
-        def wrap(runner_cls):
-            if name in cls.mapping["runner_name_mapping"]:
-                raise KeyError(
-                    "Name '{}' already registered for {}.".format(
-                        name, cls.mapping["runner_name_mapping"][name]
-                    )
-                )
-            cls.mapping["runner_name_mapping"][name] = runner_cls
-            return runner_cls
-
-        return wrap
-
-    @classmethod
-    def register_path(cls, name, path):
-        r"""Register a path to registry with key 'name'
-
-        Args:
-            name: Key with which the path will be registered.
-
-        Usage:
-
-            from lavis.common.registry import registry
-        """
-        assert isinstance(path, str), "All paths must be str."
-        if name in cls.mapping["paths"]:
-            raise KeyError("Name '{}' already registered.".format(name))
-        cls.mapping["paths"][name] = path
-
-    @classmethod
-    def register(cls, name, obj):
-        r"""Register an item to registry with key 'name'
-
-        Args:
-            name: Key with which the item will be registered.
-
-        Usage::
-
-            from lavis.common.registry import registry
-
-            registry.register("config", {})
-        """
-        path = name.split(".")
-        current = cls.mapping["state"]
-
-        for part in path[:-1]:
-            if part not in current:
-                current[part] = {}
-            current = current[part]
-
-        current[path[-1]] = obj
-
-    # @classmethod
-    # def get_trainer_class(cls, name):
-    #     return cls.mapping["trainer_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_builder_class(cls, name):
-        return cls.mapping["builder_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_model_class(cls, name):
-        return cls.mapping["model_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_task_class(cls, name):
-        return cls.mapping["task_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_processor_class(cls, name):
-        return cls.mapping["processor_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_lr_scheduler_class(cls, name):
-        return cls.mapping["lr_scheduler_name_mapping"].get(name, None)
-
-    @classmethod
-    def get_runner_class(cls, name):
-        return cls.mapping["runner_name_mapping"].get(name, None)
-
-    @classmethod
-    def list_runners(cls):
-        return sorted(cls.mapping["runner_name_mapping"].keys())
-
-    @classmethod
-    def list_models(cls):
-        return sorted(cls.mapping["model_name_mapping"].keys())
-
-    @classmethod
-    def list_tasks(cls):
-        return sorted(cls.mapping["task_name_mapping"].keys())
-
-    @classmethod
-    def list_processors(cls):
-        return sorted(cls.mapping["processor_name_mapping"].keys())
-
-    @classmethod
-    def list_lr_schedulers(cls):
-        return sorted(cls.mapping["lr_scheduler_name_mapping"].keys())
-
-    @classmethod
-    def list_datasets(cls):
-        return sorted(cls.mapping["builder_name_mapping"].keys())
-
-    @classmethod
-    def get_path(cls, name):
-        return cls.mapping["paths"].get(name, None)
-
-    @classmethod
-    def get(cls, name, default=None, no_warning=False):
-        r"""Get an item from registry with key 'name'
-
-        Args:
-            name (string): Key whose value needs to be retrieved.
-            default: If passed and key is not in registry, default value will
-                     be returned with a warning. Default: None
-            no_warning (bool): If passed as True, warning when key doesn't exist
-                               will not be generated. Useful for MMF's
-                               internal operations. Default: False
-        """
-        original_name = name
-        name = name.split(".")
-        value = cls.mapping["state"]
-        for subname in name:
-            value = value.get(subname, default)
-            if value is default:
-                break
-
-        if (
-            "writer" in cls.mapping["state"]
-            and value == default
-            and no_warning is False
-        ):
-            cls.mapping["state"]["writer"].warning(
-                "Key {} is not present in registry, returning default value "
-                "of {}".format(original_name, default)
-            )
-        return value
-
-    @classmethod
-    def unregister(cls, name):
-        r"""Remove an item from registry with key 'name'
-
-        Args:
-            name: Key which needs to be removed.
-        Usage::
-
-            from mmf.common.registry import registry
-
-            config = registry.unregister("config")
-        """
-        return cls.mapping["state"].pop(name, None)
-
-
-registry = Registry()
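-
-# Illustrative sketch (not part of the original module; "my_task" and MyTask
-# are hypothetical names): registering and looking up entries in the registry.
-#
-#     from lavis.common.registry import registry
-#     from lavis.tasks.base_task import BaseTask
-#
-#     @registry.register_task("my_task")
-#     class MyTask(BaseTask):
-#         pass
-#
-#     assert registry.get_task_class("my_task") is MyTask
-#
-#     # dotted keys are stored as nested dicts under mapping["state"]
-#     registry.register("config.seed", 42)
-#     assert registry.get("config.seed") == 42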
diff --git a/spaces/SeaLLMs/SeaLLM-Chat-13b/app.py b/spaces/SeaLLMs/SeaLLM-Chat-13b/app.py
deleted file mode 100644
index 39d45ad23ecba938f831967dad32fc4fc15f3cbf..0000000000000000000000000000000000000000
--- a/spaces/SeaLLMs/SeaLLM-Chat-13b/app.py
+++ /dev/null
@@ -1,1425 +0,0 @@
-# Copyright: DAMO Academy, Alibaba Group
-# By Xuan Phi Nguyen at DAMO Academy, Alibaba Group
-
-# Description:
-"""
-VLLM-based demo script to launch Language chat model for Southeast Asian Languages
-"""
-
-
-import os
-import numpy as np
-import argparse
-import torch
-import gradio as gr
-from typing import Any, Iterator
-from typing import Iterator, List, Optional, Tuple
-import filelock
-import glob
-import json
-import time
-
-from gradio_client.documentation import document, set_documentation_group
-
-from typing import List, Optional, Union, Dict, Tuple
-from tqdm.auto import tqdm
-from huggingface_hub import snapshot_download
-
-
-# @@ environments ================
-
-DEBUG = bool(int(os.environ.get("DEBUG", "1")))
-
-# List of languages to block
-BLOCK_LANGS = str(os.environ.get("BLOCK_LANGS", ""))
-BLOCK_LANGS = BLOCK_LANGS.strip().split(";") if len(BLOCK_LANGS.strip()) > 0 else []
-
-# for lang block, whether to block based on history too
-LANG_BLOCK_HISTORY = bool(int(os.environ.get("LANG_BLOCK_HISTORY", "0")))
-TENSOR_PARALLEL = int(os.environ.get("TENSOR_PARALLEL", "1"))
-DTYPE = os.environ.get("DTYPE", "bfloat16")
-
-# ! (no debug) whether to download HF_MODEL_NAME and save to MODEL_PATH
-DOWNLOAD_SNAPSHOT = bool(int(os.environ.get("DOWNLOAD_SNAPSHOT", "0")))
-LOG_RESPONSE = bool(int(os.environ.get("LOG_RESPONSE", "0")))
-# ! show model path in the demo page, only for internal
-DISPLAY_MODEL_PATH = bool(int(os.environ.get("DISPLAY_MODEL_PATH", "1")))
-
-# ! uploaded model path, will be downloaded to MODEL_PATH
-HF_MODEL_NAME = os.environ.get("HF_MODEL_NAME", "DAMO-NLP-SG/seal-13b-chat-a")
-# ! if model is private, need HF_TOKEN to access the model
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-# ! path where the model is downloaded, either on ./ or persistent disc
-MODEL_PATH = os.environ.get("MODEL_PATH", "./seal-13b-chat-a")
-
-# ! log path
-LOG_PATH = os.environ.get("LOG_PATH", "").strip()
-LOG_FILE = None
-SAVE_LOGS = LOG_PATH is not None and LOG_PATH != ''
-if SAVE_LOGS:
-    if os.path.exists(LOG_PATH):
-        print(f'LOG_PATH exist: {LOG_PATH}')
-    else:
-        LOG_DIR = os.path.dirname(LOG_PATH)
-        os.makedirs(LOG_DIR, exist_ok=True)
-
-# ! get LOG_PATH as aggregated outputs in log
-GET_LOG_CMD = os.environ.get("GET_LOG_CMD", "").strip()
-
-print(f'SAVE_LOGS: {SAVE_LOGS} | {LOG_PATH}')
-print(f'GET_LOG_CMD: {GET_LOG_CMD}')
-
-# ! !! Whether to delete the folder, ONLY SET THIS IF YOU WANT TO DELETE SAVED MODEL ON PERSISTENT DISC
-DELETE_FOLDER = os.environ.get("DELETE_FOLDER", "")
-IS_DELETE_FOLDER = DELETE_FOLDER is not None and os.path.exists(DELETE_FOLDER)
-print(f'DELETE_FOLDER: {DELETE_FOLDER} | {DOWNLOAD_SNAPSHOT=}')
-
-# ! list of keywords to disable, as a security measure to comply with local regulations
-KEYWORDS = os.environ.get("KEYWORDS", "").strip()
-KEYWORDS = KEYWORDS.split(";") if len(KEYWORDS) > 0 else []
-KEYWORDS = [x.lower() for x in KEYWORDS]
-
-# gradio config
-PORT = int(os.environ.get("PORT", "7860"))
-# how many iterations to yield response
-STREAM_YIELD_MULTIPLE = int(os.environ.get("STREAM_YIELD_MULTIPLE", "1"))
-# how many iterations to perform safety check on response
-STREAM_CHECK_MULTIPLE = int(os.environ.get("STREAM_CHECK_MULTIPLE", "0"))
-
-# whether to enable the popup asking the user to accept the terms
-ENABLE_AGREE_POPUP = bool(int(os.environ.get("ENABLE_AGREE_POPUP", "0")))
-
-# self explanatory
-MAX_TOKENS = int(os.environ.get("MAX_TOKENS", "2048"))
-TEMPERATURE = float(os.environ.get("TEMPERATURE", "0.1"))
-FREQUENCE_PENALTY = float(os.environ.get("FREQUENCE_PENALTY", "0.4"))
-gpu_memory_utilization = float(os.environ.get("gpu_memory_utilization", "0.9"))
-
-# whether to enable quantization, currently not in use
-QUANTIZATION = str(os.environ.get("QUANTIZATION", ""))
-
-
-"""
-Internal instructions of how to configure the DEMO
-
-1. Upload SFT model as a model to huggingface: huggingface/models/seal_13b_a
-2. If the model weights is private, set HF_TOKEN=<your private hf token> in https://huggingface.co/spaces/????/?????/settings
-3. space config env: `HF_MODEL_NAME=SeaLLMs/seal-13b-chat-a` or the underlying model
-4. If enable persistent storage: set
-HF_HOME=/data/.huggingface
-MODEL_PATH=/data/.huggingface/seal-13b-chat-a
-if not:
-MODEL_PATH=./seal-13b-chat-a
-
-
-HF_HOME=/data/.huggingface
-MODEL_PATH=/data/ckpt/seal-13b-chat-a
-DELETE_FOLDER=/data/
-
-"""
-
-# ==============================
-print(f'DEBUG mode: {DEBUG}')
-print(f'Torch version: {torch.__version__}')
-try:
-    print(f'Torch CUDA version: {torch.version.cuda}')
-except Exception as e:
-    print(f'Failed to print cuda version: {e}')
-
-try:
-    compute_capability = torch.cuda.get_device_capability()
-    print(f'Torch CUDA compute_capability: {compute_capability}')
-except Exception as e:
-    print(f'Failed to print compute_capability version: {e}')
-
-
-# @@ constants ================
-
-DTYPES = {
-    'float16': torch.float16,
-    'bfloat16': torch.bfloat16
-}
-
-llm = None
-demo = None
-
-
-BOS_TOKEN = '<s>'
-EOS_TOKEN = '</s>'
-
-B_INST, E_INST = "[INST]", "[/INST]"
-B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
-
-# TODO: should hide the system prompt
-SYSTEM_PROMPT_1 = """You are a multilingual, helpful, respectful and honest assistant. Your name is SeaLLM and you are built by DAMO Academy, Alibaba Group. \
-Please always answer as helpfully as possible, while being safe. Your \
-answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
-that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
-correct. If you don't know the answer to a question, please don't share false information.
-
-As a multilingual assistant, you must respond and follow instructions in the native language of the user by default, unless told otherwise. \
-Your response should adapt to the norms and customs of the respective language and culture.
-"""
-
-# ============ CONSTANT ============
-# https://github.com/gradio-app/gradio/issues/884
-MODEL_NAME = "SeaLLM-13B"
-
-MODEL_TITLE = """
-<div class="container" style="
-    align-items: center;
-    justify-content: center;
-    display: flex;
-">
-    <div class="image" >
-        <img src="file/seal_logo.png" style="
-            max-width: 10em;
-            max-height: 5%;
-            height: 3em;
-            width: 3em;
-            float: left;
-            margin-left: auto;
-        ">
-      </div>
-      <div class="text" style="
-            padding-left: 20px;
-            padding-top: 1%;
-            float: left;
-        ">
-      <h1>SeaLLMs - Large Language Models for Southeast Asia</h1>
-      </div>
-</div>
-"""
-# <a href=''><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
-MODEL_DESC = """
-<div style='display:flex; gap: 0.25rem; '>
-<a href='https://github.com/SeaLLMs/SeaLLMs'><img src='https://img.shields.io/badge/Github-Code-success'></a>
-<a href='https://huggingface.co/spaces/SeaLLMs/SeaLLM-Chat-13b'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a> 
-<a href='https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a> 
-</div>
-<span style="font-size: larger">
-This is <a href="https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b" target="_blank">SeaLLM-13B-Chat</a> - a chatbot assistant optimized for Southeast Asian Languages. It produces helpful responses in English 🇬🇧, Vietnamese 🇻🇳, Indonesian 🇮🇩 and Thai 🇹🇭.
-Explore <a href="https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b" target="_blank">our article</a> for more details.
-</span>
-<br>
-<span >
-NOTE: The chatbot may produce inaccurate and harmful information about people, places, or facts.
-<span style="color: red">By using our service, you are required to agree to our <a href="https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b/blob/main/LICENSE" target="_blank" style="color: red">SeaLLM Terms Of Use</a>, which include:</span><br>
-<ul>
-<li >
-You must not use our service to generate any harmful, unethical or illegal content that violates locally applicable and international laws or regulations, 
-including but not limited to hate speech, violence, pornography and deception.</li>
-<li >
-The service collects user dialogue data for testing and performance improvement, and reserves the right to distribute it under 
-<a href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution (CC-BY)</a> or similar license. So do not enter any personal information!
-</li>
-</ul>
-</span>
-
-""".strip()
-
-
-cite_markdown = """
-## Citation
-If you find our project useful, hope you can star our repo and cite our paper as follows:
-```
-@article{damonlpsg2023seallm,
-  author = {Xuan-Phi Nguyen*, Wenxuan Zhang*, Xin Li*, Mahani Aljunied*, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, Lidong Bing},
-  title = {SeaLLMs - Large Language Models for Southeast Asia},
-  year = 2023,
-}
-```
-"""
-
-path_markdown = """
-#### Model path:
-{model_path}
-"""
-
-
-
-
-def custom_hf_model_weights_iterator(
-    model_name_or_path: str,
-    cache_dir: Optional[str] = None,
-    use_np_cache: bool = False,
-) -> Iterator[Tuple[str, torch.Tensor]]:
-    # ! if use vllm==0.1.4, use this to augment hf_model_weights_iterator loader
-    from vllm.model_executor.weight_utils import Disabledtqdm
-    # Prepare file lock directory to prevent multiple processes from
-    # downloading the same model weights at the same time.
-    lock_dir = cache_dir if cache_dir is not None else "/tmp"
-    lock_file_name = model_name_or_path.replace("/", "-") + ".lock"
-    lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))
-
-    # Download model weights from huggingface.
-    is_local = os.path.isdir(model_name_or_path)
-    if not is_local:
-        with lock:
-            hf_folder = snapshot_download(model_name_or_path,
-                                          allow_patterns="*.bin",
-                                          cache_dir=cache_dir,
-                                          local_files_only=True,
-                                          tqdm_class=Disabledtqdm)
-    else:
-        hf_folder = model_name_or_path
-
-    hf_bin_files = [
-        x for x in glob.glob(os.path.join(hf_folder, "*model*.bin"))
-        if not x.endswith("training_args.bin")
-    ]
-    hf_safetensors_files = [
-        x for x in glob.glob(os.path.join(hf_folder, "*model*.safetensors"))
-        if not x.endswith("training_args.bin")
-    ]
-
-    if use_np_cache:
-        # Convert the model weights from torch tensors to numpy arrays for
-        # faster loading.
-        np_folder = os.path.join(hf_folder, "np")
-        os.makedirs(np_folder, exist_ok=True)
-        weight_names_file = os.path.join(np_folder, "weight_names.json")
-        with lock:
-            if not os.path.exists(weight_names_file):
-                weight_names = []
-                for bin_file in hf_bin_files:
-                    state = torch.load(bin_file, map_location="cpu")
-                    for name, param in state.items():
-                        param_path = os.path.join(np_folder, name)
-                        with open(param_path, "wb") as f:
-                            np.save(f, param.cpu().detach().numpy())
-                        weight_names.append(name)
-                with open(weight_names_file, "w") as f:
-                    json.dump(weight_names, f)
-
-        with open(weight_names_file, "r") as f:
-            weight_names = json.load(f)
-
-        for name in weight_names:
-            param_path = os.path.join(np_folder, name)
-            with open(param_path, "rb") as f:
-                param = np.load(f)
-            yield name, torch.from_numpy(param)
-    else:
-        if len(hf_bin_files) > 0:
-            print(F'Load bin files: {hf_bin_files}')
-            for bin_file in hf_bin_files:
-                state = torch.load(bin_file, map_location="cpu")
-                for name, param in state.items():
-                    yield name, param
-                del state
-                torch.cuda.empty_cache()
-        elif len(hf_safetensors_files) > 0:
-            print(F'Load safetensor files: {hf_safetensors_files}')
-            from safetensors.torch import load_file
-            for safe_file in hf_safetensors_files:
-                # state = torch.load(bin_file, map_location="cpu")
-                state = load_file(safe_file)
-                for name, param in state.items():
-                    yield name, param
-                del state
-                torch.cuda.empty_cache()
-        else:
-            raise ValueError('no model weight files found, neither *.bin nor *.safetensors')
-
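-# Illustrative sketch (hypothetical path): consuming the iterator above.
-#
-#     for name, param in custom_hf_model_weights_iterator("./seal-13b-chat-a"):
-#         print(name, tuple(param.shape))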
-
-def convert_pyslice_to_tensor(x: Any) -> torch.Tensor:
-    """convert PySafeSlice object from safetensors to torch.Tensor
-
-    PySafeSlice object supports indexing, which is done before loading the
-    actual tensor and can reduce the amount of memory being read into the
-    memory. However, it does not support more advanced functionalities
-    like `.view()` or `.t()`. Therefore, if we need to modify the loaded
-    tensor with these more complicated operators, we need to convert to
-    tensor first.
-    """
-    if not isinstance(x, torch.Tensor):
-        x = x[:]
-    return x
-
-
-def load_padded_tensor_parallel_vocab(
-    param: torch.Tensor,
-    loaded_weight: Any,  # `torch.Tensor` or `PySafeSlice`
-    tensor_model_parallel_rank: int,
-) -> None:
-    shard_size = param.shape[0]
-    start_idx = tensor_model_parallel_rank * shard_size
-    end_idx = (tensor_model_parallel_rank + 1) * shard_size
-    loaded_weight = loaded_weight[start_idx:end_idx]
-    loaded_weight = convert_pyslice_to_tensor(loaded_weight)
-    param[:loaded_weight.shape[0]].copy_(loaded_weight)
-
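-# Illustrative note: with a 32000-row shard and tensor_model_parallel_rank=1,
-# rows [32000:64000] of the full (padded) vocab are copied into this rank's
-# shard; rows beyond the loaded weight's size are left as padding.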
-
-def llama_load_weights(
-        self,
-        model_name_or_path: str,
-        cache_dir: Optional[str] = None,
-        use_np_cache: bool = False,
-        load_format: str = "auto",
-        revision: Optional[str] = None
-):
-    # if use vllm==0.1.4
-    from vllm.model_executor.weight_utils import (
-        load_tensor_parallel_weights
-    )
-    from vllm.model_executor.parallel_utils.parallel_state import (
-        get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
-    tp_size = get_tensor_model_parallel_world_size()
-    tensor_model_parallel_rank = get_tensor_model_parallel_rank()
-
-    q_proj_shard_size = (self.config.hidden_size // tp_size)
-    kv_proj_shard_size = (self.config.hidden_size //
-                            self.config.num_attention_heads *
-                            getattr(self.config, "num_key_value_heads", self.config.num_attention_heads) // tp_size)
-    attention_weight_specs = [
-        # (weight_name, shard_size, offset)
-        ("q_proj", q_proj_shard_size, 0),
-        ("k_proj", kv_proj_shard_size, q_proj_shard_size),
-        ("v_proj", kv_proj_shard_size,
-            q_proj_shard_size + kv_proj_shard_size),
-    ]
-    state_dict = self.state_dict()
-    need_to_load = len(state_dict)
-    loaded = 0
-    iterator = custom_hf_model_weights_iterator(model_name_or_path, cache_dir, use_np_cache)
-
-    for name, loaded_weight in iterator:
-        if "rotary_emb.inv_freq" in name:
-            continue
-
-        if "embed_tokens" in name or "lm_head" in name:
-            param = state_dict[name]
-            # Consider padding in the vocab size.
-            padded_vocab_size = (param.shape[0] * tp_size)
-            # num_extra_rows = padded_vocab_size - self.config.vocab_size
-            num_extra_rows = padded_vocab_size - loaded_weight.size(0)
-            load_size = loaded_weight.size()
-            extra_rows = torch.empty(num_extra_rows,
-                                        loaded_weight.shape[1])
-            extra_rows = extra_rows.to(loaded_weight)
-            loaded_weight = torch.cat([loaded_weight, extra_rows], dim=0)
-            if num_extra_rows > 0:
-                print(f'Added {num_extra_rows} empty extra rows for {name}')
-            print(f'Load: {name} | {padded_vocab_size=} | {self.config.vocab_size=} | {num_extra_rows=} | {param.size()=} | {loaded_weight.size()=} | {load_size=}')
-
-        is_attention_weight = False
-        for weight_name, shard_size, offset in attention_weight_specs:
-            if weight_name not in name or "qkv_proj" in name:
-                continue
-            param = state_dict[name.replace(weight_name, "qkv_proj")]
-
-            loaded_weight = loaded_weight[
-                shard_size * tensor_model_parallel_rank:shard_size *
-                (tensor_model_parallel_rank + 1)]
-            param_slice = param.data[offset:offset + shard_size]
-            assert param_slice.shape == loaded_weight.shape
-
-            param_slice.copy_(loaded_weight)
-            loaded += 1.0 / 3
-            is_attention_weight = True
-            break
-        if is_attention_weight:
-            continue
-            
-        # ! qkv_proj is sharded differently if concatenated into qkv
-        # qkv:      qqqq kkkk vvvv
-        # lweight:  qq0qq1 kk0kk1 vv0vv1
-        # q_shard_size: hidden_size // tp_size = qq
-        # qkv_s0:   qq0_kk0_vv0
-        # qkv_s1:   qq1_kk1_vv1
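-        # e.g. hidden_size=8, tp_size=2 (with num_key_value_heads equal to
-        # num_attention_heads): each q/k/v shard has 4 rows, so rank 0
-        # concatenates rows [0:4], [8:12] and [16:20] of the fused weight.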
-        if "qkv_proj" in name:
-            param = state_dict[name]
-            # loaded_weight
-            qsize = self.config.hidden_size
-            kvsize = self.config.hidden_size // self.config.num_attention_heads * getattr(self.config, "num_key_value_heads", self.config.num_attention_heads)
-            q_offsets = (
-                q_proj_shard_size * tensor_model_parallel_rank, 
-                q_proj_shard_size * (tensor_model_parallel_rank + 1)
-            )
-            k_offsets = (
-                qsize + kv_proj_shard_size * tensor_model_parallel_rank, 
-                qsize + kv_proj_shard_size * (tensor_model_parallel_rank + 1)
-            )
-            v_offsets = (
-                qsize + kvsize + kv_proj_shard_size * tensor_model_parallel_rank, 
-                qsize + kvsize + kv_proj_shard_size * (tensor_model_parallel_rank + 1)
-            )
-            _loaded_weight = torch.cat(
-                [
-                    loaded_weight[q_offsets[0]:q_offsets[1]],
-                    loaded_weight[k_offsets[0]:k_offsets[1]],
-                    loaded_weight[v_offsets[0]:v_offsets[1]],
-                ], 0
-            )
-            assert param.shape == _loaded_weight.shape, f'{param.shape=} != {_loaded_weight.shape=}'
-            param.data.copy_(_loaded_weight)
-            loaded += 1.0
-            is_attention_weight = True
-        if is_attention_weight:
-            continue
-
-
-        is_gate_up_weight = False
-        for stride_id, weight_name in enumerate(["gate_proj", "up_proj"]):
-            if weight_name not in name or "gate_up_proj" in name:
-                continue
-            param = state_dict[name.replace(weight_name, "gate_up_proj")]
-            shard_size = param.shape[0] // 2
-            loaded_weight = loaded_weight[
-                shard_size * tensor_model_parallel_rank:shard_size *
-                (tensor_model_parallel_rank + 1)]
-            param_slice = param.data[shard_size * stride_id:shard_size *
-                                        (stride_id + 1)]
-            assert param_slice.shape == loaded_weight.shape
-            param_slice.copy_(loaded_weight)
-            loaded += 1.0 / 2
-            is_gate_up_weight = True
-            break
-        if is_gate_up_weight:
-            continue
-            
-        if "gate_up_proj" in name:
-            param = state_dict[name]
-            shard_size = param.shape[0] // 2
-            intermediate_size = self.config.intermediate_size
-            g_offsets = (
-                shard_size * tensor_model_parallel_rank, 
-                shard_size * (tensor_model_parallel_rank + 1)
-            )
-            u_offsets = (
-                intermediate_size + shard_size * tensor_model_parallel_rank, 
-                intermediate_size + shard_size * (tensor_model_parallel_rank + 1)
-            )
-            _loaded_weight = torch.cat(
-                [
-                    loaded_weight[g_offsets[0]:g_offsets[1]],
-                    loaded_weight[u_offsets[0]:u_offsets[1]],
-                ], 0
-            )
-            assert param.shape == _loaded_weight.shape
-            param.data.copy_(_loaded_weight)
-            loaded += 1.0
-            is_gate_up_weight = True
-        if is_gate_up_weight:
-            continue
-
-
-        param = state_dict[name]
-        load_tensor_parallel_weights(param, loaded_weight, name,
-                                        self._column_parallel_weights,
-                                        self._row_parallel_weights,
-                                        tensor_model_parallel_rank)
-        loaded += 1
-
-    if np.abs(loaded - need_to_load) < 0.01:
-        print(f'Loaded all {loaded} params out of {need_to_load}')
-    else:
-        print(f'WARNING: only {loaded} params loaded out of {need_to_load}')
-
-
-def new_llama_load_weights(
-    self,
-    model_name_or_path: str,
-    cache_dir: Optional[str] = None,
-    load_format: str = "auto",
-    revision: Optional[str] = None
-):
-    # For newer vllm versions; not thoroughly tested yet.
-    from vllm.model_executor.weight_utils import (
-        load_tensor_parallel_weights, hf_model_weights_iterator
-    )
-    from vllm.model_executor.parallel_utils.parallel_state import (
-        get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
-    
-    if self.quant_config is None:
-        weight_suffixes = ["weight"]
-    else:
-        weight_suffixes = self.quant_config.get_tp_tensor_names()
-
-    column_parallel_weights: List[str] = []
-    for layer in self._column_parallel_layers:
-        for suffix in weight_suffixes:
-            column_parallel_weights.append(f"{layer}.{suffix}")
-    row_parallel_weights: List[str] = []
-    for layer in self._row_parallel_layers:
-        for suffix in weight_suffixes:
-            row_parallel_weights.append(f"{layer}.{suffix}")
-
-    tp_size = get_tensor_model_parallel_world_size()
-    tp_rank = get_tensor_model_parallel_rank()
-    assert tp_size == 1, f'tensor parallel size >= 2 not allowed, got {tp_size}'
-    q_proj_shard_size = (self.config.hidden_size // tp_size)
-    num_kv_heads_replicas = max(1,
-                                tp_size // self.config.num_key_value_heads)
-    num_kv_heads_per_gpu = max(1,
-                                self.config.num_key_value_heads // tp_size)
-    kv_proj_shard_size = (self.config.hidden_size //
-                            self.config.num_attention_heads *
-                            num_kv_heads_per_gpu)
-    attention_weight_specs = [
-        # (weight_name, shard_size, offset)
-        ("q_proj", q_proj_shard_size, 0),
-        ("k_proj", kv_proj_shard_size, q_proj_shard_size),
-        ("v_proj", kv_proj_shard_size,
-            q_proj_shard_size + kv_proj_shard_size),
-    ]
-    state_dict = self.state_dict()
-    need_to_load = len(state_dict)
-    loaded = 0
-
-    for name, loaded_weight in hf_model_weights_iterator(
-            model_name_or_path, cache_dir, load_format, revision):
-        if "rotary_emb.inv_freq" in name:
-            continue
-
-        is_packed = False
-        is_transposed = False
-        if self.quant_config is not None:
-            is_packed = self.quant_config.is_packed(name)
-            is_transposed = self.quant_config.is_transposed(name)
-        if is_transposed:
-            loaded_weight = convert_pyslice_to_tensor(loaded_weight)
-            loaded_weight = loaded_weight.T
-
-        is_attention_weight = False
-        for weight_name, shard_size, offset in attention_weight_specs:
-            if weight_name not in name or "qkv_proj" in name:
-                continue
-            param = state_dict[name.replace(weight_name, "qkv_proj")]
-            if is_transposed:
-                param = param.T
-
-            if is_packed:
-                shard_size //= self.quant_config.pack_factor
-                offset //= self.quant_config.pack_factor
-
-            if weight_name in ["k_proj", "v_proj"]:
-                shard_id = tp_rank // num_kv_heads_replicas
-            else:
-                shard_id = tp_rank
-            loaded_weight = loaded_weight[shard_size *
-                                            shard_id:shard_size *
-                                            (shard_id + 1)]
-            param_slice = param.data[offset:offset + shard_size]
-            assert param_slice.shape == loaded_weight.shape
-
-            param_slice.copy_(loaded_weight)
-            loaded += 1.0 / 3
-            is_attention_weight = True
-            break
-        if is_attention_weight:
-            continue
-        
-    # TODO: need to figure out how to do sharding with qkv_proj fused
-
-        is_gate_up_weight = False
-        for stride_id, weight_name in enumerate(["gate_proj", "up_proj"]):
-            if weight_name not in name or "gate_up_proj" in name:
-                continue
-            param = state_dict[name.replace(weight_name, "gate_up_proj")]
-            if is_transposed:
-                param = param.T
-
-            shard_size = param.shape[0] // 2
-            loaded_weight = loaded_weight[shard_size * tp_rank:shard_size *
-                                            (tp_rank + 1)]
-            param_slice = param.data[shard_size * stride_id:shard_size *
-                                        (stride_id + 1)]
-            assert param_slice.shape == loaded_weight.shape
-            param_slice.copy_(loaded_weight)
-            loaded += 1.0 / 2
-            is_gate_up_weight = True
-            break
-        if is_gate_up_weight:
-            continue
-
-    # TODO: need to figure out how to do sharding with gate_up_proj fused
-
-        param = state_dict[name]
-        if is_transposed:
-            param = param.T
-
-        if "embed_tokens" in name or "lm_head" in name:
-            load_padded_tensor_parallel_vocab(param, loaded_weight,
-                                                tp_rank)
-            loaded += 1
-            continue
-
-        load_tensor_parallel_weights(param, loaded_weight, name,
-                                        column_parallel_weights,
-                                        row_parallel_weights, tp_rank)
-        loaded += 1
-
-    if np.abs(loaded - need_to_load) < 0.01:
-        print(f'Loaded all {loaded} params out of {need_to_load}')
-    else:
-        print(f'WARNING: only {loaded} params loaded out of {need_to_load}')
-
-
-# Reassign LlamaForCausalLM.load_weights with llama_load_weights
-if not DEBUG:
-    
-    try:
-        import vllm
-        from vllm.model_executor.model_loader import _MODEL_REGISTRY
-        from vllm.model_executor.models import LlamaForCausalLM
-
-        _MODEL_REGISTRY['FasterLlamaForCausalLM'] = LlamaForCausalLM
-        if vllm.__version__ == "0.1.4":
-            LlamaForCausalLM.load_weights = llama_load_weights
-        else:
-            LlamaForCausalLM.load_weights = new_llama_load_weights
-
-        if DTYPE == "bfloat16":
-            try:
-                compute_capability = torch.cuda.get_device_capability()
-                if compute_capability[0] < 8:
-                    gpu_name = torch.cuda.get_device_name()
-                    print(
-                        "Bfloat16 is only supported on GPUs with compute capability "
-                        f"of at least 8.0. Your {gpu_name} GPU has compute capability "
-                        f"{compute_capability[0]}.{compute_capability[1]}. --> Move to FLOAT16")
-                    DTYPE = "float16"
-            except Exception as e:
-                print(f'Unable to obtain compute_capability: {e}')
-    except Exception as e:
-        print(f'Failed to import and reconfigure vLLM: {str(e)}')
-    
-
-# ! ==================================================================
-
-set_documentation_group("component")
-
-
-RES_PRINTED = False
-
-def llama_chat_sys_input_seq_constructor(text, sys_prompt=SYSTEM_PROMPT_1, bos_token=BOS_TOKEN, eos_token=EOS_TOKEN):
-    return f"{bos_token}{B_INST} {B_SYS} {sys_prompt} {E_SYS} {text} {E_INST}"
-
-
-def llama_chat_multiturn_sys_input_seq_constructor(
-    message: str,
-    history: List[Tuple[str, str]], 
-    sys_prompt=SYSTEM_PROMPT_1, 
-    bos_token=BOS_TOKEN, 
-    eos_token=EOS_TOKEN,
-):
-    """
-    ```
-        <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos>
-        <bos>[INST] Prompt [/INST] Answer <eos>
-        <bos>[INST] Prompt [/INST]
-    ```
-    """
-    text = ''
-    for i, (prompt, res) in enumerate(history):
-        if i == 0:
-            text += f"{bos_token}{B_INST} {B_SYS} {sys_prompt} {E_SYS} {prompt} {E_INST}"
-        else:
-            text += f"{bos_token}{B_INST} {prompt} {E_INST}"
-
-        if res is not None:
-            text += f" {res} {eos_token} "
-    if len(history) == 0 or text.strip() == '':
-        text = f"{bos_token}{B_INST} {B_SYS} {sys_prompt} {E_SYS} {message} {E_INST}"
-    else:
-        text += f"{bos_token}{B_INST} {message} {E_INST}"
-    return text
-
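-# Illustrative sketch (contents hypothetical): building a two-turn prompt with
-# the template shown in the docstring above.
-#
-#     prompt = llama_chat_multiturn_sys_input_seq_constructor(
-#         "And its population?",
-#         [("What is the capital of Vietnam?", "Hanoi.")],
-#     )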
-
-@document()
-class ChatBot(gr.Chatbot):
-    def _postprocess_chat_messages(
-        self, chat_message
-    ):
-        x = super()._postprocess_chat_messages(chat_message)
-        # if isinstance(x, str):
-        #     x = x.strip().replace("\n", "<br>")
-        return x
-
-
-from gradio.components import Button
-from gradio.events import Dependency, EventListenerMethod
-
-# replace events so that submit button is disabled during generation, if stop_btn not found
-# this prevents weird behavior
-def _setup_stop_events(
-    self, event_triggers: list[EventListenerMethod], event_to_cancel: Dependency
-) -> None:
-    from gradio.components import State
-    event_triggers = event_triggers if isinstance(event_triggers, (list, tuple)) else [event_triggers]
-    if self.stop_btn and self.is_generator:
-        if self.submit_btn:
-            for event_trigger in event_triggers:
-                event_trigger(
-                    lambda: (
-                        Button.update(visible=False),
-                        Button.update(visible=True),
-                    ),
-                    None,
-                    [self.submit_btn, self.stop_btn],
-                    api_name=False,
-                    queue=False,
-                )
-            event_to_cancel.then(
-                lambda: (Button.update(visible=True), Button.update(visible=False)),
-                None,
-                [self.submit_btn, self.stop_btn],
-                api_name=False,
-                queue=False,
-            )
-        else:
-            for event_trigger in event_triggers:
-                event_trigger(
-                    lambda: Button.update(visible=True),
-                    None,
-                    [self.stop_btn],
-                    api_name=False,
-                    queue=False,
-                )
-            event_to_cancel.then(
-                lambda: Button.update(visible=False),
-                None,
-                [self.stop_btn],
-                api_name=False,
-                queue=False,
-            )
-        self.stop_btn.click(
-            None,
-            None,
-            None,
-            cancels=event_to_cancel,
-            api_name=False,
-        )
-    else:
-        if self.submit_btn:
-            for event_trigger in event_triggers:
-                event_trigger(
-                    lambda: Button.update(interactive=False),
-                    None,
-                    [self.submit_btn],
-                    api_name=False,
-                    queue=False,
-                )
-            event_to_cancel.then(
-                lambda: Button.update(interactive=True),
-                None,
-                [self.submit_btn],
-                api_name=False,
-                queue=False,
-            )
-    # upon clear, cancel the submit event as well
-    if self.clear_btn:
-        self.clear_btn.click(
-            lambda: ([], [], None, Button.update(interactive=True)),
-            None,
-            [self.chatbot, self.chatbot_state, self.saved_input, self.submit_btn],
-            queue=False,
-            api_name=False,
-            cancels=event_to_cancel,
-        )
-
-# TODO: reconfigure clear button as stop and clear button
-def _setup_events(self) -> None:
-    from gradio.components import State
-    has_on = False
-    try:
-        from gradio.events import Dependency, EventListenerMethod, on
-        has_on = True
-    except ImportError as ie:
-        has_on = False
-    submit_fn = self._stream_fn if self.is_generator else self._submit_fn
-
-    def update_time(c_time, chatbot_state):
-        # if chatbot_state is empty, register a new conversation with the current timestamp
-        assert len(chatbot_state) > 0, 'empty chatbot state'
-        if len(chatbot_state) == 1:
-            assert chatbot_state[-1][-1] is None, f'invalid [[message, None]] , got {chatbot_state}'
-            return gr.Number(value=time.time(), label='current_time', visible=False), chatbot_state
-        else:
-            return c_time, chatbot_state
-
-    if has_on:
-        # new version
-        submit_triggers = (
-            [self.textbox.submit, self.submit_btn.click]
-            if self.submit_btn
-            else [self.textbox.submit]
-        )
-        submit_event = (
-            on(
-                submit_triggers,
-                self._clear_and_save_textbox,
-                [self.textbox],
-                [self.textbox, self.saved_input],
-                api_name=False,
-                queue=False,
-            )
-            .then(
-                self._display_input,
-                [self.saved_input, self.chatbot_state],
-                [self.chatbot, self.chatbot_state],
-                api_name=False,
-                queue=False,
-            )
-            .then(
-                update_time,
-                [self.additional_inputs[-1], self.chatbot_state],
-                [self.additional_inputs[-1], self.chatbot_state],
-                api_name=False,
-                queue=False,
-            )
-            .then(
-                submit_fn,
-                [self.saved_input, self.chatbot_state] + self.additional_inputs,
-                [self.chatbot, self.chatbot_state],
-                api_name=False,
-            )
-        )
-        self._setup_stop_events(submit_triggers, submit_event)
-    else:
-        raise ValueError('Please install a gradio version newer than 3.44.0')
-
-    if self.retry_btn:
-        retry_event = (
-            self.retry_btn.click(
-                self._delete_prev_fn,
-                [self.chatbot_state],
-                [self.chatbot, self.saved_input, self.chatbot_state],
-                api_name=False,
-                queue=False,
-            )
-            .then(
-                self._display_input,
-                [self.saved_input, self.chatbot_state],
-                [self.chatbot, self.chatbot_state],
-                api_name=False,
-                queue=False,
-            )
-            .then(
-                submit_fn,
-                [self.saved_input, self.chatbot_state] + self.additional_inputs,
-                [self.chatbot, self.chatbot_state],
-                api_name=False,
-            )
-        )
-        self._setup_stop_events([self.retry_btn.click], retry_event)
-
-    if self.undo_btn:
-        self.undo_btn.click(
-            self._delete_prev_fn,
-            [self.chatbot_state],
-            [self.chatbot, self.saved_input, self.chatbot_state],
-            api_name=False,
-            queue=False,
-        ).then(
-            lambda x: x,
-            [self.saved_input],
-            [self.textbox],
-            api_name=False,
-            queue=False,
-        )
-
-    # Reconfigure clear_btn to stop and clear text box
-    # if self.clear_btn:
-    #     self.clear_btn.click(
-    #         lambda: ([], [], None),
-    #         None,
-    #         [self.chatbot, self.chatbot_state, self.saved_input],
-    #         queue=False,
-    #         api_name=False,
-    #         cancels=submit_event,
-    #     )
-
-
-# replace
-gr.ChatInterface._setup_stop_events = _setup_stop_events
-gr.ChatInterface._setup_events = _setup_events
-
-
-def vllm_abort(self: Any):
-    from vllm.sequence import SequenceStatus
-    scheduler = self.llm_engine.scheduler
-    for state_queue in [scheduler.waiting, scheduler.running, scheduler.swapped]:
-        for seq_group in state_queue:
-            # if seq_group.request_id == request_id:
-            # Remove the sequence group from the state queue.
-            state_queue.remove(seq_group)
-            for seq in seq_group.seqs:
-                if seq.is_finished():
-                    continue
-                scheduler.free_seq(seq, SequenceStatus.FINISHED_ABORTED)
-
-
-def _vllm_run_engine(self: Any, use_tqdm: bool = False) -> Iterator[Dict[str, Any]]:
-    from vllm.outputs import RequestOutput
-    # Initialize tqdm.
-    if use_tqdm:
-        num_requests = self.llm_engine.get_num_unfinished_requests()
-        pbar = tqdm(total=num_requests, desc="Processed prompts")
-    # Run the engine.
-    outputs: Dict[str, RequestOutput] = {}
-    while self.llm_engine.has_unfinished_requests():
-        step_outputs = self.llm_engine.step()
-        for output in step_outputs:
-            outputs[output.request_id] = output
-        if len(outputs) > 0:
-            yield outputs
-
-
-
-def vllm_generate_stream(
-    self: Any,
-    prompts: Optional[Union[str, List[str]]] = None,
-    sampling_params: Optional[Any] = None,
-    prompt_token_ids: Optional[List[List[int]]] = None,
-    use_tqdm: bool = False,
-) -> Iterator[Dict[str, Any]]:
-    """Generates the completions for the input prompts.
-
-    NOTE: This class automatically batches the given prompts, considering
-    the memory constraint. For the best performance, put all of your prompts
-    into a single list and pass it to this method.
-
-    Args:
-        prompts: A list of prompts to generate completions for.
-        sampling_params: The sampling parameters for text generation. If
-            None, we use the default sampling parameters.
-        prompt_token_ids: A list of token IDs for the prompts. If None, we
-            use the tokenizer to convert the prompts to token IDs.
-        use_tqdm: Whether to use tqdm to display the progress bar.
-
-    Yields:
-        Dicts mapping request ids to `RequestOutput` objects containing the
-        completions generated so far.
-    """
-    from vllm import LLM, SamplingParams
-    if prompts is None and prompt_token_ids is None:
-        raise ValueError("Either prompts or prompt_token_ids must be "
-                            "provided.")
-    if isinstance(prompts, str):
-        # Convert a single prompt to a list.
-        prompts = [prompts]
-    if prompts is not None and prompt_token_ids is not None:
-        if len(prompts) != len(prompt_token_ids):
-            raise ValueError("The lengths of prompts and prompt_token_ids "
-                                "must be the same.")
-    if sampling_params is None:
-        # Use default sampling params.
-        sampling_params = SamplingParams()
-
-    # Add requests to the engine.
-    if prompts is not None:
-        num_requests = len(prompts)
-    else:
-        num_requests = len(prompt_token_ids)
-    for i in range(num_requests):
-        prompt = prompts[i] if prompts is not None else None
-        if prompt_token_ids is None:
-            token_ids = None
-        else:
-            token_ids = prompt_token_ids[i]
-        self._add_request(prompt, sampling_params, token_ids)
-    # return self._run_engine(use_tqdm)
-    yield from _vllm_run_engine(self, use_tqdm)
-
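-# Illustrative sketch (parameters hypothetical): consuming the stream above.
-#
-#     from vllm import SamplingParams
-#     params = SamplingParams(temperature=0.1, max_tokens=64)
-#     for outputs in vllm_generate_stream(llm, "Hello", params):
-#         for request_id, out in outputs.items():
-#             print(out.outputs[0].text)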
-
-
-# ! canned messages returned instead of answering blocked requests
-LANG_BLOCK_MESSAGE = """Sorry, the language you have asked is currently not supported. If you have questions in other supported languages, I'll be glad to help. \
-Please also consider clearing the chat box for a better experience."""
-
-KEYWORD_BLOCK_MESSAGE = "Sorry, I cannot fulfill your request. If you have any unrelated question, I'll be glad to help."
-
-
-def _detect_lang(text):
-    # Detect the input language; used to disable languages that may pose safety risks
-    from langdetect import detect as detect_lang
-    dlang = None
-    try:
-        dlang = detect_lang(text)
-    except Exception as e:
-        print(f'Error: {e}')
-        if "No features in text." in str(e):
-            return "en"
-        else:
-            return "zh"
-    return dlang
-
-
-def block_lang(
-    message: str, 
-    history: List[Tuple[str, str]] = None,
-) -> bool:
-    # skip blocking entirely if no blocked languages are configured
-    if len(BLOCK_LANGS) == 0:
-        return False
-    
-    if LANG_BLOCK_HISTORY and history is not None and any((LANG_BLOCK_MESSAGE in x[1].strip()) for x in history):
-        return True
-    else:
-        _lang = _detect_lang(message)
-        if _lang in BLOCK_LANGS:
-            print(f'Detect blocked {_lang}: {message}')
-            return True
-        else:
-            return False
-
-
-def safety_check(text, history=None, ) -> Optional[str]:
-    """
-    Despite our effort in safety tuning and red teaming, our models may still generate harmful or illegal content.
-    This provides an additional security measure to enhance safety and compliance with local regulations.
-    """
-    if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
-        return KEYWORD_BLOCK_MESSAGE
-    
-    if len(BLOCK_LANGS) > 0:
-        if block_lang(text, history):
-            return LANG_BLOCK_MESSAGE
-
-    return None
-
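-# Illustrative sketch: how the gate is used before and during generation.
-#
-#     block_msg = safety_check("some user input", history=None)
-#     if block_msg is not None:
-#         yield block_msg  # respond with the canned message and stop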
-
-def chat_response_stream_multiturn(
-    message: str, 
-    history: List[Tuple[str, str]], 
-    temperature: float, 
-    max_tokens: int, 
-    frequency_penalty: float,
-    current_time: Optional[float] = None,
-    system_prompt: Optional[str] = SYSTEM_PROMPT_1
-) -> Iterator[str]:
-    global LOG_FILE, LOG_PATH
-    from vllm import LLM, SamplingParams
-    """Build multi turn
-    <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>
-    <bos>[INST] Prompt [/INST] Answer <eos>
-    <bos>[INST] Prompt [/INST]
-
-    message is incoming prompt
-    history don't have the current messauge
-    """
-    global llm, RES_PRINTED
-    assert llm is not None
-    assert system_prompt.strip() != '', f'system prompt is empty'
-    tokenizer = llm.get_tokenizer()
-    # force-abort all unfinished requests in the engine
-    vllm_abort(llm)
-
-    temperature = float(temperature)
-    frequency_penalty = float(frequency_penalty)
-    max_tokens = int(max_tokens)
-
-    message = message.strip()
-
-    if message.strip() == GET_LOG_CMD:
-        print_log_file()
-        yield "Finish printed log. Please clear the chatbox now."
-        return 
-
-    if len(message) == 0:
-        raise gr.Error("The message cannot be empty!")
-
-    message_safety = safety_check(message, history=history)
-    if message_safety is not None:
-        yield message_safety
-        return
-
-    # history will be appended with message later on
-    full_prompt = llama_chat_multiturn_sys_input_seq_constructor(
-        message, history, sys_prompt=system_prompt
-    )
-
-    if len(tokenizer.encode(full_prompt, add_special_tokens=False)) >= 4050:
-        raise gr.Error(f"Conversation or prompt is too long, please clear the chatbox or try shorter input.")
-
-    sampling_params = SamplingParams(
-        temperature=temperature, 
-        max_tokens=max_tokens,
-        frequency_penalty=frequency_penalty,
-        stop=['<s>', '</s>', '<<SYS>>', '<</SYS>>', '[INST]', '[/INST]']
-    )
-    cur_out = None
-
-    for j, gen in enumerate(vllm_generate_stream(llm, full_prompt, sampling_params)):
-        if cur_out is not None and (STREAM_YIELD_MULTIPLE < 1 or j % STREAM_YIELD_MULTIPLE == 0) and j > 0:
-            cur_out = cur_out.replace("\\n", "\n")
-            
-            # optionally check safety, and respond
-            if STREAM_CHECK_MULTIPLE > 0 and j % STREAM_CHECK_MULTIPLE == 0:
-                message_safety = safety_check(cur_out, history=None)
-                if message_safety is not None:
-                    yield message_safety
-                    return
-
-            yield cur_out
-        assert len(gen) == 1, f'{gen}'
-        item = next(iter(gen.values()))
-        cur_out = item.outputs[0].text
-    
-    # TODO: use current_time to register conversations, according to history and cur_out
-    history_str = format_conversation(history + [[message, cur_out]])
-    print(f'@@@@@@@@@@\n{history_str}\n##########\n')
-
-    maybe_log_conv_file(current_time, history, message, cur_out, temperature=temperature, frequency_penalty=frequency_penalty)
-    
-    if cur_out is not None and "\\n" in cur_out:
-        print(f'double slash-n in cur_out:\n{cur_out}')
-        cur_out = cur_out.replace("\\n", "\n")
-
-    if cur_out is not None:
-        yield cur_out
-    
-    message_safety = safety_check(cur_out, history=None)
-    if message_safety is not None:
-        yield message_safety
-        return
-
-
-def maybe_log_conv_file(current_time, history, message, response, **kwargs):
-    global LOG_FILE
-    if LOG_FILE is not None:
-        my_history = history + [[message, response]]
-        obj = {
-            'key': str(current_time),
-            'history': my_history
-        }
-        for k, v in kwargs.items():
-            obj[k] = v
-        log_ = json.dumps(obj, ensure_ascii=False)
-        LOG_FILE.write(log_ + "\n")
-        LOG_FILE.flush()
-        print(f'Wrote {obj["key"]} to {LOG_PATH}')
-
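-# Illustrative example of a single JSONL record written above (values
-# hypothetical):
-#   {"key": "1700000000.0", "history": [["hi", "hello"]], "temperature": 0.1,
-#    "frequency_penalty": 0.4}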
-
-def format_conversation(history):
-    _str = '\n'.join([
-        (
-            f'<<<User>>> {h[0]}\n'
-            f'<<<Asst>>> {h[1]}'
-        )
-        for h in history
-    ])
-    return _str
-
-
-def print_log_file():
-    global LOG_FILE, LOG_PATH
-    if SAVE_LOGS and os.path.exists(LOG_PATH):
-        with open(LOG_PATH, 'r', encoding='utf-8') as f:
-            convos = {}
-            for l in f:
-                if l:
-                    item = json.loads(l)
-                    convos[item['key']] = item
-            print(f'Printing log from {LOG_PATH}')
-            for k, v in convos.items():
-                history = v.pop('history')
-                print(f'######--{v}--##')
-                _str = format_conversation(history)
-                print(_str)
-
-
-def debug_chat_response_echo(
-    message: str, 
-    history: List[Tuple[str, str]], 
-    temperature: float = 0.0, 
-    max_tokens: int = 4096, 
-    frequency_penalty: float = 0.4,
-    current_time: Optional[float] = None,
-    system_prompt: str = SYSTEM_PROMPT_1,
-) -> str:
-    global LOG_FILE
-    import time
-    time.sleep(0.5)
-
-    if message.strip() == GET_LOG_CMD:
-        print_log_file()
-        yield "Finish printed log."
-        return
-
-    for i in range(len(message)):
-        yield f"repeat: {current_time} {message[:i + 1]}"
-    
-    cur_out = f"repeat: {current_time} {message}"
-    maybe_log_conv_file(current_time, history, message, cur_out, temperature=temperature, frequency_penalty=frequency_penalty)
-
-
-def check_model_path(model_path) -> str:
-    assert os.path.exists(model_path), f'{model_path} not found'
-    ckpt_info = "None"
-    if os.path.isdir(model_path):
-        if os.path.exists(f'{model_path}/info.txt'):
-            with open(f'{model_path}/info.txt', 'r') as f:
-                ckpt_info = f.read()
-                print(f'Checkpoint info:\n{ckpt_info}\n-----')
-        else:
-            print(f'info.txt not found in {model_path}')
-        print(f'model path dir: {list(os.listdir(model_path))}')
-    
-    return ckpt_info
-
-
-def maybe_delete_folder():
-    if IS_DELETE_FOLDER and DOWNLOAD_SNAPSHOT:
-        import shutil
-        print(f'DELETE ALL FILES IN {DELETE_FOLDER}')
-        for filename in os.listdir(DELETE_FOLDER):
-            file_path = os.path.join(DELETE_FOLDER, filename)
-            try:
-                if os.path.isfile(file_path) or os.path.islink(file_path):
-                    os.unlink(file_path)
-                elif os.path.isdir(file_path):
-                    shutil.rmtree(file_path)
-            except Exception as e:
-                print('Failed to delete %s. Reason: %s' % (file_path, e))
-
-AGREE_POP_SCRIPTS = """
-async () => {
-    alert("To use our service, you are required to agree to the following terms:\\nYou must not use our service to generate any harmful, unethical or illegal content that violates local and international laws, including but not limited to hate speech, violence and deception.\\nThe service may collect user dialogue data for performance improvement, and reserves the right to distribute it under CC-BY or similar license. So do not enter any personal information!");
-}
-"""
-
-def launch():
-    global demo, llm, DEBUG, LOG_FILE
-    model_desc = MODEL_DESC
-    model_path = MODEL_PATH
-    model_title = MODEL_TITLE
-    hf_model_name = HF_MODEL_NAME
-    tensor_parallel = TENSOR_PARALLEL
-    assert tensor_parallel > 0, f'{tensor_parallel} invalid'
-    dtype = DTYPE
-    sys_prompt = SYSTEM_PROMPT_1
-    max_tokens = MAX_TOKENS
-    temperature = TEMPERATURE
-    frequence_penalty = FREQUENCE_PENALTY
-    ckpt_info = "None"
-
-    print(
-        f'Launch config: '
-        f'\n| model_title=`{model_title}` '
-        f'\n| max_tokens={max_tokens} '
-        f'\n| dtype={dtype} '
-        f'\n| tensor_parallel={tensor_parallel} '
-        f'\n| BLOCK_LANGS={BLOCK_LANGS} '
-        f'\n| IS_DELETE_FOLDER={IS_DELETE_FOLDER} '
-        f'\n| STREAM_YIELD_MULTIPLE={STREAM_YIELD_MULTIPLE} '
-        f'\n| STREAM_CHECK_MULTIPLE={STREAM_CHECK_MULTIPLE} '
-        f'\n| DISPLAY_MODEL_PATH={DISPLAY_MODEL_PATH} '
-        f'\n| LANG_BLOCK_HISTORY={LANG_BLOCK_HISTORY} '
-        f'\n| frequence_penalty={frequence_penalty} '
-        f'\n| temperature={temperature} '
-        f'\n| hf_model_name={hf_model_name} '
-        f'\n| model_path={model_path} '
-        f'\n| DOWNLOAD_SNAPSHOT={DOWNLOAD_SNAPSHOT} '
-        f'\n| gpu_memory_utilization={gpu_memory_utilization} '
-        f'\n| KEYWORDS={KEYWORDS} '
-        f'\n| LOG_PATH={LOG_PATH} | SAVE_LOGS={SAVE_LOGS} '
-        f'\n| GET_LOG_CMD={GET_LOG_CMD} '
-        f'\n| Sys={SYSTEM_PROMPT_1}'
-        f'\n| Desc={model_desc}'
-    )
-
-    if DEBUG:
-        model_desc += "\n<br>!!!!! This is in debug mode; responses will simply echo the input"
-        response_fn = debug_chat_response_echo
-        print('Creating in DEBUG MODE')
-        if SAVE_LOGS:
-            LOG_FILE = open(LOG_PATH, 'a', encoding='utf-8')
-    else:
-        # ! load the model
-        maybe_delete_folder()
-
-        if DOWNLOAD_SNAPSHOT:
-            print(f'Downloading from HF_MODEL_NAME={hf_model_name} -> {model_path}')
-            if HF_TOKEN is not None:
-                print('Load with HF_TOKEN (token value not printed)')
-                snapshot_download(hf_model_name, local_dir=model_path, use_auth_token=True, token=HF_TOKEN)
-            else:
-                snapshot_download(hf_model_name, local_dir=model_path)
-
-        import vllm
-        from vllm import LLM
-
-        print(f'VLLM: {vllm.__version__}')
-        ckpt_info = check_model_path(model_path)
-
-        print(f'Load path: {model_path} | {ckpt_info}')
-
-        if QUANTIZATION == 'awq':
-            print('Load model in int4 (AWQ) quantization')
-            llm = LLM(model=model_path, dtype=dtype, tensor_parallel_size=tensor_parallel, gpu_memory_utilization=gpu_memory_utilization, quantization="awq")
-        else:
-            llm = LLM(model=model_path, dtype=dtype, tensor_parallel_size=tensor_parallel, gpu_memory_utilization=gpu_memory_utilization)
-
-        try:
-            print(llm.llm_engine.workers[0].model)
-        except Exception as e:
-            print(f'Cannot print model worker: {e}')
-
-        try:
-            llm.llm_engine.scheduler_config.max_model_len = 4096
-            llm.llm_engine.scheduler_config.max_num_batched_tokens = 4096
-            llm.llm_engine.tokenizer.add_special_tokens = False
-        except Exception as e:
-            print(f'Cannot set parameters: {e}')
-
-        print(f'Use system prompt:\n{sys_prompt}')
-
-        response_fn = chat_response_stream_multiturn
-        print(f'respond: {response_fn}')
-
-        if SAVE_LOGS:
-            LOG_FILE = open(LOG_PATH, 'a', encoding='utf-8')
-
-    demo = gr.ChatInterface(
-        response_fn,
-        chatbot=ChatBot(
-            label=MODEL_NAME,
-            bubble_full_width=False,
-            latex_delimiters=[
-                { "left": "$", "right": "$", "display": False},
-                { "left": "$$", "right": "$$", "display": True},
-            ],
-            show_copy_button=True,
-        ),
-        textbox=gr.Textbox(placeholder='Type message', lines=8, max_lines=128, min_width=200),
-        submit_btn=gr.Button(value='Submit', variant="primary", scale=0),
-        # ! consider preventing the stop button
-        stop_btn=None,
-        title=f"{model_title}",
-        description=f"{model_desc}",
-        additional_inputs=[
-            gr.Number(value=temperature, label='Temperature (higher -> more random)'), 
-            gr.Number(value=max_tokens, label='Max generated tokens (increase for longer outputs)'),
-            gr.Number(value=frequence_penalty, label='Frequency penalty (> 0 encourages new tokens)'),
-            gr.Number(value=0, label='current_time', visible=False), 
-            # ! Remove the system prompt textbox to avoid jailbreaking
-            # gr.Textbox(value=sys_prompt, label='System prompt', lines=8)
-        ], 
-    )
-    demo.title = MODEL_NAME
-    with demo:
-        gr.Markdown(cite_markdown)
-        if DISPLAY_MODEL_PATH:
-            gr.Markdown(path_markdown.format(model_path=model_path))
-        
-        if ENABLE_AGREE_POPUP:
-            demo.load(None, None, None, _js=AGREE_POP_SCRIPTS)
-    
-
-    demo.queue()
-    demo.launch(server_port=PORT)
-
-
-def main():
-
-    launch()
-
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/spaces/Silentlin/DiffSinger/data_gen/tts/txt_processors/zh_g2pM.py b/spaces/Silentlin/DiffSinger/data_gen/tts/txt_processors/zh_g2pM.py
deleted file mode 100644
index a3dbc7f2feb4c1dc3bc2d745a260cd0a7922e140..0000000000000000000000000000000000000000
--- a/spaces/Silentlin/DiffSinger/data_gen/tts/txt_processors/zh_g2pM.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import re
-import jieba
-from pypinyin import pinyin, Style
-from data_gen.tts.data_gen_utils import PUNCS
-from data_gen.tts.txt_processors import zh
-from g2pM import G2pM
-
-ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
-              'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']
-ALL_YUNMU = ['a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia', 'ian',
-             'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong', 'ou',
-             'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn']
-
-
-class TxtProcessor(zh.TxtProcessor):
-    model = G2pM()
-
-    @staticmethod
-    def sp_phonemes():
-        return ['|', '#']
-
-    @classmethod
-    def process(cls, txt, pre_align_args):
-        txt = cls.preprocess_text(txt)
-        ph_list = cls.model(txt, tone=pre_align_args['use_tone'], char_split=True)
-        seg_list = '#'.join(jieba.cut(txt))
-        assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list)
-
-        # Insert word-boundary markers '#'
-        ph_list_ = []
-        seg_idx = 0
-        for p in ph_list:
-            p = p.replace("u:", "v")
-            if seg_list[seg_idx] == '#':
-                ph_list_.append('#')
-                seg_idx += 1
-            else:
-                ph_list_.append("|")
-            seg_idx += 1
-            if re.findall('[\u4e00-\u9fff]', p):
-                if pre_align_args['use_tone']:
-                    p = pinyin(p, style=Style.TONE3, strict=True)[0][0]
-                    if p[-1] not in ['1', '2', '3', '4', '5']:
-                        p = p + '5'
-                else:
-                    p = pinyin(p, style=Style.NORMAL, strict=True)[0][0]
-
-            finished = False
-            # count alphabetic chars; the original `len([c.isalpha() for c in p])`
-            # kept one boolean per char, so it was simply len(p)
-            if len([c for c in p if c.isalpha()]) > 1:
-                for shenmu in ALL_SHENMU:
-                    # slice the prefix off; str.lstrip() strips by character set, not by prefix
-                    if p.startswith(shenmu) and not p[len(shenmu):].isnumeric():
-                        ph_list_ += [shenmu, p[len(shenmu):]]
-                        finished = True
-                        break
-            if not finished:
-                ph_list_.append(p)
-
-        ph_list = ph_list_
-
-        # Remove word-boundary markers adjacent to silence symbols, e.g. [..., '#', ',', '#', ...]
-        sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes()
-        ph_list_ = []
-        for i in range(len(ph_list)):
-            # bounds guard so a leading/trailing '#' cannot index out of range
-            keep = 0 < i < len(ph_list) - 1 and ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes
-            if ph_list[i] != '#' or keep:
-                ph_list_.append(ph_list[i])
-        ph_list = ph_list_
-        return ph_list, txt
-
-
-if __name__ == '__main__':
-    phs, txt = TxtProcessor.process('他来到了,网易杭研大厦', {'use_tone': True})
-    print(phs)
diff --git a/spaces/Skyler123/TangGPT/run_macOS.command b/spaces/Skyler123/TangGPT/run_macOS.command
deleted file mode 100644
index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000
--- a/spaces/Skyler123/TangGPT/run_macOS.command
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Get the directory containing this script
-script_dir=$(dirname "$0")
-
-# Change the working directory to the script's directory
-cd "$script_dir"
-
-# Check whether the Git repository has updates
-git remote update
-pwd
-
-if ! git status -uno | grep 'up to date' > /dev/null; then
-	# If there are updates, stop the currently running server
-	pkill -f ChuanhuChatbot.py
-
-	# Pull the latest changes
-	git pull
-
-	# Install dependencies
-	pip3 install -r requirements.txt
-
-	# Restart the server
-	nohup python3 ChuanhuChatbot.py &
-fi
diff --git a/spaces/SuYuanS/AudioCraft_Plus/docs/CONDITIONING.md b/spaces/SuYuanS/AudioCraft_Plus/docs/CONDITIONING.md
deleted file mode 100644
index 6e356cb8e9912d3e18fc84598c1acf77c6e7abc5..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/docs/CONDITIONING.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# AudioCraft conditioning modules
-
-AudioCraft provides a
-[modular implementation of conditioning modules](../audiocraft/modules/conditioners.py)
-that can be used with the language model to condition the generation.
-The codebase was designed so that the set of currently supported modules
-can easily be extended to develop new ways of controlling the generation.
-
-
-## Conditioning methods
-
-For now, we support three main types of conditioning within AudioCraft:
-* Text-based conditioning methods
-* Waveform-based conditioning methods
-* Joint embedding conditioning methods for text and audio projected in a shared latent space.
-
-The language model relies on two core components that handle the conditioning information:
-* The `ConditionProvider` class, which maps metadata to processed conditions, leveraging
-all the conditioners defined for the given task.
-* The `ConditionFuser` class, which takes preprocessed conditions and properly fuses the
-conditioning embeddings into the language model inputs following a given fusing strategy.
-
-Different conditioners (for text, waveform, joint embeddings...) are provided as torch
-modules in AudioCraft and are used internally to process the conditioning signals
-and feed them to the language model.
-
-
-## Core concepts
-
-### Conditioners
-
-The `BaseConditioner` torch module is the base implementation for all conditioners in audiocraft.
-
-Each conditioner is expected to implement two methods (a sketch follows the list):
-* The `tokenize` method, a preprocessing step that contains all processing
-that can lead to synchronization points (e.g. BPE tokenization with transfer to the GPU).
-Its output is then fed to the `forward` method.
-* The `forward` method, which takes the output of the tokenize step and contains the core computation
-to obtain the conditioning embedding along with a mask indicating valid indices (e.g. padding tokens).
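-
-As a rough illustration only (the actual `BaseConditioner` API lives in
-[conditioners.py](../audiocraft/modules/conditioners.py); the class below is a hypothetical sketch,
-not the real base-class signature), a conditioner following this contract could look like:
-
-```python
-import torch
-from torch import nn
-
-class ToyConditioner(nn.Module):
-    """Hypothetical sketch of the tokenize/forward contract."""
-    def __init__(self, vocab_size: int, dim: int):
-        super().__init__()
-        self.embed = nn.Embedding(vocab_size, dim)
-
-    def tokenize(self, texts):
-        # preprocessing step: anything that may cause synchronization points
-        ids = [[hash(w) % self.embed.num_embeddings for w in t.split()] for t in texts]
-        max_len = max(len(x) for x in ids)
-        tokens = torch.zeros(len(ids), max_len, dtype=torch.long)
-        mask = torch.zeros(len(ids), max_len, dtype=torch.bool)
-        for i, x in enumerate(ids):
-            tokens[i, :len(x)] = torch.tensor(x)
-            mask[i, :len(x)] = True
-        return tokens, mask
-
-    def forward(self, tokenized):
-        # core computation: embeddings plus a mask flagging valid (non-padding) positions
-        tokens, mask = tokenized
-        return self.embed(tokens), mask
-```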
-
-### ConditionProvider
-
-The ConditionProvider prepares and provides conditions given a dictionary of conditioners.
-
-Conditioners are specified as a dictionary of attributes and the corresponding conditioner
-providing the processing logic for the given attribute.
-
-Similarly to the conditioners, the condition provider works in two steps to avoid synchronization points:
-* A `tokenize` method that takes the list of conditioning attributes for the batch
-and runs all tokenize steps for the set of conditioners.
-* A `forward` method that takes the output of the tokenize step and runs all the forward steps
-for the set of conditioners.
-
-The list of conditioning attributes is passed as a list of `ConditioningAttributes`,
-presented just below.
-
-### ConditionFuser
-
-Once all conditioning signals have been extracted and processed by the `ConditionProvider`
-as dense embeddings, they remain to be passed to the language model along with the original
-language model inputs.
-
-The `ConditionFuser` specifically handles the logic for combining the different conditions
-with the actual model input, supporting different strategies to do so.
-
-One can therefore define different strategies to combine or fuse the conditions with the input, in particular (sketched below):
-* Prepending the conditioning signal to the input with the `prepend` strategy,
-* Summing the conditioning signal to the input with the `sum` strategy,
-* Combining the conditioning relying on a cross-attention mechanism with the `cross` strategy,
-* Using input interpolation with the `input_interpolate` strategy.
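-
-For intuition, the two simplest strategies amount to something like the following schematic
-sketch (not the real `ConditionFuser` code; the `cross` strategy instead routes the condition
-to cross-attention layers rather than modifying the input):
-
-```python
-import torch
-
-def fuse(inputs: torch.Tensor, cond: torch.Tensor, mode: str) -> torch.Tensor:
-    # inputs: [B, T, D] language-model input embeddings
-    # cond:   [B, S, D] conditioning embeddings
-    if mode == "prepend":
-        # the conditioning tokens become a prefix of the input sequence
-        return torch.cat([cond, inputs], dim=1)
-    if mode == "sum":
-        # position-wise addition (assumes S == T in this sketch)
-        return inputs + cond
-    raise ValueError(f"unsupported fuse mode: {mode}")
-```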
-
-### SegmentWithAttributes and ConditioningAttributes: From metadata to conditions
-
-The `ConditioningAttributes` dataclass is the base class for metadata
-containing all attributes used for conditioning the language model.
-
-It currently supports the following types of attributes:
-* Text conditioning attributes: Dictionary of textual attributes used for text-conditioning.
-* Wav conditioning attributes: Dictionary of waveform attributes used for waveform-based
-conditioning such as the chroma conditioning.
-* JointEmbed conditioning attributes: Dictionary of text and waveform attributes
-that are expected to be represented in a shared latent space.
-
-These are the attributes processed by the corresponding conditioners.
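-
-For example, assuming the per-type dictionaries described above (the attribute name
-`description` here is illustrative), a text-conditioned attribute set could be built as:
-
-```python
-from audiocraft.modules.conditioners import ConditioningAttributes
-
-attrs = ConditioningAttributes(text={"description": "happy rock with driving drums"})
-# waveform attributes (e.g. for chroma conditioning) would be added to `attrs.wav` similarly
-```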
-
-`ConditioningAttributes` are extracted from metadata loaded along with the audio in the datasets,
-provided that the metadata used by the dataset implements the `SegmentWithAttributes` abstraction.
-
-All metadata-enabled datasets used for conditioning in AudioCraft inherit from
-the [`audiocraft.data.info_dataset.InfoAudioDataset`](../audiocraft/data/info_audio_dataset.py) class,
-and the corresponding metadata inherits from and implements the `SegmentWithAttributes` abstraction.
-Refer to the [`audiocraft.data.music_dataset.MusicAudioDataset`](../audiocraft/data/music_dataset.py)
-class as an example.
-
-
-## Available conditioners
-
-### Text conditioners
-
-All text conditioners are expected to inherit from the `TextConditioner` class.
-
-AudioCraft currently provides two text conditioners:
-* The `LUTConditioner`, which relies on a look-up table of embeddings learned at train time,
-using either no tokenizer or a spaCy tokenizer. This conditioner is particularly
-useful for simple experiments and categorical labels.
-* The `T5Conditioner` that relies on a
-[pre-trained T5 model](https://huggingface.co/docs/transformers/model_doc/t5)
-frozen or fine-tuned at train time to extract the text embeddings.
-
-### Waveform conditioners
-
-All waveform conditioners are expected to inherit from the `WaveformConditioner` class and
-provide a conditioning method that takes a waveform as input. A waveform conditioner
-must implement the logic to extract the embedding from the waveform and define the downsampling
-factor from the waveform to the resulting embedding, as in the sketch below.
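-
-As a sketch of that contract (again a hypothetical class, not the real base-class signature):
-
-```python
-import torch
-from torch import nn
-
-class ToyWavConditioner(nn.Module):
-    """Maps a waveform [B, C, T] to an embedding [B, T // hop, dim]."""
-    def __init__(self, dim: int, hop: int = 320):
-        super().__init__()
-        self.hop = hop  # downsampling factor from waveform rate to embedding rate
-        self.proj = nn.Conv1d(1, dim, kernel_size=hop, stride=hop)
-
-    def forward(self, wav: torch.Tensor) -> torch.Tensor:
-        mono = wav.mean(dim=1, keepdim=True)  # collapse channels to mono
-        return self.proj(mono).transpose(1, 2)  # [B, frames, dim]
-```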
-
-The `ChromaStemConditioner` conditioner is a waveform conditioner for the chroma-features
-conditioning used by MusicGen. It takes a given waveform, extracts the stems relevant for melody
-(namely all stems other than drums and bass) using a
-[pre-trained Demucs model](https://github.com/facebookresearch/demucs),
-and then extracts the chromagram bins from the remaining mix of stems.
-
-### Joint embeddings conditioners
-
-We finally provide support for conditioning based on joint text and audio embeddings through
-the `JointEmbeddingConditioner` class and the `CLAPEmbeddingConditioner` that implements such
-a conditioning method relying on a [pretrained CLAP model](https://github.com/LAION-AI/CLAP).
-
-## Classifier Free Guidance
-
-We provide a Classifier Free Guidance implementation in AudioCraft. With the classifier free
-guidance dropout, all attributes are dropped with the same probability.
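-
-At inference time, classifier-free guidance is typically applied by combining conditional and
-unconditional predictions with a guidance scale (the standard formulation, shown schematically;
-this is not AudioCraft-specific code):
-
-```python
-import torch
-
-def cfg_logits(cond: torch.Tensor, uncond: torch.Tensor, scale: float = 3.0) -> torch.Tensor:
-    # scale == 1.0 recovers purely conditional sampling
-    return uncond + scale * (cond - uncond)
-```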
-
-## Attribute Dropout
-
-We further provide an attribute dropout strategy. Unlike the classifier free guidance dropout,
-the attribute dropout drops given attributes with a defined probability, so that the model
-learns not to expect all conditioning signals to be provided at once.
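-
-Conceptually (a sketch, not the actual dropout module):
-
-```python
-import random
-
-def attribute_dropout(attributes: dict, probs: dict) -> dict:
-    # independently drop each attribute with its own configured probability
-    return {k: (None if random.random() < probs.get(k, 0.0) else v)
-            for k, v in attributes.items()}
-```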
-
-## Faster computation of conditions
-
-The outputs of conditioners that require heavy computation on the waveform can be cached, in particular
-those of the `ChromaStemConditioner` or `CLAPEmbeddingConditioner`. You just need to provide the
-`cache_path` parameter to them. We recommend running dummy jobs to fill up the cache quickly.
-An example is provided in the [musicgen.musicgen_melody_32khz grid](../audiocraft/grids/musicgen/musicgen_melody_32khz.py).
\ No newline at end of file
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/latex_symbols.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/latex_symbols.py
deleted file mode 100644
index 164d917beb64cb0bd44c67a3391a3ab051ca7609..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/latex_symbols.py
+++ /dev/null
@@ -1,1301 +0,0 @@
-# encoding: utf-8
-
-# DO NOT EDIT THIS FILE BY HAND.
-
-# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
-
-# This file is autogenerated from the file:
-# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
-# This original list is filtered to remove any unicode characters that are not valid
-# Python identifiers.
-
-latex_symbols = {
-
-    "\\euler" : "ℯ",
-    "\\^a" : "ᵃ",
-    "\\^b" : "ᵇ",
-    "\\^c" : "ᶜ",
-    "\\^d" : "ᵈ",
-    "\\^e" : "ᵉ",
-    "\\^f" : "ᶠ",
-    "\\^g" : "ᵍ",
-    "\\^h" : "ʰ",
-    "\\^i" : "ⁱ",
-    "\\^j" : "ʲ",
-    "\\^k" : "ᵏ",
-    "\\^l" : "ˡ",
-    "\\^m" : "ᵐ",
-    "\\^n" : "ⁿ",
-    "\\^o" : "ᵒ",
-    "\\^p" : "ᵖ",
-    "\\^r" : "ʳ",
-    "\\^s" : "ˢ",
-    "\\^t" : "ᵗ",
-    "\\^u" : "ᵘ",
-    "\\^v" : "ᵛ",
-    "\\^w" : "ʷ",
-    "\\^x" : "ˣ",
-    "\\^y" : "ʸ",
-    "\\^z" : "ᶻ",
-    "\\^A" : "ᴬ",
-    "\\^B" : "ᴮ",
-    "\\^D" : "ᴰ",
-    "\\^E" : "ᴱ",
-    "\\^G" : "ᴳ",
-    "\\^H" : "ᴴ",
-    "\\^I" : "ᴵ",
-    "\\^J" : "ᴶ",
-    "\\^K" : "ᴷ",
-    "\\^L" : "ᴸ",
-    "\\^M" : "ᴹ",
-    "\\^N" : "ᴺ",
-    "\\^O" : "ᴼ",
-    "\\^P" : "ᴾ",
-    "\\^R" : "ᴿ",
-    "\\^T" : "ᵀ",
-    "\\^U" : "ᵁ",
-    "\\^V" : "ⱽ",
-    "\\^W" : "ᵂ",
-    "\\^alpha" : "ᵅ",
-    "\\^beta" : "ᵝ",
-    "\\^gamma" : "ᵞ",
-    "\\^delta" : "ᵟ",
-    "\\^epsilon" : "ᵋ",
-    "\\^theta" : "ᶿ",
-    "\\^iota" : "ᶥ",
-    "\\^phi" : "ᵠ",
-    "\\^chi" : "ᵡ",
-    "\\^Phi" : "ᶲ",
-    "\\_a" : "ₐ",
-    "\\_e" : "ₑ",
-    "\\_h" : "ₕ",
-    "\\_i" : "ᵢ",
-    "\\_j" : "ⱼ",
-    "\\_k" : "ₖ",
-    "\\_l" : "ₗ",
-    "\\_m" : "ₘ",
-    "\\_n" : "ₙ",
-    "\\_o" : "ₒ",
-    "\\_p" : "ₚ",
-    "\\_r" : "ᵣ",
-    "\\_s" : "ₛ",
-    "\\_t" : "ₜ",
-    "\\_u" : "ᵤ",
-    "\\_v" : "ᵥ",
-    "\\_x" : "ₓ",
-    "\\_schwa" : "ₔ",
-    "\\_beta" : "ᵦ",
-    "\\_gamma" : "ᵧ",
-    "\\_rho" : "ᵨ",
-    "\\_phi" : "ᵩ",
-    "\\_chi" : "ᵪ",
-    "\\hbar" : "ħ",
-    "\\sout" : "̶",
-    "\\ordfeminine" : "ª",
-    "\\cdotp" : "·",
-    "\\ordmasculine" : "º",
-    "\\AA" : "Å",
-    "\\AE" : "Æ",
-    "\\DH" : "Ð",
-    "\\O" : "Ø",
-    "\\TH" : "Þ",
-    "\\ss" : "ß",
-    "\\aa" : "å",
-    "\\ae" : "æ",
-    "\\eth" : "ð",
-    "\\dh" : "ð",
-    "\\o" : "ø",
-    "\\th" : "þ",
-    "\\DJ" : "Đ",
-    "\\dj" : "đ",
-    "\\imath" : "ı",
-    "\\jmath" : "ȷ",
-    "\\L" : "Ł",
-    "\\l" : "ł",
-    "\\NG" : "Ŋ",
-    "\\ng" : "ŋ",
-    "\\OE" : "Œ",
-    "\\oe" : "œ",
-    "\\hvlig" : "ƕ",
-    "\\nrleg" : "ƞ",
-    "\\doublepipe" : "ǂ",
-    "\\trna" : "ɐ",
-    "\\trnsa" : "ɒ",
-    "\\openo" : "ɔ",
-    "\\rtld" : "ɖ",
-    "\\schwa" : "ə",
-    "\\varepsilon" : "ε",
-    "\\pgamma" : "ɣ",
-    "\\pbgam" : "ɤ",
-    "\\trnh" : "ɥ",
-    "\\btdl" : "ɬ",
-    "\\rtll" : "ɭ",
-    "\\trnm" : "ɯ",
-    "\\trnmlr" : "ɰ",
-    "\\ltlmr" : "ɱ",
-    "\\ltln" : "ɲ",
-    "\\rtln" : "ɳ",
-    "\\clomeg" : "ɷ",
-    "\\ltphi" : "ɸ",
-    "\\trnr" : "ɹ",
-    "\\trnrl" : "ɺ",
-    "\\rttrnr" : "ɻ",
-    "\\rl" : "ɼ",
-    "\\rtlr" : "ɽ",
-    "\\fhr" : "ɾ",
-    "\\rtls" : "ʂ",
-    "\\esh" : "ʃ",
-    "\\trnt" : "ʇ",
-    "\\rtlt" : "ʈ",
-    "\\pupsil" : "ʊ",
-    "\\pscrv" : "ʋ",
-    "\\invv" : "ʌ",
-    "\\invw" : "ʍ",
-    "\\trny" : "ʎ",
-    "\\rtlz" : "ʐ",
-    "\\yogh" : "ʒ",
-    "\\glst" : "ʔ",
-    "\\reglst" : "ʕ",
-    "\\inglst" : "ʖ",
-    "\\turnk" : "ʞ",
-    "\\dyogh" : "ʤ",
-    "\\tesh" : "ʧ",
-    "\\rasp" : "ʼ",
-    "\\verts" : "ˈ",
-    "\\verti" : "ˌ",
-    "\\lmrk" : "ː",
-    "\\hlmrk" : "ˑ",
-    "\\grave" : "̀",
-    "\\acute" : "́",
-    "\\hat" : "̂",
-    "\\tilde" : "̃",
-    "\\bar" : "̄",
-    "\\breve" : "̆",
-    "\\dot" : "̇",
-    "\\ddot" : "̈",
-    "\\ocirc" : "̊",
-    "\\H" : "̋",
-    "\\check" : "̌",
-    "\\palh" : "̡",
-    "\\rh" : "̢",
-    "\\c" : "̧",
-    "\\k" : "̨",
-    "\\sbbrg" : "̪",
-    "\\strike" : "̶",
-    "\\Alpha" : "Α",
-    "\\Beta" : "Β",
-    "\\Gamma" : "Γ",
-    "\\Delta" : "Δ",
-    "\\Epsilon" : "Ε",
-    "\\Zeta" : "Ζ",
-    "\\Eta" : "Η",
-    "\\Theta" : "Θ",
-    "\\Iota" : "Ι",
-    "\\Kappa" : "Κ",
-    "\\Lambda" : "Λ",
-    "\\Xi" : "Ξ",
-    "\\Pi" : "Π",
-    "\\Rho" : "Ρ",
-    "\\Sigma" : "Σ",
-    "\\Tau" : "Τ",
-    "\\Upsilon" : "Υ",
-    "\\Phi" : "Φ",
-    "\\Chi" : "Χ",
-    "\\Psi" : "Ψ",
-    "\\Omega" : "Ω",
-    "\\alpha" : "α",
-    "\\beta" : "β",
-    "\\gamma" : "γ",
-    "\\delta" : "δ",
-    "\\zeta" : "ζ",
-    "\\eta" : "η",
-    "\\theta" : "θ",
-    "\\iota" : "ι",
-    "\\kappa" : "κ",
-    "\\lambda" : "λ",
-    "\\mu" : "μ",
-    "\\nu" : "ν",
-    "\\xi" : "ξ",
-    "\\pi" : "π",
-    "\\rho" : "ρ",
-    "\\varsigma" : "ς",
-    "\\sigma" : "σ",
-    "\\tau" : "τ",
-    "\\upsilon" : "υ",
-    "\\varphi" : "φ",
-    "\\chi" : "χ",
-    "\\psi" : "ψ",
-    "\\omega" : "ω",
-    "\\vartheta" : "ϑ",
-    "\\phi" : "ϕ",
-    "\\varpi" : "ϖ",
-    "\\Stigma" : "Ϛ",
-    "\\Digamma" : "Ϝ",
-    "\\digamma" : "ϝ",
-    "\\Koppa" : "Ϟ",
-    "\\Sampi" : "Ϡ",
-    "\\varkappa" : "ϰ",
-    "\\varrho" : "ϱ",
-    "\\varTheta" : "ϴ",
-    "\\epsilon" : "ϵ",
-    "\\dddot" : "⃛",
-    "\\ddddot" : "⃜",
-    "\\hslash" : "ℏ",
-    "\\Im" : "ℑ",
-    "\\ell" : "ℓ",
-    "\\wp" : "℘",
-    "\\Re" : "ℜ",
-    "\\aleph" : "ℵ",
-    "\\beth" : "ℶ",
-    "\\gimel" : "ℷ",
-    "\\daleth" : "ℸ",
-    "\\bbPi" : "ℿ",
-    "\\Zbar" : "Ƶ",
-    "\\overbar" : "̅",
-    "\\ovhook" : "̉",
-    "\\candra" : "̐",
-    "\\oturnedcomma" : "̒",
-    "\\ocommatopright" : "̕",
-    "\\droang" : "̚",
-    "\\wideutilde" : "̰",
-    "\\not" : "̸",
-    "\\upMu" : "Μ",
-    "\\upNu" : "Ν",
-    "\\upOmicron" : "Ο",
-    "\\upepsilon" : "ε",
-    "\\upomicron" : "ο",
-    "\\upvarbeta" : "ϐ",
-    "\\upoldKoppa" : "Ϙ",
-    "\\upoldkoppa" : "ϙ",
-    "\\upstigma" : "ϛ",
-    "\\upkoppa" : "ϟ",
-    "\\upsampi" : "ϡ",
-    "\\tieconcat" : "⁀",
-    "\\leftharpoonaccent" : "⃐",
-    "\\rightharpoonaccent" : "⃑",
-    "\\vertoverlay" : "⃒",
-    "\\overleftarrow" : "⃖",
-    "\\vec" : "⃗",
-    "\\overleftrightarrow" : "⃡",
-    "\\annuity" : "⃧",
-    "\\threeunderdot" : "⃨",
-    "\\widebridgeabove" : "⃩",
-    "\\bbC" : "ℂ",
-    "\\eulermascheroni" : "ℇ",
-    "\\scrg" : "ℊ",
-    "\\scrH" : "ℋ",
-    "\\frakH" : "ℌ",
-    "\\bbH" : "ℍ",
-    "\\planck" : "ℎ",
-    "\\scrI" : "ℐ",
-    "\\scrL" : "ℒ",
-    "\\bbN" : "ℕ",
-    "\\bbP" : "ℙ",
-    "\\bbQ" : "ℚ",
-    "\\scrR" : "ℛ",
-    "\\bbR" : "ℝ",
-    "\\bbZ" : "ℤ",
-    "\\frakZ" : "ℨ",
-    "\\Angstrom" : "Å",
-    "\\scrB" : "ℬ",
-    "\\frakC" : "ℭ",
-    "\\scre" : "ℯ",
-    "\\scrE" : "ℰ",
-    "\\scrF" : "ℱ",
-    "\\Finv" : "Ⅎ",
-    "\\scrM" : "ℳ",
-    "\\scro" : "ℴ",
-    "\\bbgamma" : "ℽ",
-    "\\bbGamma" : "ℾ",
-    "\\bbiD" : "ⅅ",
-    "\\bbid" : "ⅆ",
-    "\\bbie" : "ⅇ",
-    "\\bbii" : "ⅈ",
-    "\\bbij" : "ⅉ",
-    "\\bfA" : "𝐀",
-    "\\bfB" : "𝐁",
-    "\\bfC" : "𝐂",
-    "\\bfD" : "𝐃",
-    "\\bfE" : "𝐄",
-    "\\bfF" : "𝐅",
-    "\\bfG" : "𝐆",
-    "\\bfH" : "𝐇",
-    "\\bfI" : "𝐈",
-    "\\bfJ" : "𝐉",
-    "\\bfK" : "𝐊",
-    "\\bfL" : "𝐋",
-    "\\bfM" : "𝐌",
-    "\\bfN" : "𝐍",
-    "\\bfO" : "𝐎",
-    "\\bfP" : "𝐏",
-    "\\bfQ" : "𝐐",
-    "\\bfR" : "𝐑",
-    "\\bfS" : "𝐒",
-    "\\bfT" : "𝐓",
-    "\\bfU" : "𝐔",
-    "\\bfV" : "𝐕",
-    "\\bfW" : "𝐖",
-    "\\bfX" : "𝐗",
-    "\\bfY" : "𝐘",
-    "\\bfZ" : "𝐙",
-    "\\bfa" : "𝐚",
-    "\\bfb" : "𝐛",
-    "\\bfc" : "𝐜",
-    "\\bfd" : "𝐝",
-    "\\bfe" : "𝐞",
-    "\\bff" : "𝐟",
-    "\\bfg" : "𝐠",
-    "\\bfh" : "𝐡",
-    "\\bfi" : "𝐢",
-    "\\bfj" : "𝐣",
-    "\\bfk" : "𝐤",
-    "\\bfl" : "𝐥",
-    "\\bfm" : "𝐦",
-    "\\bfn" : "𝐧",
-    "\\bfo" : "𝐨",
-    "\\bfp" : "𝐩",
-    "\\bfq" : "𝐪",
-    "\\bfr" : "𝐫",
-    "\\bfs" : "𝐬",
-    "\\bft" : "𝐭",
-    "\\bfu" : "𝐮",
-    "\\bfv" : "𝐯",
-    "\\bfw" : "𝐰",
-    "\\bfx" : "𝐱",
-    "\\bfy" : "𝐲",
-    "\\bfz" : "𝐳",
-    "\\itA" : "𝐴",
-    "\\itB" : "𝐵",
-    "\\itC" : "𝐶",
-    "\\itD" : "𝐷",
-    "\\itE" : "𝐸",
-    "\\itF" : "𝐹",
-    "\\itG" : "𝐺",
-    "\\itH" : "𝐻",
-    "\\itI" : "𝐼",
-    "\\itJ" : "𝐽",
-    "\\itK" : "𝐾",
-    "\\itL" : "𝐿",
-    "\\itM" : "𝑀",
-    "\\itN" : "𝑁",
-    "\\itO" : "𝑂",
-    "\\itP" : "𝑃",
-    "\\itQ" : "𝑄",
-    "\\itR" : "𝑅",
-    "\\itS" : "𝑆",
-    "\\itT" : "𝑇",
-    "\\itU" : "𝑈",
-    "\\itV" : "𝑉",
-    "\\itW" : "𝑊",
-    "\\itX" : "𝑋",
-    "\\itY" : "𝑌",
-    "\\itZ" : "𝑍",
-    "\\ita" : "𝑎",
-    "\\itb" : "𝑏",
-    "\\itc" : "𝑐",
-    "\\itd" : "𝑑",
-    "\\ite" : "𝑒",
-    "\\itf" : "𝑓",
-    "\\itg" : "𝑔",
-    "\\iti" : "𝑖",
-    "\\itj" : "𝑗",
-    "\\itk" : "𝑘",
-    "\\itl" : "𝑙",
-    "\\itm" : "𝑚",
-    "\\itn" : "𝑛",
-    "\\ito" : "𝑜",
-    "\\itp" : "𝑝",
-    "\\itq" : "𝑞",
-    "\\itr" : "𝑟",
-    "\\its" : "𝑠",
-    "\\itt" : "𝑡",
-    "\\itu" : "𝑢",
-    "\\itv" : "𝑣",
-    "\\itw" : "𝑤",
-    "\\itx" : "𝑥",
-    "\\ity" : "𝑦",
-    "\\itz" : "𝑧",
-    "\\biA" : "𝑨",
-    "\\biB" : "𝑩",
-    "\\biC" : "𝑪",
-    "\\biD" : "𝑫",
-    "\\biE" : "𝑬",
-    "\\biF" : "𝑭",
-    "\\biG" : "𝑮",
-    "\\biH" : "𝑯",
-    "\\biI" : "𝑰",
-    "\\biJ" : "𝑱",
-    "\\biK" : "𝑲",
-    "\\biL" : "𝑳",
-    "\\biM" : "𝑴",
-    "\\biN" : "𝑵",
-    "\\biO" : "𝑶",
-    "\\biP" : "𝑷",
-    "\\biQ" : "𝑸",
-    "\\biR" : "𝑹",
-    "\\biS" : "𝑺",
-    "\\biT" : "𝑻",
-    "\\biU" : "𝑼",
-    "\\biV" : "𝑽",
-    "\\biW" : "𝑾",
-    "\\biX" : "𝑿",
-    "\\biY" : "𝒀",
-    "\\biZ" : "𝒁",
-    "\\bia" : "𝒂",
-    "\\bib" : "𝒃",
-    "\\bic" : "𝒄",
-    "\\bid" : "𝒅",
-    "\\bie" : "𝒆",
-    "\\bif" : "𝒇",
-    "\\big" : "𝒈",
-    "\\bih" : "𝒉",
-    "\\bii" : "𝒊",
-    "\\bij" : "𝒋",
-    "\\bik" : "𝒌",
-    "\\bil" : "𝒍",
-    "\\bim" : "𝒎",
-    "\\bin" : "𝒏",
-    "\\bio" : "𝒐",
-    "\\bip" : "𝒑",
-    "\\biq" : "𝒒",
-    "\\bir" : "𝒓",
-    "\\bis" : "𝒔",
-    "\\bit" : "𝒕",
-    "\\biu" : "𝒖",
-    "\\biv" : "𝒗",
-    "\\biw" : "𝒘",
-    "\\bix" : "𝒙",
-    "\\biy" : "𝒚",
-    "\\biz" : "𝒛",
-    "\\scrA" : "𝒜",
-    "\\scrC" : "𝒞",
-    "\\scrD" : "𝒟",
-    "\\scrG" : "𝒢",
-    "\\scrJ" : "𝒥",
-    "\\scrK" : "𝒦",
-    "\\scrN" : "𝒩",
-    "\\scrO" : "𝒪",
-    "\\scrP" : "𝒫",
-    "\\scrQ" : "𝒬",
-    "\\scrS" : "𝒮",
-    "\\scrT" : "𝒯",
-    "\\scrU" : "𝒰",
-    "\\scrV" : "𝒱",
-    "\\scrW" : "𝒲",
-    "\\scrX" : "𝒳",
-    "\\scrY" : "𝒴",
-    "\\scrZ" : "𝒵",
-    "\\scra" : "𝒶",
-    "\\scrb" : "𝒷",
-    "\\scrc" : "𝒸",
-    "\\scrd" : "𝒹",
-    "\\scrf" : "𝒻",
-    "\\scrh" : "𝒽",
-    "\\scri" : "𝒾",
-    "\\scrj" : "𝒿",
-    "\\scrk" : "𝓀",
-    "\\scrm" : "𝓂",
-    "\\scrn" : "𝓃",
-    "\\scrp" : "𝓅",
-    "\\scrq" : "𝓆",
-    "\\scrr" : "𝓇",
-    "\\scrs" : "𝓈",
-    "\\scrt" : "𝓉",
-    "\\scru" : "𝓊",
-    "\\scrv" : "𝓋",
-    "\\scrw" : "𝓌",
-    "\\scrx" : "𝓍",
-    "\\scry" : "𝓎",
-    "\\scrz" : "𝓏",
-    "\\bscrA" : "𝓐",
-    "\\bscrB" : "𝓑",
-    "\\bscrC" : "𝓒",
-    "\\bscrD" : "𝓓",
-    "\\bscrE" : "𝓔",
-    "\\bscrF" : "𝓕",
-    "\\bscrG" : "𝓖",
-    "\\bscrH" : "𝓗",
-    "\\bscrI" : "𝓘",
-    "\\bscrJ" : "𝓙",
-    "\\bscrK" : "𝓚",
-    "\\bscrL" : "𝓛",
-    "\\bscrM" : "𝓜",
-    "\\bscrN" : "𝓝",
-    "\\bscrO" : "𝓞",
-    "\\bscrP" : "𝓟",
-    "\\bscrQ" : "𝓠",
-    "\\bscrR" : "𝓡",
-    "\\bscrS" : "𝓢",
-    "\\bscrT" : "𝓣",
-    "\\bscrU" : "𝓤",
-    "\\bscrV" : "𝓥",
-    "\\bscrW" : "𝓦",
-    "\\bscrX" : "𝓧",
-    "\\bscrY" : "𝓨",
-    "\\bscrZ" : "𝓩",
-    "\\bscra" : "𝓪",
-    "\\bscrb" : "𝓫",
-    "\\bscrc" : "𝓬",
-    "\\bscrd" : "𝓭",
-    "\\bscre" : "𝓮",
-    "\\bscrf" : "𝓯",
-    "\\bscrg" : "𝓰",
-    "\\bscrh" : "𝓱",
-    "\\bscri" : "𝓲",
-    "\\bscrj" : "𝓳",
-    "\\bscrk" : "𝓴",
-    "\\bscrl" : "𝓵",
-    "\\bscrm" : "𝓶",
-    "\\bscrn" : "𝓷",
-    "\\bscro" : "𝓸",
-    "\\bscrp" : "𝓹",
-    "\\bscrq" : "𝓺",
-    "\\bscrr" : "𝓻",
-    "\\bscrs" : "𝓼",
-    "\\bscrt" : "𝓽",
-    "\\bscru" : "𝓾",
-    "\\bscrv" : "𝓿",
-    "\\bscrw" : "𝔀",
-    "\\bscrx" : "𝔁",
-    "\\bscry" : "𝔂",
-    "\\bscrz" : "𝔃",
-    "\\frakA" : "𝔄",
-    "\\frakB" : "𝔅",
-    "\\frakD" : "𝔇",
-    "\\frakE" : "𝔈",
-    "\\frakF" : "𝔉",
-    "\\frakG" : "𝔊",
-    "\\frakJ" : "𝔍",
-    "\\frakK" : "𝔎",
-    "\\frakL" : "𝔏",
-    "\\frakM" : "𝔐",
-    "\\frakN" : "𝔑",
-    "\\frakO" : "𝔒",
-    "\\frakP" : "𝔓",
-    "\\frakQ" : "𝔔",
-    "\\frakS" : "𝔖",
-    "\\frakT" : "𝔗",
-    "\\frakU" : "𝔘",
-    "\\frakV" : "𝔙",
-    "\\frakW" : "𝔚",
-    "\\frakX" : "𝔛",
-    "\\frakY" : "𝔜",
-    "\\fraka" : "𝔞",
-    "\\frakb" : "𝔟",
-    "\\frakc" : "𝔠",
-    "\\frakd" : "𝔡",
-    "\\frake" : "𝔢",
-    "\\frakf" : "𝔣",
-    "\\frakg" : "𝔤",
-    "\\frakh" : "𝔥",
-    "\\fraki" : "𝔦",
-    "\\frakj" : "𝔧",
-    "\\frakk" : "𝔨",
-    "\\frakl" : "𝔩",
-    "\\frakm" : "𝔪",
-    "\\frakn" : "𝔫",
-    "\\frako" : "𝔬",
-    "\\frakp" : "𝔭",
-    "\\frakq" : "𝔮",
-    "\\frakr" : "𝔯",
-    "\\fraks" : "𝔰",
-    "\\frakt" : "𝔱",
-    "\\fraku" : "𝔲",
-    "\\frakv" : "𝔳",
-    "\\frakw" : "𝔴",
-    "\\frakx" : "𝔵",
-    "\\fraky" : "𝔶",
-    "\\frakz" : "𝔷",
-    "\\bbA" : "𝔸",
-    "\\bbB" : "𝔹",
-    "\\bbD" : "𝔻",
-    "\\bbE" : "𝔼",
-    "\\bbF" : "𝔽",
-    "\\bbG" : "𝔾",
-    "\\bbI" : "𝕀",
-    "\\bbJ" : "𝕁",
-    "\\bbK" : "𝕂",
-    "\\bbL" : "𝕃",
-    "\\bbM" : "𝕄",
-    "\\bbO" : "𝕆",
-    "\\bbS" : "𝕊",
-    "\\bbT" : "𝕋",
-    "\\bbU" : "𝕌",
-    "\\bbV" : "𝕍",
-    "\\bbW" : "𝕎",
-    "\\bbX" : "𝕏",
-    "\\bbY" : "𝕐",
-    "\\bba" : "𝕒",
-    "\\bbb" : "𝕓",
-    "\\bbc" : "𝕔",
-    "\\bbd" : "𝕕",
-    "\\bbe" : "𝕖",
-    "\\bbf" : "𝕗",
-    "\\bbg" : "𝕘",
-    "\\bbh" : "𝕙",
-    "\\bbi" : "𝕚",
-    "\\bbj" : "𝕛",
-    "\\bbk" : "𝕜",
-    "\\bbl" : "𝕝",
-    "\\bbm" : "𝕞",
-    "\\bbn" : "𝕟",
-    "\\bbo" : "𝕠",
-    "\\bbp" : "𝕡",
-    "\\bbq" : "𝕢",
-    "\\bbr" : "𝕣",
-    "\\bbs" : "𝕤",
-    "\\bbt" : "𝕥",
-    "\\bbu" : "𝕦",
-    "\\bbv" : "𝕧",
-    "\\bbw" : "𝕨",
-    "\\bbx" : "𝕩",
-    "\\bby" : "𝕪",
-    "\\bbz" : "𝕫",
-    "\\bfrakA" : "𝕬",
-    "\\bfrakB" : "𝕭",
-    "\\bfrakC" : "𝕮",
-    "\\bfrakD" : "𝕯",
-    "\\bfrakE" : "𝕰",
-    "\\bfrakF" : "𝕱",
-    "\\bfrakG" : "𝕲",
-    "\\bfrakH" : "𝕳",
-    "\\bfrakI" : "𝕴",
-    "\\bfrakJ" : "𝕵",
-    "\\bfrakK" : "𝕶",
-    "\\bfrakL" : "𝕷",
-    "\\bfrakM" : "𝕸",
-    "\\bfrakN" : "𝕹",
-    "\\bfrakO" : "𝕺",
-    "\\bfrakP" : "𝕻",
-    "\\bfrakQ" : "𝕼",
-    "\\bfrakR" : "𝕽",
-    "\\bfrakS" : "𝕾",
-    "\\bfrakT" : "𝕿",
-    "\\bfrakU" : "𝖀",
-    "\\bfrakV" : "𝖁",
-    "\\bfrakW" : "𝖂",
-    "\\bfrakX" : "𝖃",
-    "\\bfrakY" : "𝖄",
-    "\\bfrakZ" : "𝖅",
-    "\\bfraka" : "𝖆",
-    "\\bfrakb" : "𝖇",
-    "\\bfrakc" : "𝖈",
-    "\\bfrakd" : "𝖉",
-    "\\bfrake" : "𝖊",
-    "\\bfrakf" : "𝖋",
-    "\\bfrakg" : "𝖌",
-    "\\bfrakh" : "𝖍",
-    "\\bfraki" : "𝖎",
-    "\\bfrakj" : "𝖏",
-    "\\bfrakk" : "𝖐",
-    "\\bfrakl" : "𝖑",
-    "\\bfrakm" : "𝖒",
-    "\\bfrakn" : "𝖓",
-    "\\bfrako" : "𝖔",
-    "\\bfrakp" : "𝖕",
-    "\\bfrakq" : "𝖖",
-    "\\bfrakr" : "𝖗",
-    "\\bfraks" : "𝖘",
-    "\\bfrakt" : "𝖙",
-    "\\bfraku" : "𝖚",
-    "\\bfrakv" : "𝖛",
-    "\\bfrakw" : "𝖜",
-    "\\bfrakx" : "𝖝",
-    "\\bfraky" : "𝖞",
-    "\\bfrakz" : "𝖟",
-    "\\sansA" : "𝖠",
-    "\\sansB" : "𝖡",
-    "\\sansC" : "𝖢",
-    "\\sansD" : "𝖣",
-    "\\sansE" : "𝖤",
-    "\\sansF" : "𝖥",
-    "\\sansG" : "𝖦",
-    "\\sansH" : "𝖧",
-    "\\sansI" : "𝖨",
-    "\\sansJ" : "𝖩",
-    "\\sansK" : "𝖪",
-    "\\sansL" : "𝖫",
-    "\\sansM" : "𝖬",
-    "\\sansN" : "𝖭",
-    "\\sansO" : "𝖮",
-    "\\sansP" : "𝖯",
-    "\\sansQ" : "𝖰",
-    "\\sansR" : "𝖱",
-    "\\sansS" : "𝖲",
-    "\\sansT" : "𝖳",
-    "\\sansU" : "𝖴",
-    "\\sansV" : "𝖵",
-    "\\sansW" : "𝖶",
-    "\\sansX" : "𝖷",
-    "\\sansY" : "𝖸",
-    "\\sansZ" : "𝖹",
-    "\\sansa" : "𝖺",
-    "\\sansb" : "𝖻",
-    "\\sansc" : "𝖼",
-    "\\sansd" : "𝖽",
-    "\\sanse" : "𝖾",
-    "\\sansf" : "𝖿",
-    "\\sansg" : "𝗀",
-    "\\sansh" : "𝗁",
-    "\\sansi" : "𝗂",
-    "\\sansj" : "𝗃",
-    "\\sansk" : "𝗄",
-    "\\sansl" : "𝗅",
-    "\\sansm" : "𝗆",
-    "\\sansn" : "𝗇",
-    "\\sanso" : "𝗈",
-    "\\sansp" : "𝗉",
-    "\\sansq" : "𝗊",
-    "\\sansr" : "𝗋",
-    "\\sanss" : "𝗌",
-    "\\sanst" : "𝗍",
-    "\\sansu" : "𝗎",
-    "\\sansv" : "𝗏",
-    "\\sansw" : "𝗐",
-    "\\sansx" : "𝗑",
-    "\\sansy" : "𝗒",
-    "\\sansz" : "𝗓",
-    "\\bsansA" : "𝗔",
-    "\\bsansB" : "𝗕",
-    "\\bsansC" : "𝗖",
-    "\\bsansD" : "𝗗",
-    "\\bsansE" : "𝗘",
-    "\\bsansF" : "𝗙",
-    "\\bsansG" : "𝗚",
-    "\\bsansH" : "𝗛",
-    "\\bsansI" : "𝗜",
-    "\\bsansJ" : "𝗝",
-    "\\bsansK" : "𝗞",
-    "\\bsansL" : "𝗟",
-    "\\bsansM" : "𝗠",
-    "\\bsansN" : "𝗡",
-    "\\bsansO" : "𝗢",
-    "\\bsansP" : "𝗣",
-    "\\bsansQ" : "𝗤",
-    "\\bsansR" : "𝗥",
-    "\\bsansS" : "𝗦",
-    "\\bsansT" : "𝗧",
-    "\\bsansU" : "𝗨",
-    "\\bsansV" : "𝗩",
-    "\\bsansW" : "𝗪",
-    "\\bsansX" : "𝗫",
-    "\\bsansY" : "𝗬",
-    "\\bsansZ" : "𝗭",
-    "\\bsansa" : "𝗮",
-    "\\bsansb" : "𝗯",
-    "\\bsansc" : "𝗰",
-    "\\bsansd" : "𝗱",
-    "\\bsanse" : "𝗲",
-    "\\bsansf" : "𝗳",
-    "\\bsansg" : "𝗴",
-    "\\bsansh" : "𝗵",
-    "\\bsansi" : "𝗶",
-    "\\bsansj" : "𝗷",
-    "\\bsansk" : "𝗸",
-    "\\bsansl" : "𝗹",
-    "\\bsansm" : "𝗺",
-    "\\bsansn" : "𝗻",
-    "\\bsanso" : "𝗼",
-    "\\bsansp" : "𝗽",
-    "\\bsansq" : "𝗾",
-    "\\bsansr" : "𝗿",
-    "\\bsanss" : "𝘀",
-    "\\bsanst" : "𝘁",
-    "\\bsansu" : "𝘂",
-    "\\bsansv" : "𝘃",
-    "\\bsansw" : "𝘄",
-    "\\bsansx" : "𝘅",
-    "\\bsansy" : "𝘆",
-    "\\bsansz" : "𝘇",
-    "\\isansA" : "𝘈",
-    "\\isansB" : "𝘉",
-    "\\isansC" : "𝘊",
-    "\\isansD" : "𝘋",
-    "\\isansE" : "𝘌",
-    "\\isansF" : "𝘍",
-    "\\isansG" : "𝘎",
-    "\\isansH" : "𝘏",
-    "\\isansI" : "𝘐",
-    "\\isansJ" : "𝘑",
-    "\\isansK" : "𝘒",
-    "\\isansL" : "𝘓",
-    "\\isansM" : "𝘔",
-    "\\isansN" : "𝘕",
-    "\\isansO" : "𝘖",
-    "\\isansP" : "𝘗",
-    "\\isansQ" : "𝘘",
-    "\\isansR" : "𝘙",
-    "\\isansS" : "𝘚",
-    "\\isansT" : "𝘛",
-    "\\isansU" : "𝘜",
-    "\\isansV" : "𝘝",
-    "\\isansW" : "𝘞",
-    "\\isansX" : "𝘟",
-    "\\isansY" : "𝘠",
-    "\\isansZ" : "𝘡",
-    "\\isansa" : "𝘢",
-    "\\isansb" : "𝘣",
-    "\\isansc" : "𝘤",
-    "\\isansd" : "𝘥",
-    "\\isanse" : "𝘦",
-    "\\isansf" : "𝘧",
-    "\\isansg" : "𝘨",
-    "\\isansh" : "𝘩",
-    "\\isansi" : "𝘪",
-    "\\isansj" : "𝘫",
-    "\\isansk" : "𝘬",
-    "\\isansl" : "𝘭",
-    "\\isansm" : "𝘮",
-    "\\isansn" : "𝘯",
-    "\\isanso" : "𝘰",
-    "\\isansp" : "𝘱",
-    "\\isansq" : "𝘲",
-    "\\isansr" : "𝘳",
-    "\\isanss" : "𝘴",
-    "\\isanst" : "𝘵",
-    "\\isansu" : "𝘶",
-    "\\isansv" : "𝘷",
-    "\\isansw" : "𝘸",
-    "\\isansx" : "𝘹",
-    "\\isansy" : "𝘺",
-    "\\isansz" : "𝘻",
-    "\\bisansA" : "𝘼",
-    "\\bisansB" : "𝘽",
-    "\\bisansC" : "𝘾",
-    "\\bisansD" : "𝘿",
-    "\\bisansE" : "𝙀",
-    "\\bisansF" : "𝙁",
-    "\\bisansG" : "𝙂",
-    "\\bisansH" : "𝙃",
-    "\\bisansI" : "𝙄",
-    "\\bisansJ" : "𝙅",
-    "\\bisansK" : "𝙆",
-    "\\bisansL" : "𝙇",
-    "\\bisansM" : "𝙈",
-    "\\bisansN" : "𝙉",
-    "\\bisansO" : "𝙊",
-    "\\bisansP" : "𝙋",
-    "\\bisansQ" : "𝙌",
-    "\\bisansR" : "𝙍",
-    "\\bisansS" : "𝙎",
-    "\\bisansT" : "𝙏",
-    "\\bisansU" : "𝙐",
-    "\\bisansV" : "𝙑",
-    "\\bisansW" : "𝙒",
-    "\\bisansX" : "𝙓",
-    "\\bisansY" : "𝙔",
-    "\\bisansZ" : "𝙕",
-    "\\bisansa" : "𝙖",
-    "\\bisansb" : "𝙗",
-    "\\bisansc" : "𝙘",
-    "\\bisansd" : "𝙙",
-    "\\bisanse" : "𝙚",
-    "\\bisansf" : "𝙛",
-    "\\bisansg" : "𝙜",
-    "\\bisansh" : "𝙝",
-    "\\bisansi" : "𝙞",
-    "\\bisansj" : "𝙟",
-    "\\bisansk" : "𝙠",
-    "\\bisansl" : "𝙡",
-    "\\bisansm" : "𝙢",
-    "\\bisansn" : "𝙣",
-    "\\bisanso" : "𝙤",
-    "\\bisansp" : "𝙥",
-    "\\bisansq" : "𝙦",
-    "\\bisansr" : "𝙧",
-    "\\bisanss" : "𝙨",
-    "\\bisanst" : "𝙩",
-    "\\bisansu" : "𝙪",
-    "\\bisansv" : "𝙫",
-    "\\bisansw" : "𝙬",
-    "\\bisansx" : "𝙭",
-    "\\bisansy" : "𝙮",
-    "\\bisansz" : "𝙯",
-    "\\ttA" : "𝙰",
-    "\\ttB" : "𝙱",
-    "\\ttC" : "𝙲",
-    "\\ttD" : "𝙳",
-    "\\ttE" : "𝙴",
-    "\\ttF" : "𝙵",
-    "\\ttG" : "𝙶",
-    "\\ttH" : "𝙷",
-    "\\ttI" : "𝙸",
-    "\\ttJ" : "𝙹",
-    "\\ttK" : "𝙺",
-    "\\ttL" : "𝙻",
-    "\\ttM" : "𝙼",
-    "\\ttN" : "𝙽",
-    "\\ttO" : "𝙾",
-    "\\ttP" : "𝙿",
-    "\\ttQ" : "𝚀",
-    "\\ttR" : "𝚁",
-    "\\ttS" : "𝚂",
-    "\\ttT" : "𝚃",
-    "\\ttU" : "𝚄",
-    "\\ttV" : "𝚅",
-    "\\ttW" : "𝚆",
-    "\\ttX" : "𝚇",
-    "\\ttY" : "𝚈",
-    "\\ttZ" : "𝚉",
-    "\\tta" : "𝚊",
-    "\\ttb" : "𝚋",
-    "\\ttc" : "𝚌",
-    "\\ttd" : "𝚍",
-    "\\tte" : "𝚎",
-    "\\ttf" : "𝚏",
-    "\\ttg" : "𝚐",
-    "\\tth" : "𝚑",
-    "\\tti" : "𝚒",
-    "\\ttj" : "𝚓",
-    "\\ttk" : "𝚔",
-    "\\ttl" : "𝚕",
-    "\\ttm" : "𝚖",
-    "\\ttn" : "𝚗",
-    "\\tto" : "𝚘",
-    "\\ttp" : "𝚙",
-    "\\ttq" : "𝚚",
-    "\\ttr" : "𝚛",
-    "\\tts" : "𝚜",
-    "\\ttt" : "𝚝",
-    "\\ttu" : "𝚞",
-    "\\ttv" : "𝚟",
-    "\\ttw" : "𝚠",
-    "\\ttx" : "𝚡",
-    "\\tty" : "𝚢",
-    "\\ttz" : "𝚣",
-    "\\bfAlpha" : "𝚨",
-    "\\bfBeta" : "𝚩",
-    "\\bfGamma" : "𝚪",
-    "\\bfDelta" : "𝚫",
-    "\\bfEpsilon" : "𝚬",
-    "\\bfZeta" : "𝚭",
-    "\\bfEta" : "𝚮",
-    "\\bfTheta" : "𝚯",
-    "\\bfIota" : "𝚰",
-    "\\bfKappa" : "𝚱",
-    "\\bfLambda" : "𝚲",
-    "\\bfMu" : "𝚳",
-    "\\bfNu" : "𝚴",
-    "\\bfXi" : "𝚵",
-    "\\bfOmicron" : "𝚶",
-    "\\bfPi" : "𝚷",
-    "\\bfRho" : "𝚸",
-    "\\bfvarTheta" : "𝚹",
-    "\\bfSigma" : "𝚺",
-    "\\bfTau" : "𝚻",
-    "\\bfUpsilon" : "𝚼",
-    "\\bfPhi" : "𝚽",
-    "\\bfChi" : "𝚾",
-    "\\bfPsi" : "𝚿",
-    "\\bfOmega" : "𝛀",
-    "\\bfalpha" : "𝛂",
-    "\\bfbeta" : "𝛃",
-    "\\bfgamma" : "𝛄",
-    "\\bfdelta" : "𝛅",
-    "\\bfepsilon" : "𝛆",
-    "\\bfzeta" : "𝛇",
-    "\\bfeta" : "𝛈",
-    "\\bftheta" : "𝛉",
-    "\\bfiota" : "𝛊",
-    "\\bfkappa" : "𝛋",
-    "\\bflambda" : "𝛌",
-    "\\bfmu" : "𝛍",
-    "\\bfnu" : "𝛎",
-    "\\bfxi" : "𝛏",
-    "\\bfomicron" : "𝛐",
-    "\\bfpi" : "𝛑",
-    "\\bfrho" : "𝛒",
-    "\\bfvarsigma" : "𝛓",
-    "\\bfsigma" : "𝛔",
-    "\\bftau" : "𝛕",
-    "\\bfupsilon" : "𝛖",
-    "\\bfvarphi" : "𝛗",
-    "\\bfchi" : "𝛘",
-    "\\bfpsi" : "𝛙",
-    "\\bfomega" : "𝛚",
-    "\\bfvarepsilon" : "𝛜",
-    "\\bfvartheta" : "𝛝",
-    "\\bfvarkappa" : "𝛞",
-    "\\bfphi" : "𝛟",
-    "\\bfvarrho" : "𝛠",
-    "\\bfvarpi" : "𝛡",
-    "\\itAlpha" : "𝛢",
-    "\\itBeta" : "𝛣",
-    "\\itGamma" : "𝛤",
-    "\\itDelta" : "𝛥",
-    "\\itEpsilon" : "𝛦",
-    "\\itZeta" : "𝛧",
-    "\\itEta" : "𝛨",
-    "\\itTheta" : "𝛩",
-    "\\itIota" : "𝛪",
-    "\\itKappa" : "𝛫",
-    "\\itLambda" : "𝛬",
-    "\\itMu" : "𝛭",
-    "\\itNu" : "𝛮",
-    "\\itXi" : "𝛯",
-    "\\itOmicron" : "𝛰",
-    "\\itPi" : "𝛱",
-    "\\itRho" : "𝛲",
-    "\\itvarTheta" : "𝛳",
-    "\\itSigma" : "𝛴",
-    "\\itTau" : "𝛵",
-    "\\itUpsilon" : "𝛶",
-    "\\itPhi" : "𝛷",
-    "\\itChi" : "𝛸",
-    "\\itPsi" : "𝛹",
-    "\\itOmega" : "𝛺",
-    "\\italpha" : "𝛼",
-    "\\itbeta" : "𝛽",
-    "\\itgamma" : "𝛾",
-    "\\itdelta" : "𝛿",
-    "\\itepsilon" : "𝜀",
-    "\\itzeta" : "𝜁",
-    "\\iteta" : "𝜂",
-    "\\ittheta" : "𝜃",
-    "\\itiota" : "𝜄",
-    "\\itkappa" : "𝜅",
-    "\\itlambda" : "𝜆",
-    "\\itmu" : "𝜇",
-    "\\itnu" : "𝜈",
-    "\\itxi" : "𝜉",
-    "\\itomicron" : "𝜊",
-    "\\itpi" : "𝜋",
-    "\\itrho" : "𝜌",
-    "\\itvarsigma" : "𝜍",
-    "\\itsigma" : "𝜎",
-    "\\ittau" : "𝜏",
-    "\\itupsilon" : "𝜐",
-    "\\itphi" : "𝜑",
-    "\\itchi" : "𝜒",
-    "\\itpsi" : "𝜓",
-    "\\itomega" : "𝜔",
-    "\\itvarepsilon" : "𝜖",
-    "\\itvartheta" : "𝜗",
-    "\\itvarkappa" : "𝜘",
-    "\\itvarphi" : "𝜙",
-    "\\itvarrho" : "𝜚",
-    "\\itvarpi" : "𝜛",
-    "\\biAlpha" : "𝜜",
-    "\\biBeta" : "𝜝",
-    "\\biGamma" : "𝜞",
-    "\\biDelta" : "𝜟",
-    "\\biEpsilon" : "𝜠",
-    "\\biZeta" : "𝜡",
-    "\\biEta" : "𝜢",
-    "\\biTheta" : "𝜣",
-    "\\biIota" : "𝜤",
-    "\\biKappa" : "𝜥",
-    "\\biLambda" : "𝜦",
-    "\\biMu" : "𝜧",
-    "\\biNu" : "𝜨",
-    "\\biXi" : "𝜩",
-    "\\biOmicron" : "𝜪",
-    "\\biPi" : "𝜫",
-    "\\biRho" : "𝜬",
-    "\\bivarTheta" : "𝜭",
-    "\\biSigma" : "𝜮",
-    "\\biTau" : "𝜯",
-    "\\biUpsilon" : "𝜰",
-    "\\biPhi" : "𝜱",
-    "\\biChi" : "𝜲",
-    "\\biPsi" : "𝜳",
-    "\\biOmega" : "𝜴",
-    "\\bialpha" : "𝜶",
-    "\\bibeta" : "𝜷",
-    "\\bigamma" : "𝜸",
-    "\\bidelta" : "𝜹",
-    "\\biepsilon" : "𝜺",
-    "\\bizeta" : "𝜻",
-    "\\bieta" : "𝜼",
-    "\\bitheta" : "𝜽",
-    "\\biiota" : "𝜾",
-    "\\bikappa" : "𝜿",
-    "\\bilambda" : "𝝀",
-    "\\bimu" : "𝝁",
-    "\\binu" : "𝝂",
-    "\\bixi" : "𝝃",
-    "\\biomicron" : "𝝄",
-    "\\bipi" : "𝝅",
-    "\\birho" : "𝝆",
-    "\\bivarsigma" : "𝝇",
-    "\\bisigma" : "𝝈",
-    "\\bitau" : "𝝉",
-    "\\biupsilon" : "𝝊",
-    "\\biphi" : "𝝋",
-    "\\bichi" : "𝝌",
-    "\\bipsi" : "𝝍",
-    "\\biomega" : "𝝎",
-    "\\bivarepsilon" : "𝝐",
-    "\\bivartheta" : "𝝑",
-    "\\bivarkappa" : "𝝒",
-    "\\bivarphi" : "𝝓",
-    "\\bivarrho" : "𝝔",
-    "\\bivarpi" : "𝝕",
-    "\\bsansAlpha" : "𝝖",
-    "\\bsansBeta" : "𝝗",
-    "\\bsansGamma" : "𝝘",
-    "\\bsansDelta" : "𝝙",
-    "\\bsansEpsilon" : "𝝚",
-    "\\bsansZeta" : "𝝛",
-    "\\bsansEta" : "𝝜",
-    "\\bsansTheta" : "𝝝",
-    "\\bsansIota" : "𝝞",
-    "\\bsansKappa" : "𝝟",
-    "\\bsansLambda" : "𝝠",
-    "\\bsansMu" : "𝝡",
-    "\\bsansNu" : "𝝢",
-    "\\bsansXi" : "𝝣",
-    "\\bsansOmicron" : "𝝤",
-    "\\bsansPi" : "𝝥",
-    "\\bsansRho" : "𝝦",
-    "\\bsansvarTheta" : "𝝧",
-    "\\bsansSigma" : "𝝨",
-    "\\bsansTau" : "𝝩",
-    "\\bsansUpsilon" : "𝝪",
-    "\\bsansPhi" : "𝝫",
-    "\\bsansChi" : "𝝬",
-    "\\bsansPsi" : "𝝭",
-    "\\bsansOmega" : "𝝮",
-    "\\bsansalpha" : "𝝰",
-    "\\bsansbeta" : "𝝱",
-    "\\bsansgamma" : "𝝲",
-    "\\bsansdelta" : "𝝳",
-    "\\bsansepsilon" : "𝝴",
-    "\\bsanszeta" : "𝝵",
-    "\\bsanseta" : "𝝶",
-    "\\bsanstheta" : "𝝷",
-    "\\bsansiota" : "𝝸",
-    "\\bsanskappa" : "𝝹",
-    "\\bsanslambda" : "𝝺",
-    "\\bsansmu" : "𝝻",
-    "\\bsansnu" : "𝝼",
-    "\\bsansxi" : "𝝽",
-    "\\bsansomicron" : "𝝾",
-    "\\bsanspi" : "𝝿",
-    "\\bsansrho" : "𝞀",
-    "\\bsansvarsigma" : "𝞁",
-    "\\bsanssigma" : "𝞂",
-    "\\bsanstau" : "𝞃",
-    "\\bsansupsilon" : "𝞄",
-    "\\bsansphi" : "𝞅",
-    "\\bsanschi" : "𝞆",
-    "\\bsanspsi" : "𝞇",
-    "\\bsansomega" : "𝞈",
-    "\\bsansvarepsilon" : "𝞊",
-    "\\bsansvartheta" : "𝞋",
-    "\\bsansvarkappa" : "𝞌",
-    "\\bsansvarphi" : "𝞍",
-    "\\bsansvarrho" : "𝞎",
-    "\\bsansvarpi" : "𝞏",
-    "\\bisansAlpha" : "𝞐",
-    "\\bisansBeta" : "𝞑",
-    "\\bisansGamma" : "𝞒",
-    "\\bisansDelta" : "𝞓",
-    "\\bisansEpsilon" : "𝞔",
-    "\\bisansZeta" : "𝞕",
-    "\\bisansEta" : "𝞖",
-    "\\bisansTheta" : "𝞗",
-    "\\bisansIota" : "𝞘",
-    "\\bisansKappa" : "𝞙",
-    "\\bisansLambda" : "𝞚",
-    "\\bisansMu" : "𝞛",
-    "\\bisansNu" : "𝞜",
-    "\\bisansXi" : "𝞝",
-    "\\bisansOmicron" : "𝞞",
-    "\\bisansPi" : "𝞟",
-    "\\bisansRho" : "𝞠",
-    "\\bisansvarTheta" : "𝞡",
-    "\\bisansSigma" : "𝞢",
-    "\\bisansTau" : "𝞣",
-    "\\bisansUpsilon" : "𝞤",
-    "\\bisansPhi" : "𝞥",
-    "\\bisansChi" : "𝞦",
-    "\\bisansPsi" : "𝞧",
-    "\\bisansOmega" : "𝞨",
-    "\\bisansalpha" : "𝞪",
-    "\\bisansbeta" : "𝞫",
-    "\\bisansgamma" : "𝞬",
-    "\\bisansdelta" : "𝞭",
-    "\\bisansepsilon" : "𝞮",
-    "\\bisanszeta" : "𝞯",
-    "\\bisanseta" : "𝞰",
-    "\\bisanstheta" : "𝞱",
-    "\\bisansiota" : "𝞲",
-    "\\bisanskappa" : "𝞳",
-    "\\bisanslambda" : "𝞴",
-    "\\bisansmu" : "𝞵",
-    "\\bisansnu" : "𝞶",
-    "\\bisansxi" : "𝞷",
-    "\\bisansomicron" : "𝞸",
-    "\\bisanspi" : "𝞹",
-    "\\bisansrho" : "𝞺",
-    "\\bisansvarsigma" : "𝞻",
-    "\\bisanssigma" : "𝞼",
-    "\\bisanstau" : "𝞽",
-    "\\bisansupsilon" : "𝞾",
-    "\\bisansphi" : "𝞿",
-    "\\bisanschi" : "𝟀",
-    "\\bisanspsi" : "𝟁",
-    "\\bisansomega" : "𝟂",
-    "\\bisansvarepsilon" : "𝟄",
-    "\\bisansvartheta" : "𝟅",
-    "\\bisansvarkappa" : "𝟆",
-    "\\bisansvarphi" : "𝟇",
-    "\\bisansvarrho" : "𝟈",
-    "\\bisansvarpi" : "𝟉",
-    "\\bfzero" : "𝟎",
-    "\\bfone" : "𝟏",
-    "\\bftwo" : "𝟐",
-    "\\bfthree" : "𝟑",
-    "\\bffour" : "𝟒",
-    "\\bffive" : "𝟓",
-    "\\bfsix" : "𝟔",
-    "\\bfseven" : "𝟕",
-    "\\bfeight" : "𝟖",
-    "\\bfnine" : "𝟗",
-    "\\bbzero" : "𝟘",
-    "\\bbone" : "𝟙",
-    "\\bbtwo" : "𝟚",
-    "\\bbthree" : "𝟛",
-    "\\bbfour" : "𝟜",
-    "\\bbfive" : "𝟝",
-    "\\bbsix" : "𝟞",
-    "\\bbseven" : "𝟟",
-    "\\bbeight" : "𝟠",
-    "\\bbnine" : "𝟡",
-    "\\sanszero" : "𝟢",
-    "\\sansone" : "𝟣",
-    "\\sanstwo" : "𝟤",
-    "\\sansthree" : "𝟥",
-    "\\sansfour" : "𝟦",
-    "\\sansfive" : "𝟧",
-    "\\sanssix" : "𝟨",
-    "\\sansseven" : "𝟩",
-    "\\sanseight" : "𝟪",
-    "\\sansnine" : "𝟫",
-    "\\bsanszero" : "𝟬",
-    "\\bsansone" : "𝟭",
-    "\\bsanstwo" : "𝟮",
-    "\\bsansthree" : "𝟯",
-    "\\bsansfour" : "𝟰",
-    "\\bsansfive" : "𝟱",
-    "\\bsanssix" : "𝟲",
-    "\\bsansseven" : "𝟳",
-    "\\bsanseight" : "𝟴",
-    "\\bsansnine" : "𝟵",
-    "\\ttzero" : "𝟶",
-    "\\ttone" : "𝟷",
-    "\\tttwo" : "𝟸",
-    "\\ttthree" : "𝟹",
-    "\\ttfour" : "𝟺",
-    "\\ttfive" : "𝟻",
-    "\\ttsix" : "𝟼",
-    "\\ttseven" : "𝟽",
-    "\\tteight" : "𝟾",
-    "\\ttnine" : "𝟿",
-    "\\underbar" : "̲",
-    "\\underleftrightarrow" : "͍",
-}
-
-
-reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_display.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_display.py
deleted file mode 100644
index f5ed34c912b396703931362f1f6de5e9b12f4e91..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_display.py
+++ /dev/null
@@ -1,272 +0,0 @@
-"""Tests for IPython.lib.display.
-
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012, the IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from tempfile import NamedTemporaryFile, mkdtemp
-from os.path import split, join as pjoin, dirname
-import pathlib
-from unittest import TestCase, mock
-import struct
-import wave
-from io import BytesIO
-
-# Third-party imports
-import pytest
-
-try:
-    import numpy
-except ImportError:
-    pass
-
-# Our own imports
-from IPython.lib import display
-
-from IPython.testing.decorators import skipif_not_numpy
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-#--------------------------
-# FileLink tests
-#--------------------------
-
-def test_instantiation_FileLink():
-    """FileLink: Test class can be instantiated"""
-    fl = display.FileLink('example.txt')
-    # TODO: remove this once only Python >= 3.6 is supported
-    fl = display.FileLink(pathlib.PurePath('example.txt'))
-
-def test_warning_on_non_existent_path_FileLink():
-    """FileLink: Calling _repr_html_ on non-existent files returns a warning"""
-    fl = display.FileLink("example.txt")
-    assert fl._repr_html_().startswith("Path (<tt>example.txt</tt>)")
-
-
-def test_existing_path_FileLink():
-    """FileLink: Calling _repr_html_ functions as expected on existing filepath
-    """
-    tf = NamedTemporaryFile()
-    fl = display.FileLink(tf.name)
-    actual = fl._repr_html_()
-    expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name, tf.name)
-    assert actual == expected
-
-
-def test_existing_path_FileLink_repr():
-    """FileLink: Calling repr() functions as expected on existing filepath
-    """
-    tf = NamedTemporaryFile()
-    fl = display.FileLink(tf.name)
-    actual = repr(fl)
-    expected = tf.name
-    assert actual == expected
-
-
-def test_error_on_directory_to_FileLink():
-    """FileLink: Raises error when passed directory
-    """
-    td = mkdtemp()
-    pytest.raises(ValueError, display.FileLink, td)
-
-#--------------------------
-# FileLinks tests
-#--------------------------
-
-def test_instantiation_FileLinks():
-    """FileLinks: Test class can be instantiated
-    """
-    fls = display.FileLinks('example')
-
-def test_warning_on_non_existent_path_FileLinks():
-    """FileLinks: Calling _repr_html_ on non-existent files returns a warning"""
-    fls = display.FileLinks("example")
-    assert fls._repr_html_().startswith("Path (<tt>example</tt>)")
-
-
-def test_existing_path_FileLinks():
-    """FileLinks: Calling _repr_html_ functions as expected on existing dir
-    """
-    td = mkdtemp()
-    tf1 = NamedTemporaryFile(dir=td)
-    tf2 = NamedTemporaryFile(dir=td)
-    fl = display.FileLinks(td)
-    actual = fl._repr_html_()
-    actual = actual.split('\n')
-    actual.sort()
-    # the links should always have forward slashes, even on windows, so replace
-    # backslashes with forward slashes here
-    expected = ["%s/<br>" % td,
-                "&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
-                 (tf2.name.replace("\\","/"),split(tf2.name)[1]),
-                "&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
-                 (tf1.name.replace("\\","/"),split(tf1.name)[1])]
-    expected.sort()
-    # We compare the sorted list of links here as that's more reliable
-    assert actual == expected
-
-
-def test_existing_path_FileLinks_alt_formatter():
-    """FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter
-    """
-    td = mkdtemp()
-    tf1 = NamedTemporaryFile(dir=td)
-    tf2 = NamedTemporaryFile(dir=td)
-    def fake_formatter(dirname,fnames,included_suffixes):
-        return ["hello","world"]
-    fl = display.FileLinks(td,notebook_display_formatter=fake_formatter)
-    actual = fl._repr_html_()
-    actual = actual.split('\n')
-    actual.sort()
-    expected = ["hello","world"]
-    expected.sort()
-    # We compare the sorted list of links here as that's more reliable
-    assert actual == expected
-
-
-def test_existing_path_FileLinks_repr():
-    """FileLinks: Calling repr() functions as expected on existing directory """
-    td = mkdtemp()
-    tf1 = NamedTemporaryFile(dir=td)
-    tf2 = NamedTemporaryFile(dir=td)
-    fl = display.FileLinks(td)
-    actual = repr(fl)
-    actual = actual.split('\n')
-    actual.sort()
-    expected = ['%s/' % td, '  %s' % split(tf1.name)[1],'  %s' % split(tf2.name)[1]]
-    expected.sort()
-    # We compare the sorted list of links here as that's more reliable
-    assert actual == expected
-
-
-def test_existing_path_FileLinks_repr_alt_formatter():
-    """FileLinks: Calling repr() functions as expected w/ alt formatter
-    """
-    td = mkdtemp()
-    tf1 = NamedTemporaryFile(dir=td)
-    tf2 = NamedTemporaryFile(dir=td)
-    def fake_formatter(dirname,fnames,included_suffixes):
-        return ["hello","world"]
-    fl = display.FileLinks(td,terminal_display_formatter=fake_formatter)
-    actual = repr(fl)
-    actual = actual.split('\n')
-    actual.sort()
-    expected = ["hello","world"]
-    expected.sort()
-    # We compare the sorted list of links here as that's more reliable
-    assert actual == expected
-
-
-def test_error_on_file_to_FileLinks():
-    """FileLinks: Raises error when passed file
-    """
-    td = mkdtemp()
-    tf1 = NamedTemporaryFile(dir=td)
-    pytest.raises(ValueError, display.FileLinks, tf1.name)
-
-
-def test_recursive_FileLinks():
-    """FileLinks: Does not recurse when recursive=False
-    """
-    td = mkdtemp()
-    tf = NamedTemporaryFile(dir=td)
-    subtd = mkdtemp(dir=td)
-    subtf = NamedTemporaryFile(dir=subtd)
-    fl = display.FileLinks(td)
-    actual = str(fl)
-    actual = actual.split('\n')
-    assert len(actual) == 4, actual
-    fl = display.FileLinks(td, recursive=False)
-    actual = str(fl)
-    actual = actual.split('\n')
-    assert len(actual) == 2, actual
-
-def test_audio_from_file():
-    path = pjoin(dirname(__file__), 'test.wav')
-    display.Audio(filename=path)
-
-class TestAudioDataWithNumpy(TestCase):
-
-    @skipif_not_numpy
-    def test_audio_from_numpy_array(self):
-        test_tone = get_test_tone()
-        audio = display.Audio(test_tone, rate=44100)
-        assert len(read_wav(audio.data)) == len(test_tone)
-
-    @skipif_not_numpy
-    def test_audio_from_list(self):
-        test_tone = get_test_tone()
-        audio = display.Audio(list(test_tone), rate=44100)
-        assert len(read_wav(audio.data)) == len(test_tone)
-
-    @skipif_not_numpy
-    def test_audio_from_numpy_array_without_rate_raises(self):
-        self.assertRaises(ValueError, display.Audio, get_test_tone())
-
-    @skipif_not_numpy
-    def test_audio_data_normalization(self):
-        expected_max_value = numpy.iinfo(numpy.int16).max
-        for scale in [1, 0.5, 2]:
-            audio = display.Audio(get_test_tone(scale), rate=44100)
-            actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
-            assert actual_max_value == expected_max_value
-
-    @skipif_not_numpy
-    def test_audio_data_without_normalization(self):
-        max_int16 = numpy.iinfo(numpy.int16).max
-        for scale in [1, 0.5, 0.2]:
-            test_tone = get_test_tone(scale)
-            test_tone_max_abs = numpy.max(numpy.abs(test_tone))
-            expected_max_value = int(max_int16 * test_tone_max_abs)
-            audio = display.Audio(test_tone, rate=44100, normalize=False)
-            actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
-            assert actual_max_value == expected_max_value
-
-    def test_audio_data_without_normalization_raises_for_invalid_data(self):
-        self.assertRaises(
-            ValueError,
-            lambda: display.Audio([1.001], rate=44100, normalize=False))
-        self.assertRaises(
-            ValueError,
-            lambda: display.Audio([-1.001], rate=44100, normalize=False))
-
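-# A worked sketch of the scaling asserted above (illustrative; values assume
-# the 440 Hz test tone defined below). With normalize=True the loudest sample
-# is rescaled to the int16 maximum, so for any scale s:
-#     max(|samples|) == numpy.iinfo(numpy.int16).max == 32767
-# With normalize=False the samples (assumed within [-1, 1]) are multiplied by
-# that maximum directly, so a tone with peak 0.5 comes out as:
-#     max(|samples|) == int(32767 * 0.5) == 16383
-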
-def simulate_numpy_not_installed():
-    try:
-        import numpy
-        return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError))
-    except ModuleNotFoundError:
-        return lambda x:x
-
-@simulate_numpy_not_installed()
-class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy):
-    # All tests from `TestAudioDataWithNumpy` are inherited.
-
-    @skipif_not_numpy
-    def test_audio_raises_for_nested_list(self):
-        stereo_signal = [list(get_test_tone())] * 2
-        self.assertRaises(TypeError, lambda: display.Audio(stereo_signal, rate=44100))
-
-
-@skipif_not_numpy
-def get_test_tone(scale=1):
-    return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale
-
-def read_wav(data):
-    with wave.open(BytesIO(data)) as wave_file:
-        wave_data = wave_file.readframes(wave_file.getnframes())
-        num_samples = wave_file.getnframes() * wave_file.getnchannels()
-        return struct.unpack('<%sh' % num_samples, wave_data)
-
-def test_code_from_file():
-    c = display.Code(filename=__file__)
-    assert c._repr_html_().startswith('<style>')
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/win32.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/win32.py
deleted file mode 100644
index 841b0e270a381cdfaca544a9be976d7276d83b1e..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/win32.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-
-# from winbase.h
-STDOUT = -11
-STDERR = -12
-
-ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
-
-try:
-    import ctypes
-    from ctypes import LibraryLoader
-    windll = LibraryLoader(ctypes.WinDLL)
-    from ctypes import wintypes
-except (AttributeError, ImportError):
-    windll = None
-    SetConsoleTextAttribute = lambda *_: None
-    winapi_test = lambda *_: None
-else:
-    from ctypes import byref, Structure, c_char, POINTER
-
-    COORD = wintypes._COORD
-
-    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
-        """struct in wincon.h."""
-        _fields_ = [
-            ("dwSize", COORD),
-            ("dwCursorPosition", COORD),
-            ("wAttributes", wintypes.WORD),
-            ("srWindow", wintypes.SMALL_RECT),
-            ("dwMaximumWindowSize", COORD),
-        ]
-        def __str__(self):
-            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
-                self.dwSize.Y, self.dwSize.X
-                , self.dwCursorPosition.Y, self.dwCursorPosition.X
-                , self.wAttributes
-                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
-                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
-            )
-
-    _GetStdHandle = windll.kernel32.GetStdHandle
-    _GetStdHandle.argtypes = [
-        wintypes.DWORD,
-    ]
-    _GetStdHandle.restype = wintypes.HANDLE
-
-    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
-    _GetConsoleScreenBufferInfo.argtypes = [
-        wintypes.HANDLE,
-        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
-    ]
-    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
-
-    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
-    _SetConsoleTextAttribute.argtypes = [
-        wintypes.HANDLE,
-        wintypes.WORD,
-    ]
-    _SetConsoleTextAttribute.restype = wintypes.BOOL
-
-    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
-    _SetConsoleCursorPosition.argtypes = [
-        wintypes.HANDLE,
-        COORD,
-    ]
-    _SetConsoleCursorPosition.restype = wintypes.BOOL
-
-    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
-    _FillConsoleOutputCharacterA.argtypes = [
-        wintypes.HANDLE,
-        c_char,
-        wintypes.DWORD,
-        COORD,
-        POINTER(wintypes.DWORD),
-    ]
-    _FillConsoleOutputCharacterA.restype = wintypes.BOOL
-
-    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
-    _FillConsoleOutputAttribute.argtypes = [
-        wintypes.HANDLE,
-        wintypes.WORD,
-        wintypes.DWORD,
-        COORD,
-        POINTER(wintypes.DWORD),
-    ]
-    _FillConsoleOutputAttribute.restype = wintypes.BOOL
-
-    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
-    _SetConsoleTitleW.argtypes = [
-        wintypes.LPCWSTR
-    ]
-    _SetConsoleTitleW.restype = wintypes.BOOL
-
-    _GetConsoleMode = windll.kernel32.GetConsoleMode
-    _GetConsoleMode.argtypes = [
-        wintypes.HANDLE,
-        POINTER(wintypes.DWORD)
-    ]
-    _GetConsoleMode.restype = wintypes.BOOL
-
-    _SetConsoleMode = windll.kernel32.SetConsoleMode
-    _SetConsoleMode.argtypes = [
-        wintypes.HANDLE,
-        wintypes.DWORD
-    ]
-    _SetConsoleMode.restype = wintypes.BOOL
-
-    def _winapi_test(handle):
-        csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
-        return bool(success)
-
-    def winapi_test():
-        return any(_winapi_test(h) for h in
-                   (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
-
-    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
-        handle = _GetStdHandle(stream_id)
-        csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
-        return csbi
-
-    def SetConsoleTextAttribute(stream_id, attrs):
-        handle = _GetStdHandle(stream_id)
-        return _SetConsoleTextAttribute(handle, attrs)
-
-    def SetConsoleCursorPosition(stream_id, position, adjust=True):
-        position = COORD(*position)
-        # If the position is out of range, do nothing.
-        if position.Y <= 0 or position.X <= 0:
-            return
-        # Adjust for Windows' SetConsoleCursorPosition:
-        #    1. being 0-based, while ANSI is 1-based.
-        #    2. expecting (x,y), while ANSI uses (y,x).
-        adjusted_position = COORD(position.Y - 1, position.X - 1)
-        if adjust:
-            # Adjust for viewport's scroll position
-            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
-            adjusted_position.Y += sr.Top
-            adjusted_position.X += sr.Left
-        # Resume normal processing
-        handle = _GetStdHandle(stream_id)
-        return _SetConsoleCursorPosition(handle, adjusted_position)
-
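-    # Worked example of the adjustment above (illustrative): an ANSI request
-    # for row 5, column 10 arrives as position=(5, 10), i.e. COORD(X=5, Y=10).
-    # After the axis swap and the 1-based to 0-based shift it becomes
-    # COORD(X=9, Y=4), plus the viewport's srWindow.Top/Left offsets when
-    # adjust=True.
-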
-    def FillConsoleOutputCharacter(stream_id, char, length, start):
-        handle = _GetStdHandle(stream_id)
-        char = c_char(char.encode())
-        length = wintypes.DWORD(length)
-        num_written = wintypes.DWORD(0)
-        # Note that this is hard-coded for ANSI (vs wide) bytes.
-        success = _FillConsoleOutputCharacterA(
-            handle, char, length, start, byref(num_written))
-        return num_written.value
-
-    def FillConsoleOutputAttribute(stream_id, attr, length, start):
-        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
-        handle = _GetStdHandle(stream_id)
-        attribute = wintypes.WORD(attr)
-        length = wintypes.DWORD(length)
-        num_written = wintypes.DWORD(0)
-        # Note that this is hard-coded for ANSI (vs wide) bytes.
-        return _FillConsoleOutputAttribute(
-            handle, attribute, length, start, byref(num_written))
-
-    def SetConsoleTitle(title):
-        return _SetConsoleTitleW(title)
-
-    def GetConsoleMode(handle):
-        mode = wintypes.DWORD()
-        success = _GetConsoleMode(handle, byref(mode))
-        if not success:
-            raise ctypes.WinError()
-        return mode.value
-
-    def SetConsoleMode(handle, mode):
-        success = _SetConsoleMode(handle, mode)
-        if not success:
-            raise ctypes.WinError()
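-
-    # Hedged usage sketch (added for illustration; not part of colorama's
-    # public API): the two wrappers above combine with the
-    # ENABLE_VIRTUAL_TERMINAL_PROCESSING flag defined at the top of this
-    # module to switch a console stream into native ANSI processing.
-    def enable_virtual_terminal_processing(stream_id=STDOUT):
-        handle = _GetStdHandle(stream_id)
-        mode = GetConsoleMode(handle)
-        SetConsoleMode(handle, mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)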
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
deleted file mode 100644
index 6ae01831b905c3022384f7462e040f644d7305da..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
+++ /dev/null
@@ -1,2016 +0,0 @@
-#!~/.wine/drive_c/Python25/python.exe
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2009-2014, Mario Vilas
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-#     * Redistributions of source code must retain the above copyright notice,
-#       this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#     * Neither the name of the copyright holder nor the names of its
-#       contributors may be used to endorse or promote products derived from
-#       this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-"""
-Module instrumentation.
-
-@group Instrumentation:
-    Module
-
-@group Warnings:
-    DebugSymbolsWarning
-"""
-
-from __future__ import with_statement
-
-__revision__ = "$Id$"
-
-__all__ = ['Module', 'DebugSymbolsWarning']
-
-import sys
-from winappdbg import win32
-from winappdbg import compat
-from winappdbg.textio import HexInput, HexDump
-from winappdbg.util import PathOperations
-
-# delayed imports
-Process = None
-
-import os
-import warnings
-import traceback
-
-#==============================================================================
-
-class DebugSymbolsWarning (UserWarning):
-    """
-    This warning is issued if the support for debug symbols
-    isn't working properly.
-    """
-
-#==============================================================================
-
-class Module (object):
-    """
-    Interface to a DLL library loaded in the context of another process.
-
-    @group Properties:
-        get_base, get_filename, get_name, get_size, get_entry_point,
-        get_process, set_process, get_pid,
-        get_handle, set_handle, open_handle, close_handle
-
-    @group Labels:
-        get_label, get_label_at_address, is_address_here,
-        resolve, resolve_label, match_name
-
-    @group Symbols:
-        load_symbols, unload_symbols, get_symbols, iter_symbols,
-        resolve_symbol, get_symbol_at_address
-
-    @group Modules snapshot:
-        clear
-
-    @type unknown: str
-    @cvar unknown: Suggested tag for unknown modules.
-
-    @type lpBaseOfDll: int
-    @ivar lpBaseOfDll: Base of DLL module.
-        Use L{get_base} instead.
-
-    @type hFile: L{FileHandle}
-    @ivar hFile: Handle to the module file.
-        Use L{get_handle} instead.
-
-    @type fileName: str
-    @ivar fileName: Module filename.
-        Use L{get_filename} instead.
-
-    @type SizeOfImage: int
-    @ivar SizeOfImage: Size of the module.
-        Use L{get_size} instead.
-
-    @type EntryPoint: int
-    @ivar EntryPoint: Entry point of the module.
-        Use L{get_entry_point} instead.
-
-    @type process: L{Process}
-    @ivar process: Process where the module is loaded.
-        Use the L{get_process} method instead.
-    """
-
-    unknown = '<unknown>'
-
-    class _SymbolEnumerator (object):
-        """
-        Internally used by L{Module} to enumerate symbols in a module.
-        """
-
-        def __init__(self, undecorate = False):
-            self.symbols = list()
-            self.undecorate = undecorate
-
-        def __call__(self, SymbolName, SymbolAddress, SymbolSize, UserContext):
-            """
-            Callback that receives symbols and stores them in a Python list.
-            """
-            if self.undecorate:
-                try:
-                    SymbolName = win32.UnDecorateSymbolName(SymbolName)
-                except Exception:
-                    pass # not all symbols are decorated!
-            self.symbols.append( (SymbolName, SymbolAddress, SymbolSize) )
-            return win32.TRUE
-
-    def __init__(self, lpBaseOfDll, hFile = None, fileName    = None,
-                                                  SizeOfImage = None,
-                                                  EntryPoint  = None,
-                                                  process     = None):
-        """
-        @type  lpBaseOfDll: int
-        @param lpBaseOfDll: Base address of the module.
-
-        @type  hFile: L{FileHandle}
-        @param hFile: (Optional) Handle to the module file.
-
-        @type  fileName: str
-        @param fileName: (Optional) Module filename.
-
-        @type  SizeOfImage: int
-        @param SizeOfImage: (Optional) Size of the module.
-
-        @type  EntryPoint: int
-        @param EntryPoint: (Optional) Entry point of the module.
-
-        @type  process: L{Process}
-        @param process: (Optional) Process where the module is loaded.
-        """
-        self.lpBaseOfDll    = lpBaseOfDll
-        self.fileName       = fileName
-        self.SizeOfImage    = SizeOfImage
-        self.EntryPoint     = EntryPoint
-
-        self.__symbols = list()
-
-        self.set_handle(hFile)
-        self.set_process(process)
-
-    # Not really sure if it's a good idea...
-##    def __eq__(self, aModule):
-##        """
-##        Compare two Module objects. The comparison is made using the process
-##        IDs and the module bases.
-##
-##        @type  aModule: L{Module}
-##        @param aModule: Another Module object.
-##
-##        @rtype:  bool
-##        @return: C{True} if the two process IDs and module bases are equal,
-##            C{False} otherwise.
-##        """
-##        return isinstance(aModule, Module)           and \
-##               self.get_pid() == aModule.get_pid()   and \
-##               self.get_base() == aModule.get_base()
-
-    def get_handle(self):
-        """
-        @rtype:  L{Handle}
-        @return: File handle.
-            Returns C{None} if unknown.
-        """
-        # no way to guess!
-        return self.__hFile
-
-    def set_handle(self, hFile):
-        """
-        @type  hFile: L{Handle}
-        @param hFile: File handle. Use C{None} to clear.
-        """
-        if hFile == win32.INVALID_HANDLE_VALUE:
-            hFile = None
-        self.__hFile = hFile
-
-    hFile = property(get_handle, set_handle, doc="")
-
-    def get_process(self):
-        """
-        @rtype:  L{Process}
-        @return: Parent Process object.
-            Returns C{None} if unknown.
-        """
-        # no way to guess!
-        return self.__process
-
-    def set_process(self, process = None):
-        """
-        Manually set the parent process. Use with care!
-
-        @type  process: L{Process}
-        @param process: (Optional) Process object. Use C{None} for no process.
-        """
-        if process is None:
-            self.__process = None
-        else:
-            global Process      # delayed import
-            if Process is None:
-                from winappdbg.process import Process
-            if not isinstance(process, Process):
-                msg  = "Parent process must be a Process instance, "
-                msg += "got %s instead" % type(process)
-                raise TypeError(msg)
-            self.__process = process
-
-    process = property(get_process, set_process, doc="")
-
-    def get_pid(self):
-        """
-        @rtype:  int or None
-        @return: Parent process global ID.
-            Returns C{None} on error.
-        """
-        process = self.get_process()
-        if process is not None:
-            return process.get_pid()
-
-    def get_base(self):
-        """
-        @rtype:  int or None
-        @return: Base address of the module.
-            Returns C{None} if unknown.
-        """
-        return self.lpBaseOfDll
-
-    def get_size(self):
-        """
-        @rtype:  int or None
-        @return: Base size of the module.
-            Returns C{None} if unknown.
-        """
-        if not self.SizeOfImage:
-            self.__get_size_and_entry_point()
-        return self.SizeOfImage
-
-    def get_entry_point(self):
-        """
-        @rtype:  int or None
-        @return: Entry point of the module.
-            Returns C{None} if unknown.
-        """
-        if not self.EntryPoint:
-            self.__get_size_and_entry_point()
-        return self.EntryPoint
-
-    def __get_size_and_entry_point(self):
-        "Get the size and entry point of the module using the Win32 API."
-        process = self.get_process()
-        if process:
-            try:
-                handle = process.get_handle( win32.PROCESS_VM_READ |
-                                             win32.PROCESS_QUERY_INFORMATION )
-                base   = self.get_base()
-                mi     = win32.GetModuleInformation(handle, base)
-                self.SizeOfImage = mi.SizeOfImage
-                self.EntryPoint  = mi.EntryPoint
-            except WindowsError:
-                e = sys.exc_info()[1]
-                warnings.warn(
-                    "Cannot get size and entry point of module %s, reason: %s"\
-                    % (self.get_name(), e.strerror), RuntimeWarning)
-
-    def get_filename(self):
-        """
-        @rtype:  str or None
-        @return: Module filename.
-            Returns C{None} if unknown.
-        """
-        if self.fileName is None:
-            if self.hFile not in (None, win32.INVALID_HANDLE_VALUE):
-                fileName = self.hFile.get_filename()
-                if fileName:
-                    fileName = PathOperations.native_to_win32_pathname(fileName)
-                    self.fileName = fileName
-        return self.fileName
-
-    def __filename_to_modname(self, pathname):
-        """
-        @type  pathname: str
-        @param pathname: Pathname to a module.
-
-        @rtype:  str
-        @return: Module name.
-        """
-        filename = PathOperations.pathname_to_filename(pathname)
-        if filename:
-            filename = filename.lower()
-            filepart, extpart = PathOperations.split_extension(filename)
-            if filepart and extpart:
-                modName = filepart
-            else:
-                modName = filename
-        else:
-            modName = pathname
-        return modName
-
-    def get_name(self):
-        """
-        @rtype:  str
-        @return: Module name, as used in labels.
-
-        @warning: Names are B{NOT} guaranteed to be unique.
-
-            If you need unique identification for a loaded module,
-            use the base address instead.
-
-        @see: L{get_label}
-        """
-        pathname = self.get_filename()
-        if pathname:
-            modName = self.__filename_to_modname(pathname)
-            if isinstance(modName, compat.unicode):
-                try:
-                    modName = modName.encode('cp1252')
-                except UnicodeEncodeError:
-                    e = sys.exc_info()[1]
-                    warnings.warn(str(e))
-        else:
-            modName = "0x%x" % self.get_base()
-        return modName
-
-    def match_name(self, name):
-        """
-        @rtype:  bool
-        @return:
-            C{True} if the given name could refer to this module.
-            It may not be exactly the same returned by L{get_name}.
-        """
-
-        # If the given name is exactly our name, return True.
-        # Comparison is case insensitive.
-        my_name = self.get_name().lower()
-        if name.lower() == my_name:
-            return True
-
-        # If the given name is a base address, compare it with ours.
-        try:
-            base = HexInput.integer(name)
-        except ValueError:
-            base = None
-        if base is not None and base == self.get_base():
-            return True
-
-        # If the given name is a filename, convert it to a module name.
-        # Then compare it with ours, case insensitive.
-        modName = self.__filename_to_modname(name)
-        if modName.lower() == my_name:
-            return True
-
-        # No match.
-        return False
-
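-    # Illustrative examples of the three ways a name can match, assuming a
-    # hypothetical module loaded from "kernel32.dll" at base 0x7ff800000000:
-    #
-    #     mod.match_name("kernel32")          # module name, case insensitive
-    #     mod.match_name("0x7ff800000000")    # base address as a hex string
-    #     mod.match_name("kernel32.dll")      # filename reduced to a modname
-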
-#------------------------------------------------------------------------------
-
-    def open_handle(self):
-        """
-        Opens a new handle to the module.
-
-        The new handle is stored in the L{hFile} property.
-        """
-
-        if not self.get_filename():
-            msg = "Cannot retrieve filename for module at %s"
-            msg = msg % HexDump.address( self.get_base() )
-            raise Exception(msg)
-
-        hFile = win32.CreateFile(self.get_filename(),
-                                           dwShareMode = win32.FILE_SHARE_READ,
-                                 dwCreationDisposition = win32.OPEN_EXISTING)
-
-        # In case hFile was set to an actual handle value instead of a Handle
-        # object. This shouldn't happen unless the user tinkered with hFile.
-        if not hasattr(self.hFile, '__del__'):
-            self.close_handle()
-
-        self.hFile = hFile
-
-    def close_handle(self):
-        """
-        Closes the handle to the module.
-
-        @note: Normally you don't need to call this method. All handles
-            created by I{WinAppDbg} are automatically closed when the garbage
-            collector claims them. So unless you've been tinkering with it,
-            setting L{hFile} to C{None} should be enough.
-        """
-        try:
-            if hasattr(self.hFile, 'close'):
-                self.hFile.close()
-            elif self.hFile not in (None, win32.INVALID_HANDLE_VALUE):
-                win32.CloseHandle(self.hFile)
-        finally:
-            self.hFile = None
-
-    # Note: this redefines the get_handle accessor above (the one bound to
-    # the hFile property); unlike it, this version opens the handle on demand.
-    def get_handle(self):
-        """
-        @rtype:  L{FileHandle}
-        @return: Handle to the module file.
-        """
-        if self.hFile in (None, win32.INVALID_HANDLE_VALUE):
-            self.open_handle()
-        return self.hFile
-
-    def clear(self):
-        """
-        Clears the resources held by this object.
-        """
-        try:
-            self.set_process(None)
-        finally:
-            self.close_handle()
-
-#------------------------------------------------------------------------------
-
-    # XXX FIXME
-    # I've been told sometimes the debugging symbols APIs don't correctly
-    # handle redirected exports (for example ws2_32!recv).
-    # I haven't been able to reproduce the bug yet.
-    def load_symbols(self):
-        """
-        Loads the debugging symbols for a module.
-        Automatically called by L{get_symbols}.
-        """
-        if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
-            dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
-        else:
-            dwAccess = win32.PROCESS_QUERY_INFORMATION
-        hProcess     = self.get_process().get_handle(dwAccess)
-        hFile        = self.hFile
-        BaseOfDll    = self.get_base()
-        SizeOfDll    = self.get_size()
-        Enumerator   = self._SymbolEnumerator()
-        try:
-            win32.SymInitialize(hProcess)
-            SymOptions = win32.SymGetOptions()
-            SymOptions |= (
-                win32.SYMOPT_ALLOW_ZERO_ADDRESS     |
-                win32.SYMOPT_CASE_INSENSITIVE       |
-                win32.SYMOPT_FAVOR_COMPRESSED       |
-                win32.SYMOPT_INCLUDE_32BIT_MODULES  |
-                win32.SYMOPT_UNDNAME
-            )
-            SymOptions &= ~(
-                win32.SYMOPT_LOAD_LINES         |
-                win32.SYMOPT_NO_IMAGE_SEARCH    |
-                win32.SYMOPT_NO_CPP             |
-                win32.SYMOPT_IGNORE_NT_SYMPATH
-            )
-            win32.SymSetOptions(SymOptions)
-            try:
-                win32.SymSetOptions(
-                    SymOptions | win32.SYMOPT_ALLOW_ABSOLUTE_SYMBOLS)
-            except WindowsError:
-                pass
-            try:
-                try:
-                    success = win32.SymLoadModule64(
-                        hProcess, hFile, None, None, BaseOfDll, SizeOfDll)
-                except WindowsError:
-                    success = 0
-                if not success:
-                    ImageName = self.get_filename()
-                    success = win32.SymLoadModule64(
-                        hProcess, None, ImageName, None, BaseOfDll, SizeOfDll)
-                if success:
-                    try:
-                        win32.SymEnumerateSymbols64(
-                            hProcess, BaseOfDll, Enumerator)
-                    finally:
-                        win32.SymUnloadModule64(hProcess, BaseOfDll)
-            finally:
-                win32.SymCleanup(hProcess)
-        except WindowsError:
-            # format_exc() takes no exception argument; it formats the
-            # exception currently being handled.
-            msg = "Cannot load debug symbols for process ID %d, reason:\n%s"
-            msg = msg % (self.get_pid(), traceback.format_exc())
-            warnings.warn(msg, DebugSymbolsWarning)
-        self.__symbols = Enumerator.symbols
-
-    def unload_symbols(self):
-        """
-        Unloads the debugging symbols for a module.
-        """
-        self.__symbols = list()
-
-    def get_symbols(self):
-        """
-        Returns the debugging symbols for a module.
-        The symbols are automatically loaded when needed.
-
-        @rtype:  list of tuple( str, int, int )
-        @return: List of symbols.
-            Each symbol is represented by a tuple that contains:
-                - Symbol name
-                - Symbol memory address
-                - Symbol size in bytes
-        """
-        if not self.__symbols:
-            self.load_symbols()
-        return list(self.__symbols)
-
-    def iter_symbols(self):
-        """
-        Returns an iterator for the debugging symbols in a module,
-        in no particular order.
-        The symbols are automatically loaded when needed.
-
-        @rtype:  iterator of tuple( str, int, int )
-        @return: Iterator of symbols.
-            Each symbol is represented by a tuple that contains:
-                - Symbol name
-                - Symbol memory address
-                - Symbol size in bytes
-        """
-        if not self.__symbols:
-            self.load_symbols()
-        return self.__symbols.__iter__()
-
-    def resolve_symbol(self, symbol, bCaseSensitive = False):
-        """
-        Resolves a debugging symbol's address.
-
-        @type  symbol: str
-        @param symbol: Name of the symbol to resolve.
-
-        @type  bCaseSensitive: bool
-        @param bCaseSensitive: C{True} for case sensitive matches,
-            C{False} for case insensitive.
-
-        @rtype:  int or None
-        @return: Memory address of symbol. C{None} if not found.
-        """
-        if bCaseSensitive:
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                if symbol == SymbolName:
-                    return SymbolAddress
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                try:
-                    SymbolName = win32.UnDecorateSymbolName(SymbolName)
-                except Exception:
-                    continue
-                if symbol == SymbolName:
-                    return SymbolAddress
-        else:
-            symbol = symbol.lower()
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                if symbol == SymbolName.lower():
-                    return SymbolAddress
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                try:
-                    SymbolName = win32.UnDecorateSymbolName(SymbolName)
-                except Exception:
-                    continue
-                if symbol == SymbolName.lower():
-                    return SymbolAddress
-
-    def get_symbol_at_address(self, address):
-        """
-        Tries to find the closest matching symbol for the given address.
-
-        @type  address: int
-        @param address: Memory address to query.
-
-        @rtype: None or tuple( str, int, int )
-        @return: Returns a tuple consisting of:
-             - Name
-             - Address
-             - Size (in bytes)
-            Returns C{None} if no symbol could be matched.
-        """
-        found = None
-        for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-            if SymbolAddress > address:
-                continue
-            if SymbolAddress + SymbolSize > address:
-                if not found or found[1] < SymbolAddress:
-                    found = (SymbolName, SymbolAddress, SymbolSize)
-        return found
-
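-    # Minimal usage sketch (illustrative; the address is hypothetical):
-    # resolve the closest symbol for a crash address and report it as
-    # name+offset.
-    #
-    #     found = module.get_symbol_at_address(0x7ff800001234)
-    #     if found:
-    #         name, addr, size = found
-    #         print("%s+0x%x" % (name, 0x7ff800001234 - addr))
-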
-#------------------------------------------------------------------------------
-
-    def get_label(self, function = None, offset = None):
-        """
-        Retrieves the label for the given function of this module or the module
-        base address if no function name is given.
-
-        @type  function: str
-        @param function: (Optional) Exported function name.
-
-        @type  offset: int
-        @param offset: (Optional) Offset from the module base address.
-
-        @rtype:  str
-        @return: Label for the module base address, plus the offset if given.
-        """
-        return _ModuleContainer.parse_label(self.get_name(), function, offset)
-
-    def get_label_at_address(self, address, offset = None):
-        """
-        Creates a label from the given memory address.
-
-        If the address belongs to the module, the label is made relative to
-        its base address.
-
-        @type  address: int
-        @param address: Memory address.
-
-        @type  offset: None or int
-        @param offset: (Optional) Offset value.
-
-        @rtype:  str
-        @return: Label pointing to the given address.
-        """
-
-        # Add the offset to the address.
-        if offset:
-            address = address + offset
-
-        # Make the label relative to the base address if no match is found.
-        module      = self.get_name()
-        function    = None
-        offset      = address - self.get_base()
-
-        # Make the label relative to the entrypoint if no other match is found.
-        # Skip if the entry point is unknown.
-        start = self.get_entry_point()
-        if start and start <= address:
-            function    = "start"
-            offset      = address - start
-
-        # Enumerate exported functions and debug symbols,
-        # then find the closest match, if possible.
-        try:
-            symbol = self.get_symbol_at_address(address)
-            if symbol:
-                (SymbolName, SymbolAddress, SymbolSize) = symbol
-                new_offset = address - SymbolAddress
-                if new_offset <= offset:
-                    function    = SymbolName
-                    offset      = new_offset
-        except WindowsError:
-            pass
-
-        # Parse the label and return it.
-        return _ModuleContainer.parse_label(module, function, offset)
-
-    def is_address_here(self, address):
-        """
-        Tries to determine if the given address belongs to this module.
-
-        @type  address: int
-        @param address: Memory address.
-
-        @rtype:  bool or None
-        @return: C{True} if the address belongs to the module,
-            C{False} if it doesn't,
-            and C{None} if it can't be determined.
-        """
-        base = self.get_base()
-        size = self.get_size()
-        if base and size:
-            return base <= address < (base + size)
-        return None
-
-    def resolve(self, function):
-        """
-        Resolves a function exported by this module.
-
-        @type  function: str or int
-        @param function:
-            str: Name of the function.
-            int: Ordinal of the function.
-
-        @rtype:  int
-        @return: Memory address of the exported function in the process.
-            Returns None on error.
-        """
-
-        # Unknown DLL filename, there's nothing we can do.
-        filename = self.get_filename()
-        if not filename:
-            return None
-
-        # If the DLL is already mapped locally, resolve the function.
-        try:
-            hlib    = win32.GetModuleHandle(filename)
-            address = win32.GetProcAddress(hlib, function)
-        except WindowsError:
-
-            # Load the DLL locally, resolve the function and unload it.
-            try:
-                hlib = win32.LoadLibraryEx(filename,
-                                           win32.DONT_RESOLVE_DLL_REFERENCES)
-                try:
-                    address = win32.GetProcAddress(hlib, function)
-                finally:
-                    win32.FreeLibrary(hlib)
-            except WindowsError:
-                return None
-
-        # A NULL pointer means the function was not found.
-        if address in (None, 0):
-            return None
-
-        # Compensate for DLL base relocations locally and remotely.
-        return address - hlib + self.lpBaseOfDll
-
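-    # Worked example of the relocation arithmetic above (hypothetical
-    # numbers): if GetProcAddress returns 0x10001500 for a copy mapped
-    # locally at hlib = 0x10000000, the export sits at offset 0x1500, so in
-    # the target process (lpBaseOfDll = 0x6a000000) it resolves to
-    # 0x10001500 - 0x10000000 + 0x6a000000 == 0x6a001500.
-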
-    def resolve_label(self, label):
-        """
-        Resolves a label for this module only. If the label refers to another
-        module, an exception is raised.
-
-        @type  label: str
-        @param label: Label to resolve.
-
-        @rtype:  int
-        @return: Memory address pointed to by the label.
-
-        @raise ValueError: The label is malformed or impossible to resolve.
-        @raise RuntimeError: Cannot resolve the module or function.
-        """
-
-        # Split the label into its components.
-        # Use the fuzzy mode whenever possible.
-        aProcess = self.get_process()
-        if aProcess is not None:
-            (module, procedure, offset) = aProcess.split_label(label)
-        else:
-            (module, procedure, offset) = _ModuleContainer.split_label(label)
-
-        # If a module name is given that doesn't match ours,
-        # raise an exception.
-        if module and not self.match_name(module):
-            raise RuntimeError("Label does not belong to this module")
-
-        # Resolve the procedure if given.
-        if procedure:
-            address = self.resolve(procedure)
-            if address is None:
-
-                # If it's a debug symbol, use the symbol.
-                address = self.resolve_symbol(procedure)
-
-                # If it's the keyword "start" use the entry point.
-                if address is None and procedure == "start":
-                    address = self.get_entry_point()
-
-                # The procedure was not found.
-                if address is None:
-                    if not module:
-                        module = self.get_name()
-                    msg = "Can't find procedure %s in module %s"
-                    raise RuntimeError(msg % (procedure, module))
-
-        # If no procedure is given use the base address of the module.
-        else:
-            address = self.get_base()
-
-        # Add the offset if given and return the resolved address.
-        if offset:
-            address = address + offset
-        return address
-
-#==============================================================================
-
-# TODO
-# An alternative approach to the toolhelp32 snapshots: parsing the PEB and
-# fetching the list of loaded modules from there. That would solve the problem
-# of toolhelp32 not working when the process hasn't finished initializing.
-# See: http://pferrie.host22.com/misc/lowlevel3.htm
-
-class _ModuleContainer (object):
-    """
-    Encapsulates the capability to contain Module objects.
-
-    @note: Labels are an approximate way of referencing memory locations
-        across different executions of the same process, or different processes
-        with common modules. They are not meant to be perfectly unique, and
-        some errors may occur when multiple modules with the same name are
-        loaded, or when module filenames can't be retrieved.
-
-    @group Modules snapshot:
-        scan_modules,
-        get_module, get_module_bases, get_module_count,
-        get_module_at_address, get_module_by_name,
-        has_module, iter_modules, iter_module_addresses,
-        clear_modules
-
-    @group Labels:
-        parse_label, split_label, sanitize_label, resolve_label,
-        resolve_label_components, get_label_at_address, split_label_strict,
-        split_label_fuzzy
-
-    @group Symbols:
-        load_symbols, unload_symbols, get_symbols, iter_symbols,
-        resolve_symbol, get_symbol_at_address
-
-    @group Debugging:
-        is_system_defined_breakpoint, get_system_breakpoint,
-        get_user_breakpoint, get_breakin_breakpoint,
-        get_wow64_system_breakpoint, get_wow64_user_breakpoint,
-        get_wow64_breakin_breakpoint, get_break_on_error_ptr
-    """
-
-    def __init__(self):
-        self.__moduleDict = dict()
-        self.__system_breakpoints = dict()
-
-        # Replace split_label with the fuzzy version on object instances.
-        self.split_label = self.__use_fuzzy_mode
-
-    def __initialize_snapshot(self):
-        """
-        Private method to automatically initialize the snapshot
-        when you try to use it without calling any of the scan_*
-        methods first. You don't need to call this yourself.
-        """
-        if not self.__moduleDict:
-            try:
-                self.scan_modules()
-            except WindowsError:
-                pass
-
-    def __contains__(self, anObject):
-        """
-        @type  anObject: L{Module}, int
-        @param anObject:
-            - C{Module}: Module object to look for.
-            - C{int}: Base address of the DLL to look for.
-
-        @rtype:  bool
-        @return: C{True} if the snapshot contains
-            a L{Module} object with the same base address.
-        """
-        if isinstance(anObject, Module):
-            anObject = anObject.lpBaseOfDll
-        return self.has_module(anObject)
-
-    def __iter__(self):
-        """
-        @see:    L{iter_modules}
-        @rtype:  dictionary-valueiterator
-        @return: Iterator of L{Module} objects in this snapshot.
-        """
-        return self.iter_modules()
-
-    def __len__(self):
-        """
-        @see:    L{get_module_count}
-        @rtype:  int
-        @return: Count of L{Module} objects in this snapshot.
-        """
-        return self.get_module_count()
-
-    def has_module(self, lpBaseOfDll):
-        """
-        @type  lpBaseOfDll: int
-        @param lpBaseOfDll: Base address of the DLL to look for.
-
-        @rtype:  bool
-        @return: C{True} if the snapshot contains a
-            L{Module} object with the given base address.
-        """
-        self.__initialize_snapshot()
-        return lpBaseOfDll in self.__moduleDict
-
-    def get_module(self, lpBaseOfDll):
-        """
-        @type  lpBaseOfDll: int
-        @param lpBaseOfDll: Base address of the DLL to look for.
-
-        @rtype:  L{Module}
-        @return: Module object with the given base address.
-        """
-        self.__initialize_snapshot()
-        if lpBaseOfDll not in self.__moduleDict:
-            msg = "Unknown DLL base address %s"
-            msg = msg % HexDump.address(lpBaseOfDll)
-            raise KeyError(msg)
-        return self.__moduleDict[lpBaseOfDll]
-
-    def iter_module_addresses(self):
-        """
-        @see:    L{iter_modules}
-        @rtype:  dictionary-keyiterator
-        @return: Iterator of DLL base addresses in this snapshot.
-        """
-        self.__initialize_snapshot()
-        return compat.iterkeys(self.__moduleDict)
-
-    def iter_modules(self):
-        """
-        @see:    L{iter_module_addresses}
-        @rtype:  dictionary-valueiterator
-        @return: Iterator of L{Module} objects in this snapshot.
-        """
-        self.__initialize_snapshot()
-        return compat.itervalues(self.__moduleDict)
-
-    def get_module_bases(self):
-        """
-        @see:    L{iter_module_addresses}
-        @rtype:  list( int... )
-        @return: List of DLL base addresses in this snapshot.
-        """
-        self.__initialize_snapshot()
-        return compat.keys(self.__moduleDict)
-
-    def get_module_count(self):
-        """
-        @rtype:  int
-        @return: Count of L{Module} objects in this snapshot.
-        """
-        self.__initialize_snapshot()
-        return len(self.__moduleDict)
-
-#------------------------------------------------------------------------------
-
-    def get_module_by_name(self, modName):
-        """
-        @type  modName: str
-        @param modName:
-            Name of the module to look for, as returned by L{Module.get_name}.
-            If two or more modules with the same name are loaded, only one
-            of the matching modules is returned.
-
-            You can also pass a full pathname to the DLL file.
-            This works correctly even if two modules with the same name
-            are loaded from different paths.
-
-        @rtype:  L{Module}
-        @return: C{Module} object that best matches the given name.
-            Returns C{None} if no C{Module} can be found.
-        """
-
-        # Convert modName to lowercase.
-        # This helps make case insensitive string comparisons.
-        modName = modName.lower()
-
-        # modName is an absolute pathname.
-        if PathOperations.path_is_absolute(modName):
-            for lib in self.iter_modules():
-                if modName == lib.get_filename().lower():
-                    return lib
-            return None     # Stop trying to match the name.
-
-        # Get all the module names.
-        # This prevents having to iterate through the module list
-        #  more than once.
-        modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ]
-        modDict = dict(modDict)
-
-        # modName is a base filename.
-        if modName in modDict:
-            return modDict[modName]
-
-        # modName is a base filename without extension.
-        filepart, extpart = PathOperations.split_extension(modName)
-        if filepart and extpart:
-            if filepart in modDict:
-                return modDict[filepart]
-
-        # modName is a base address.
-        try:
-            baseAddress = HexInput.integer(modName)
-        except ValueError:
-            return None
-        if self.has_module(baseAddress):
-            return self.get_module(baseAddress)
-
-        # Module not found.
-        return None
-
-    def get_module_at_address(self, address):
-        """
-        @type  address: int
-        @param address: Memory address to query.
-
-        @rtype:  L{Module}
-        @return: C{Module} object that best matches the given address.
-            Returns C{None} if no C{Module} can be found.
-        """
-        bases = self.get_module_bases()
-        bases.sort()
-        bases.append(0x10000000000000000)  # max. 64 bit address + 1 (plain literal; long() is Python 2 only)
-        if address >= bases[0]:
-            i = 0
-            max_i = len(bases) - 1
-            while i < max_i:
-                begin, end = bases[i:i+2]
-                if begin <= address < end:
-                    module = self.get_module(begin)
-                    here   = module.is_address_here(address)
-                    if here is False:
-                        break
-                    else:   # True or None
-                        return module
-                i = i + 1
-        return None
-
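-    # Illustrative walk-through (hypothetical bases): with modules loaded at
-    # 0x10000 and 0x20000 the sorted list becomes [0x10000, 0x20000, 2**64],
-    # so an address such as 0x15000 falls in the [0x10000, 0x20000) interval
-    # and is then double-checked against that module's size via
-    # is_address_here before being returned.
-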
-    # XXX this method mustn't end up calling __initialize_snapshot by accident!
-    def scan_modules(self):
-        """
-        Populates the snapshot with loaded modules.
-        """
-
-        # The module filenames may be spoofed by malware,
-        # since this information resides in usermode space.
-        # See: http://www.ragestorm.net/blogs/?p=163
-
-        # Ignore special process IDs.
-        # PID 0: System Idle Process. Also has a special meaning to the
-        #        toolhelp APIs (current process).
-        # PID 4: System Integrity Group. See this forum post for more info:
-        #        http://tinyurl.com/ycza8jo
-        #        (points to social.technet.microsoft.com)
-        #        Only on XP and above
-        # PID 8: System (?) only in Windows 2000 and below AFAIK.
-        #        It's probably the same as PID 4 in XP and above.
-        dwProcessId = self.get_pid()
-        if dwProcessId in (0, 4, 8):
-            return
-
-        # It would seem easier to clear the snapshot first.
-        # But then all open handles would be closed.
-        found_bases = set()
-        with win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPMODULE,
-                                            dwProcessId) as hSnapshot:
-            me = win32.Module32First(hSnapshot)
-            while me is not None:
-                lpBaseAddress = me.modBaseAddr
-                fileName      = me.szExePath    # full pathname
-                if not fileName:
-                    fileName  = me.szModule     # filename only
-                    if not fileName:
-                        fileName = None
-                else:
-                    fileName = PathOperations.native_to_win32_pathname(fileName)
-                found_bases.add(lpBaseAddress)
-##                if not self.has_module(lpBaseAddress): # XXX triggers a scan
-                if lpBaseAddress not in self.__moduleDict:
-                    aModule = Module(lpBaseAddress, fileName = fileName,
-                                           SizeOfImage = me.modBaseSize,
-                                           process = self)
-                    self._add_module(aModule)
-                else:
-                    aModule = self.get_module(lpBaseAddress)
-                    if not aModule.fileName:
-                        aModule.fileName    = fileName
-                    if not aModule.SizeOfImage:
-                        aModule.SizeOfImage = me.modBaseSize
-                    if not aModule.process:
-                        aModule.process     = self
-                me = win32.Module32Next(hSnapshot)
-##        for base in self.get_module_bases(): # XXX triggers a scan
-        for base in compat.keys(self.__moduleDict):
-            if base not in found_bases:
-                self._del_module(base)
-
-    def clear_modules(self):
-        """
-        Clears the modules snapshot.
-        """
-        for aModule in compat.itervalues(self.__moduleDict):
-            aModule.clear()
-        self.__moduleDict = dict()
-
-#------------------------------------------------------------------------------
-
-    @staticmethod
-    def parse_label(module = None, function = None, offset = None):
-        """
-        Creates a label from a module and a function name, plus an offset.
-
-        @warning: This method only creates the label, it doesn't make sure the
-            label actually points to a valid memory location.
-
-        @type  module: None or str
-        @param module: (Optional) Module name.
-
-        @type  function: None, str or int
-        @param function: (Optional) Function name or ordinal.
-
-        @type  offset: None or int
-        @param offset: (Optional) Offset value.
-
-            If C{function} is specified, offset from the function.
-
-            If C{function} is C{None}, offset from the module.
-
-        @rtype:  str
-        @return:
-            Label representing the given function in the given module.
-
-        @raise ValueError:
-            The module or function name contain invalid characters.
-        """
-
-        # TODO
-        # Invalid characters should be escaped or filtered.
-
-        # Convert ordinals to strings.
-        try:
-            function = "#0x%x" % function
-        except TypeError:
-            pass
-
-        # Validate the parameters.
-        if module is not None and ('!' in module or '+' in module):
-            raise ValueError("Invalid module name: %s" % module)
-        if function is not None and ('!' in function or '+' in function):
-            raise ValueError("Invalid function name: %s" % function)
-
-        # Parse the label.
-        if module:
-            if function:
-                if offset:
-                    label = "%s!%s+0x%x" % (module, function, offset)
-                else:
-                    label = "%s!%s" % (module, function)
-            else:
-                if offset:
-##                    label = "%s+0x%x!" % (module, offset)
-                    label = "%s!0x%x" % (module, offset)
-                else:
-                    label = "%s!" % module
-        else:
-            if function:
-                if offset:
-                    label = "!%s+0x%x" % (function, offset)
-                else:
-                    label = "!%s" % function
-            else:
-                if offset:
-                    label = "0x%x" % offset
-                else:
-                    label = "0x0"
-
-        return label
-
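-    # Examples of the label grammar produced above (illustrative):
-    #
-    #     parse_label("kernel32", "CreateFileA", 0x10)  # "kernel32!CreateFileA+0x10"
-    #     parse_label("kernel32", None, 0x10)           # "kernel32!0x10"
-    #     parse_label("kernel32")                       # "kernel32!"
-    #     parse_label(None, 16)                         # "!#0x10"  (ordinal)
-    #     parse_label()                                 # "0x0"
-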
-    @staticmethod
-    def split_label_strict(label):
-        """
-        Splits a label created with L{parse_label}.
-
-        To parse labels with a less strict syntax, use the L{split_label_fuzzy}
-        method instead.
-
-        @warning: This method only parses the label, it doesn't make sure the
-            label actually points to a valid memory location.
-
-        @type  label: str
-        @param label: Label to split.
-
-        @rtype:  tuple( str or None, str or int or None, int or None )
-        @return: Tuple containing the C{module} name,
-            the C{function} name or ordinal, and the C{offset} value.
-
-            If the label doesn't specify a module,
-            then C{module} is C{None}.
-
-            If the label doesn't specify a function,
-            then C{function} is C{None}.
-
-            If the label doesn't specify an offset,
-            then C{offset} is C{None}.
-
-        @raise ValueError: The label is malformed.
-        """
-        module = function = None
-        offset = 0
-
-        # Special case: None
-        if not label:
-            label = "0x0"
-        else:
-
-            # Remove all blanks.
-            label = label.replace(' ', '')
-            label = label.replace('\t', '')
-            label = label.replace('\r', '')
-            label = label.replace('\n', '')
-
-            # Special case: empty label.
-            if not label:
-                label = "0x0"
-
-        # * ! *
-        if '!' in label:
-            try:
-                module, function = label.split('!')
-            except ValueError:
-                raise ValueError("Malformed label: %s" % label)
-
-            # module ! function
-            if function:
-                if '+' in module:
-                    raise ValueError("Malformed label: %s" % label)
-
-                # module ! function + offset
-                if '+' in function:
-                    try:
-                        function, offset = function.split('+')
-                    except ValueError:
-                        raise ValueError("Malformed label: %s" % label)
-                    try:
-                        offset = HexInput.integer(offset)
-                    except ValueError:
-                        raise ValueError("Malformed label: %s" % label)
-                else:
-
-                    # module ! offset
-                    try:
-                        offset   = HexInput.integer(function)
-                        function = None
-                    except ValueError:
-                        pass
-            else:
-
-                # module + offset !
-                if '+' in module:
-                    try:
-                        module, offset = module.split('+')
-                    except ValueError:
-                        raise ValueError("Malformed label: %s" % label)
-                    try:
-                        offset = HexInput.integer(offset)
-                    except ValueError:
-                        raise ValueError("Malformed label: %s" % label)
-
-                else:
-
-                    # module !
-                    try:
-                        offset = HexInput.integer(module)
-                        module = None
-
-                    # offset !
-                    except ValueError:
-                        pass
-
-            if not module:
-                module   = None
-            if not function:
-                function = None
-
-        # *
-        else:
-
-            # offset
-            try:
-                offset = HexInput.integer(label)
-
-            # # ordinal
-            except ValueError:
-                if label.startswith('#'):
-                    function = label
-                    try:
-                        HexInput.integer(function[1:])
-
-                    # module?
-                    # function?
-                    except ValueError:
-                        raise ValueError("Ambiguous label: %s" % label)
-
-                # module?
-                # function?
-                else:
-                    raise ValueError("Ambiguous label: %s" % label)
-
-        # Convert function ordinal strings into integers.
-        if function and function.startswith('#'):
-            try:
-                function = HexInput.integer(function[1:])
-            except ValueError:
-                pass
-
-        # Convert null offsets to None.
-        if not offset:
-            offset = None
-
-        return (module, function, offset)
-
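-    # The inverse examples (illustrative) for the splitter above:
-    #
-    #     split_label_strict("kernel32!CreateFileA+0x10")  # ("kernel32", "CreateFileA", 0x10)
-    #     split_label_strict("kernel32!0x10")              # ("kernel32", None, 0x10)
-    #     split_label_strict("kernel32!")                  # ("kernel32", None, None)
-    #     split_label_strict("0x400000")                   # (None, None, 0x400000)
-    #     split_label_strict("CreateFileA")                # raises ValueError (ambiguous)
-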
-    def split_label_fuzzy(self, label):
-        """
-        Splits a label entered as user input.
-
-        It's more flexible in its syntax parsing than the L{split_label_strict}
-        method, as it allows the exclamation mark (B{C{!}}) to be omitted. The
-        ambiguity is resolved by searching the modules in the snapshot to guess
-        if a label refers to a module or a function. It also tries to rebuild
-        labels when they contain hardcoded addresses.
-
-        @warning: This method only parses the label, it doesn't make sure the
-            label actually points to a valid memory location.
-
-        @type  label: str
-        @param label: Label to split.
-
-        @rtype:  tuple( str or None, str or int or None, int or None )
-        @return: Tuple containing the C{module} name,
-            the C{function} name or ordinal, and the C{offset} value.
-
-            If the label doesn't specify a module,
-            then C{module} is C{None}.
-
-            If the label doesn't specify a function,
-            then C{function} is C{None}.
-
-            If the label doesn't specify an offset,
-            then C{offset} is C{0}.
-
-        @raise ValueError: The label is malformed.
-        """
-        module = function = None
-        offset = 0
-
-        # Special case: None
-        if not label:
-            label = compat.b("0x0")
-        else:
-
-            # Remove all blanks.
-            label = label.replace(compat.b(' '), compat.b(''))
-            label = label.replace(compat.b('\t'), compat.b(''))
-            label = label.replace(compat.b('\r'), compat.b(''))
-            label = label.replace(compat.b('\n'), compat.b(''))
-
-            # Special case: empty label.
-            if not label:
-                label = compat.b("0x0")
-
-        # If an exclamation sign is present, we know we can parse it strictly.
-        if compat.b('!') in label:
-            return self.split_label_strict(label)
-
-##        # Try to parse it strictly, on error do it the fuzzy way.
-##        try:
-##            return self.split_label(label)
-##        except ValueError:
-##            pass
-
-        # * + offset
-        if compat.b('+') in label:
-            try:
-                prefix, offset = label.split(compat.b('+'))
-            except ValueError:
-                raise ValueError("Malformed label: %s" % label)
-            try:
-                offset = HexInput.integer(offset)
-            except ValueError:
-                raise ValueError("Malformed label: %s" % label)
-            label = prefix
-
-        # This parses both filenames and base addresses.
-        modobj = self.get_module_by_name(label)
-        if modobj:
-
-            # module
-            # module + offset
-            module = modobj.get_name()
-
-        else:
-
-            # TODO
-            # If 0xAAAAAAAA + 0xBBBBBBBB is given,
-            # A is interpreted as a module base address,
-            # and B as an offset.
-            # If that fails, it'd be good to add A+B and try to
-            # use the nearest loaded module.
-
-            # offset
-            # base address + offset (when no module has that base address)
-            try:
-                address = HexInput.integer(label)
-
-                if offset:
-                    # If 0xAAAAAAAA + 0xBBBBBBBB is given,
-                    # A is interpreted as a module base address,
-                    # and B as an offset.
-                    # If that fails, we get here, meaning no module was found
-                    # at A. Then add up A+B and work with that as a hardcoded
-                    # address.
-                    offset = address + offset
-                else:
-                    # If the label is a hardcoded address, we get here.
-                    offset = address
-
-                # If only a hardcoded address is given,
-                # rebuild the label using get_label_at_address.
-                # Then parse it again, but this time strictly,
-                # both because there is no need for fuzzy syntax and
-                # to prevent an infinite recursion if there's a bug here.
-                try:
-                    new_label = self.get_label_at_address(offset)
-                    module, function, offset = \
-                                             self.split_label_strict(new_label)
-                except ValueError:
-                    pass
-
-            # function
-            # function + offset
-            except ValueError:
-                function = label
-
-        # Convert function ordinal strings into integers.
-        if function and function.startswith(compat.b('#')):
-            try:
-                function = HexInput.integer(function[1:])
-            except ValueError:
-                pass
-
-        # Convert null offsets to None.
-        if not offset:
-            offset = None
-
-        return (module, function, offset)
-
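A minimal sketch of fuzzy parsing against a live process snapshot, assuming winappdbg on Windows; the PID below is hypothetical. Unlike strict mode, the exclamation mark can be omitted and the module snapshot resolves the ambiguity:

```python
from winappdbg import Process

proc = Process(4096)   # hypothetical PID of a running process
proc.scan_modules()    # populate the module snapshot

print(proc.split_label_fuzzy("kernel32"))        # ('kernel32', None, None)
print(proc.split_label_fuzzy("CreateFileA"))     # (None, 'CreateFileA', None)
print(proc.split_label_fuzzy("kernel32+0x100"))  # ('kernel32', None, 256)
```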
-    @classmethod
-    def split_label(cls, label):
-        """
-Splits a label into its C{module}, C{function} and C{offset}
-components, as used in L{parse_label}.
-
-When called as a static method, the strict syntax mode is used::
-
-    winappdbg.Process.split_label( "kernel32!CreateFileA" )
-
-When called as an instance method, the fuzzy syntax mode is used::
-
-    aProcessInstance.split_label( "CreateFileA" )
-
-@see: L{split_label_strict}, L{split_label_fuzzy}
-
-@type  label: str
-@param label: Label to split.
-
-@rtype:  tuple( str or None, str or int or None, int or None )
-@return:
-    Tuple containing the C{module} name,
-    the C{function} name or ordinal, and the C{offset} value.
-
-    If the label doesn't specify a module,
-    then C{module} is C{None}.
-
-    If the label doesn't specify a function,
-    then C{function} is C{None}.
-
-    If the label doesn't specify an offset,
-    then C{offset} is C{0}.
-
-@raise ValueError: The label is malformed.
-        """
-
-        # XXX
-        # Docstring indentation was removed so epydoc doesn't complain
-        # when parsing the docs for __use_fuzzy_mode().
-
-        # This function is overwritten by __init__
-        # so here is the static implementation only.
-        return cls.split_label_strict(label)
-
-    # The split_label method is replaced with this function by __init__.
-    def __use_fuzzy_mode(self, label):
-        "@see: L{split_label}"
-        return self.split_label_fuzzy(label)
-##    __use_fuzzy_mode.__doc__ = split_label.__doc__
-
-    def sanitize_label(self, label):
-        """
-        Converts a label taken from user input into a well-formed label.
-
-        @type  label: str
-        @param label: Label taken from user input.
-
-        @rtype:  str
-        @return: Sanitized label.
-        """
-        (module, function, offset) = self.split_label_fuzzy(label)
-        label = self.parse_label(module, function, offset)
-        return label
-
-    def resolve_label(self, label):
-        """
-        Resolve the memory address of the given label.
-
-        @note:
-            If multiple modules with the same name are loaded,
-            the label may be resolved at any of them. For a more precise
-            way to resolve functions use the base address to get the L{Module}
-            object (see L{Process.get_module}) and then call L{Module.resolve}.
-
-            If no module name is specified in the label, the function may be
-            resolved in any loaded module. If you want to resolve all functions
-            with that name in all processes, call L{Process.iter_modules} to
-            iterate through all loaded modules, and then try to resolve the
-            function in each one of them using L{Module.resolve}.
-
-        @type  label: str
-        @param label: Label to resolve.
-
-        @rtype:  int
-        @return: Memory address pointed to by the label.
-
-        @raise ValueError: The label is malformed or impossible to resolve.
-        @raise RuntimeError: Cannot resolve the module or function.
-        """
-
-        # Split the label into module, function and offset components.
-        module, function, offset = self.split_label_fuzzy(label)
-
-        # Resolve the components into a memory address.
-        address = self.resolve_label_components(module, function, offset)
-
-        # Return the memory address.
-        return address
-
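And a hedged usage sketch for resolve_label itself, under the same assumptions (winappdbg on Windows, a reachable hypothetical PID, and kernel32 loaded in the target):

```python
from winappdbg import Process

proc = Process(4096)   # hypothetical PID
proc.scan_modules()

address = proc.resolve_label("kernel32!CreateFileA")
print(hex(address))

# The same resolution from pre-split components:
assert address == proc.resolve_label_components("kernel32", "CreateFileA")
```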
-    def resolve_label_components(self, module   = None,
-                                       function = None,
-                                       offset   = None):
-        """
-        Resolve the memory address of the given module, function and/or offset.
-
-        @note:
-            If multiple modules with the same name are loaded,
-            the label may be resolved at any of them. For a more precise
-            way to resolve functions use the base address to get the L{Module}
-            object (see L{Process.get_module}) and then call L{Module.resolve}.
-
-            If no module name is specified in the label, the function may be
-            resolved in any loaded module. If you want to resolve all functions
-            with that name in all processes, call L{Process.iter_modules} to
-            iterate through all loaded modules, and then try to resolve the
-            function in each one of them using L{Module.resolve}.
-
-        @type  module: None or str
-        @param module: (Optional) Module name.
-
-        @type  function: None, str or int
-        @param function: (Optional) Function name or ordinal.
-
-        @type  offset: None or int
-        @param offset: (Optional) Offset value.
-
-            If C{function} is specified, offset from the function.
-
-            If C{function} is C{None}, offset from the module.
-
-        @rtype:  int
-        @return: Memory address pointed to by the label.
-
-        @raise ValueError: The label is malformed or impossible to resolve.
-        @raise RuntimeError: Cannot resolve the module or function.
-        """
-        # Default address if no module or function are given.
-        # An offset may be added later.
-        address = 0
-
-        # Resolve the module.
-        # If the module is not found, check for the special symbol "main".
-        if module:
-            modobj = self.get_module_by_name(module)
-            if not modobj:
-                if module == "main":
-                    modobj = self.get_main_module()
-                else:
-                    raise RuntimeError("Module %r not found" % module)
-
-            # Resolve the exported function or debugging symbol.
-            # If all else fails, check for the special symbol "start".
-            if function:
-                address = modobj.resolve(function)
-                if address is None:
-                    address = modobj.resolve_symbol(function)
-                    if address is None:
-                        if function == "start":
-                            address = modobj.get_entry_point()
-                        if address is None:
-                            msg = "Symbol %r not found in module %s"
-                            raise RuntimeError(msg % (function, module))
-
-            # No function, use the base address.
-            else:
-                address = modobj.get_base()
-
-        # Resolve the function in any module.
-        # If all else fails, check for the special symbols "main" and "start".
-        elif function:
-            for modobj in self.iter_modules():
-                address = modobj.resolve(function)
-                if address is not None:
-                    break
-            if address is None:
-                if function == "start":
-                    modobj = self.get_main_module()
-                    address = modobj.get_entry_point()
-                elif function == "main":
-                    modobj = self.get_main_module()
-                    address = modobj.get_base()
-                else:
-                    msg = "Function %r not found in any module" % function
-                    raise RuntimeError(msg)
-
-        # Return the address plus the offset.
-        if offset:
-            address = address + offset
-        return address
-
-    def get_label_at_address(self, address, offset = None):
-        """
-        Creates a label from the given memory address.
-
-        @warning: This method uses the name of the nearest currently loaded
-            module. If that module is unloaded later, the label becomes
-            impossible to resolve.
-
-        @type  address: int
-        @param address: Memory address.
-
-        @type  offset: None or int
-        @param offset: (Optional) Offset value.
-
-        @rtype:  str
-        @return: Label pointing to the given address.
-        """
-        if offset:
-            address = address + offset
-        modobj = self.get_module_at_address(address)
-        if modobj:
-            label = modobj.get_label_at_address(address)
-        else:
-            label = self.parse_label(None, None, address)
-        return label
-
-#------------------------------------------------------------------------------
-
-    # The memory addresses of system breakpoints are cached, since they're
-    # all in system libraries and it's not likely they'll ever change their address
-    # during the lifetime of the process... I don't suppose a program could
-    # happily unload ntdll.dll and survive.
-    def __get_system_breakpoint(self, label):
-        try:
-            return self.__system_breakpoints[label]
-        except KeyError:
-            try:
-                address = self.resolve_label(label)
-            except Exception:
-                return None
-            self.__system_breakpoints[label] = address
-            return address
-
-    # It's in kernel32 in Windows Server 2003, in ntdll since Windows Vista.
-    # It can only be resolved if we have the debug symbols.
-    def get_break_on_error_ptr(self):
-        """
-        @rtype: int
-        @return:
-            If present, returns the address of the C{g_dwLastErrorToBreakOn}
-            global variable for this process. If not, returns C{None}.
-        """
-        address = self.__get_system_breakpoint("ntdll!g_dwLastErrorToBreakOn")
-        if not address:
-            address = self.__get_system_breakpoint(
-                                            "kernel32!g_dwLastErrorToBreakOn")
-            # cheat a little :)
-            self.__system_breakpoints["ntdll!g_dwLastErrorToBreakOn"] = address
-        return address
-
-    def is_system_defined_breakpoint(self, address):
-        """
-        @type  address: int
-        @param address: Memory address.
-
-        @rtype:  bool
-        @return: C{True} if the given address points to a system defined
-            breakpoint. System defined breakpoints are hardcoded into
-            system libraries.
-        """
-        if address:
-            module = self.get_module_at_address(address)
-            if module:
-                return module.match_name("ntdll")    or \
-                       module.match_name("kernel32")
-        return False
-
-    # FIXME
-    # In Wine, the system breakpoint seems to be somewhere in kernel32.
-    def get_system_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the system breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll!DbgBreakPoint")
-
-    # I don't know when this breakpoint is actually used...
-    def get_user_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the user breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll!DbgUserBreakPoint")
-
-    # On some platforms, this breakpoint can only be resolved
-    # when the debugging symbols for ntdll.dll are loaded.
-    def get_breakin_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the remote breakin breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll!DbgUiRemoteBreakin")
-
-    # Equivalent of ntdll!DbgBreakPoint in Wow64.
-    def get_wow64_system_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the Wow64 system breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll32!DbgBreakPoint")
-
-    # Equivalent of ntdll!DbgUserBreakPoint in Wow64.
-    def get_wow64_user_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the Wow64 user breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll32!DbgUserBreakPoint")
-
-    # Equivalent of ntdll!DbgUiRemoteBreakin in Wow64.
-    def get_wow64_breakin_breakpoint(self):
-        """
-        @rtype:  int or None
-        @return: Memory address of the Wow64 remote breakin breakpoint
-            within the process address space.
-            Returns C{None} on error.
-        """
-        return self.__get_system_breakpoint("ntdll32!DbgUiRemoteBreakin")
-
-#------------------------------------------------------------------------------
-
-    def load_symbols(self):
-        """
-        Loads the debugging symbols for all modules in this snapshot.
-        Automatically called by L{get_symbols}.
-        """
-        for aModule in self.iter_modules():
-            aModule.load_symbols()
-
-    def unload_symbols(self):
-        """
-        Unloads the debugging symbols for all modules in this snapshot.
-        """
-        for aModule in self.iter_modules():
-            aModule.unload_symbols()
-
-    def get_symbols(self):
-        """
-        Returns the debugging symbols for all modules in this snapshot.
-        The symbols are automatically loaded when needed.
-
-        @rtype:  list of tuple( str, int, int )
-        @return: List of symbols.
-            Each symbol is represented by a tuple that contains:
-                - Symbol name
-                - Symbol memory address
-                - Symbol size in bytes
-        """
-        symbols = list()
-        for aModule in self.iter_modules():
-            for symbol in aModule.iter_symbols():
-                symbols.append(symbol)
-        return symbols
-
-    def iter_symbols(self):
-        """
-        Returns an iterator for the debugging symbols in all modules in this
-        snapshot, in no particular order.
-        The symbols are automatically loaded when needed.
-
-        @rtype:  iterator of tuple( str, int, int )
-        @return: Iterator of symbols.
-            Each symbol is represented by a tuple that contains:
-                - Symbol name
-                - Symbol memory address
-                - Symbol size in bytes
-        """
-        for aModule in self.iter_modules():
-            for symbol in aModule.iter_symbols():
-                yield symbol
-
-    def resolve_symbol(self, symbol, bCaseSensitive = False):
-        """
-        Resolves a debugging symbol's address.
-
-        @type  symbol: str
-        @param symbol: Name of the symbol to resolve.
-
-        @type  bCaseSensitive: bool
-        @param bCaseSensitive: C{True} for case sensitive matches,
-            C{False} for case insensitive.
-
-        @rtype:  int or None
-        @return: Memory address of symbol. C{None} if not found.
-        """
-        if bCaseSensitive:
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                if symbol == SymbolName:
-                    return SymbolAddress
-        else:
-            symbol = symbol.lower()
-            for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-                if symbol == SymbolName.lower():
-                    return SymbolAddress
-
-    def get_symbol_at_address(self, address):
-        """
-        Tries to find the closest matching symbol for the given address.
-
-        @type  address: int
-        @param address: Memory address to query.
-
-        @rtype: None or tuple( str, int, int )
-        @return: Returns a tuple consisting of:
-             - Name
-             - Address
-             - Size (in bytes)
-            Returns C{None} if no symbol could be matched.
-        """
-        # Any module may have symbols pointing anywhere in memory, so there's
-        # no easy way to optimize this. I guess we're stuck with brute force.
-        found = None
-        for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
-            if SymbolAddress > address:
-                continue
-
-            if SymbolAddress == address:
-                found = (SymbolName, SymbolAddress, SymbolSize)
-                break
-
-            if SymbolAddress < address:
-                if found and (address - found[1]) < (address - SymbolAddress):
-                    continue
-                else:
-                    found = (SymbolName, SymbolAddress, SymbolSize)
-        return found
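The selection rule in that loop is easy to verify in isolation; here is a self-contained re-implementation with hypothetical symbol tuples (same behavior: an exact match wins, otherwise the closest symbol at or below the queried address is kept):

```python
def nearest_symbol(symbols, address):
    # Keep the closest symbol whose address is <= the queried address;
    # an exact match wins immediately.
    found = None
    for name, sym_addr, size in symbols:
        if sym_addr > address:
            continue
        if sym_addr == address:
            return (name, sym_addr, size)
        if found is None or address - sym_addr < address - found[1]:
            found = (name, sym_addr, size)
    return found

symbols = [("a", 0x1000, 16), ("b", 0x2000, 16), ("c", 0x3000, 16)]
print(nearest_symbol(symbols, 0x2010))  # ('b', 0x2000, 16)
```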
-#------------------------------------------------------------------------------
-
-    # XXX _notify_* methods should not trigger a scan
-
-    def _add_module(self, aModule):
-        """
-        Private method to add a module object to the snapshot.
-
-        @type  aModule: L{Module}
-        @param aModule: Module object.
-        """
-##        if not isinstance(aModule, Module):
-##            if hasattr(aModule, '__class__'):
-##                typename = aModule.__class__.__name__
-##            else:
-##                typename = str(type(aModule))
-##            msg = "Expected Module, got %s instead" % typename
-##            raise TypeError(msg)
-        lpBaseOfDll = aModule.get_base()
-##        if lpBaseOfDll in self.__moduleDict:
-##            msg = "Module already exists: %d" % lpBaseOfDll
-##            raise KeyError(msg)
-        aModule.set_process(self)
-        self.__moduleDict[lpBaseOfDll] = aModule
-
-    def _del_module(self, lpBaseOfDll):
-        """
-        Private method to remove a module object from the snapshot.
-
-        @type  lpBaseOfDll: int
-        @param lpBaseOfDll: Module base address.
-        """
-        try:
-            aModule = self.__moduleDict[lpBaseOfDll]
-            del self.__moduleDict[lpBaseOfDll]
-        except KeyError:
-            aModule = None
-            msg = "Unknown base address %d" % HexDump.address(lpBaseOfDll)
-            warnings.warn(msg, RuntimeWarning)
-        if aModule:
-            aModule.clear()     # remove circular references
-
-    def __add_loaded_module(self, event):
-        """
-        Private method to automatically add new module objects from debug events.
-
-        @type  event: L{Event}
-        @param event: Event object.
-        """
-        lpBaseOfDll = event.get_module_base()
-        hFile       = event.get_file_handle()
-##        if not self.has_module(lpBaseOfDll):  # XXX this would trigger a scan
-        if lpBaseOfDll not in self.__moduleDict:
-            fileName = event.get_filename()
-            if not fileName:
-                fileName = None
-            if hasattr(event, 'get_start_address'):
-                EntryPoint = event.get_start_address()
-            else:
-                EntryPoint = None
-            aModule  = Module(lpBaseOfDll, hFile, fileName = fileName,
-                                                EntryPoint = EntryPoint,
-                                                   process = self)
-            self._add_module(aModule)
-        else:
-            aModule = self.get_module(lpBaseOfDll)
-            if not aModule.hFile and hFile not in (None, 0,
-                                                   win32.INVALID_HANDLE_VALUE):
-                aModule.hFile = hFile
-            if not aModule.process:
-                aModule.process = self
-            if aModule.EntryPoint is None and \
-                                           hasattr(event, 'get_start_address'):
-                aModule.EntryPoint = event.get_start_address()
-            if not aModule.fileName:
-                fileName = event.get_filename()
-                if fileName:
-                    aModule.fileName = fileName
-
-    def _notify_create_process(self, event):
-        """
-        Notify the load of the main module.
-
-        This is done automatically by the L{Debug} class; you shouldn't need
-        to call it yourself.
-
-        @type  event: L{CreateProcessEvent}
-        @param event: Create process event.
-
-        @rtype:  bool
-        @return: C{True} to call the user-defined handler, C{False} otherwise.
-        """
-        self.__add_loaded_module(event)
-        return True
-
-    def _notify_load_dll(self, event):
-        """
-        Notify the load of a new module.
-
-        This is done automatically by the L{Debug} class; you shouldn't need
-        to call it yourself.
-
-        @type  event: L{LoadDLLEvent}
-        @param event: Load DLL event.
-
-        @rtype:  bool
-        @return: C{True} to call the user-defined handler, C{False} otherwise.
-        """
-        self.__add_loaded_module(event)
-        return True
-
-    def _notify_unload_dll(self, event):
-        """
-        Notify the release of a loaded module.
-
-        This is done automatically by the L{Debug} class; you shouldn't need
-        to call it yourself.
-
-        @type  event: L{UnloadDLLEvent}
-        @param event: Unload DLL event.
-
-        @rtype:  bool
-        @return: C{True} to call the user-defined handler, C{False} otherwise.
-        """
-        lpBaseOfDll = event.get_module_base()
-##        if self.has_module(lpBaseOfDll):  # XXX this would trigger a scan
-        if lpBaseOfDll in self.__moduleDict:
-            self._del_module(lpBaseOfDll)
-        return True
diff --git a/spaces/Superlang/ImageProcessor/Image/processor.py b/spaces/Superlang/ImageProcessor/Image/processor.py
deleted file mode 100644
index 2d0976bd388ceab926bec427546dfa5cf2ae91f4..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/Image/processor.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import cv2
-import numpy as np
-
-
-def pad64(x):
-    return int(np.ceil(float(x) / 64.0) * 64 - x)
-
-
-def safer_memory(x):
-    # Fix many MAC/AMD problems
-    return np.ascontiguousarray(x.copy()).copy()
-
-
-def HWC3(x):
-    assert x.dtype == np.uint8
-    if x.ndim == 2:
-        x = x[:, :, None]
-    assert x.ndim == 3
-    H, W, C = x.shape
-    assert C == 1 or C == 3 or C == 4
-    if C == 3:
-        return x
-    if C == 1:
-        return np.concatenate([x, x, x], axis=2)
-    if C == 4:
-        color = x[:, :, 0:3].astype(np.float32)
-        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
-        y = color * alpha + 255.0 * (1.0 - alpha)
-        y = y.clip(0, 255).astype(np.uint8)
-        return y
-
-
-def resize_image_with_pad(input_image, resolution):
-    img = HWC3(input_image)
-    H_raw, W_raw, _ = img.shape
-    k = float(resolution) / float(min(H_raw, W_raw))
-    interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA
-    H_target = int(np.round(float(H_raw) * k))
-    W_target = int(np.round(float(W_raw) * k))
-    img = cv2.resize(img, (W_target, H_target), interpolation=interpolation)
-    H_pad, W_pad = pad64(H_target), pad64(W_target)
-    img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge')
-
-    def remove_pad(x):
-        return safer_memory(x[:H_target, :W_target])
-
-    return safer_memory(img_padded), remove_pad
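A minimal usage sketch for resize_image_with_pad with a synthetic image, assuming the helpers above are importable from this repo's module path; the short side is scaled to the requested resolution and both sides are edge-padded up to a multiple of 64:

```python
import numpy as np

from Image.processor import resize_image_with_pad  # repo-local import, path assumed

img = np.zeros((480, 640, 3), dtype=np.uint8)
padded, remove_pad = resize_image_with_pad(img, resolution=512)
print(padded.shape)              # (512, 704, 3): resized to 512x683, then padded
print(remove_pad(padded).shape)  # (512, 683, 3): padding stripped again
```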
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/exp/upernet_global_small/config.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/exp/upernet_global_small/config.py
deleted file mode 100644
index 01db96bf9b0be531aa0eaf62fee51543712f8670..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/exp/upernet_global_small/config.py
+++ /dev/null
@@ -1,38 +0,0 @@
-_base_ = [
-    '../../configs/_base_/models/upernet_uniformer.py', 
-    '../../configs/_base_/datasets/ade20k.py',
-    '../../configs/_base_/default_runtime.py', 
-    '../../configs/_base_/schedules/schedule_160k.py'
-]
-model = dict(
-    backbone=dict(
-        type='UniFormer',
-        embed_dim=[64, 128, 320, 512],
-        layers=[3, 4, 8, 3],
-        head_dim=64,
-        drop_path_rate=0.25,
-        windows=False,
-        hybrid=False
-    ),
-    decode_head=dict(
-        in_channels=[64, 128, 320, 512],
-        num_classes=150
-    ),
-    auxiliary_head=dict(
-        in_channels=320,
-        num_classes=150
-    ))
-
-# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
-optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
-                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                 'relative_position_bias_table': dict(decay_mult=0.),
-                                                 'norm': dict(decay_mult=0.)}))
-
-lr_config = dict(_delete_=True, policy='poly',
-                 warmup='linear',
-                 warmup_iters=1500,
-                 warmup_ratio=1e-6,
-                 power=1.0, min_lr=0.0, by_epoch=False)
-
-data=dict(samples_per_gpu=2)
\ No newline at end of file
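For context, a hedged sketch of how an mmseg-style config like this is typically consumed (assumes mmcv < 2.0 and that the file lives at the hypothetical path below):

```python
from mmcv import Config

cfg = Config.fromfile("exp/upernet_global_small/config.py")  # hypothetical path
print(cfg.model.backbone.type)               # 'UniFormer'
print(cfg.optimizer.type, cfg.optimizer.lr)  # 'AdamW' 6e-05
print(cfg.data.samples_per_gpu)              # 2
```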
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/handlers/json_handler.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
deleted file mode 100644
index 18d4f15f74139d20adff18b20be5529c592a66b6..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import json
-
-import numpy as np
-
-from .base import BaseFileHandler
-
-
-def set_default(obj):
-    """Set default json values for non-serializable values.
-
-    It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list.
-    It also converts ``np.generic`` (including ``np.int32``, ``np.float32``,
-    etc.) into plain numbers of plain python built-in types.
-    """
-    if isinstance(obj, (set, range)):
-        return list(obj)
-    elif isinstance(obj, np.ndarray):
-        return obj.tolist()
-    elif isinstance(obj, np.generic):
-        return obj.item()
-    raise TypeError(f'{type(obj)} is unsupported for json dump')
-
-
-class JsonHandler(BaseFileHandler):
-
-    def load_from_fileobj(self, file):
-        return json.load(file)
-
-    def dump_to_fileobj(self, obj, file, **kwargs):
-        kwargs.setdefault('default', set_default)
-        json.dump(obj, file, **kwargs)
-
-    def dump_to_str(self, obj, **kwargs):
-        kwargs.setdefault('default', set_default)
-        return json.dumps(obj, **kwargs)
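A self-contained check of the set_default hook defined above: sets, ranges and numpy values are coerced into plain JSON types.

```python
import json

import numpy as np

# set_default as defined in the handler above
obj = {"ids": {1, 2, 3}, "arr": np.arange(3), "val": np.float32(0.5)}
print(json.dumps(obj, default=set_default))
# {"ids": [1, 2, 3], "arr": [0, 1, 2], "val": 0.5}
```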
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/iou3d.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/iou3d.py
deleted file mode 100644
index 6fc71979190323f44c09f8b7e1761cf49cd2d76b..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/iou3d.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
-    'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward',
-    'iou3d_nms_normal_forward'
-])
-
-
-def boxes_iou_bev(boxes_a, boxes_b):
-    """Calculate boxes IoU in the Bird's Eye View.
-
-    Args:
-        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
-        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).
-
-    Returns:
-        ans_iou (torch.Tensor): IoU result with shape (M, N).
-    """
-    ans_iou = boxes_a.new_zeros(
-        torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
-
-    ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(),
-                                           boxes_b.contiguous(), ans_iou)
-
-    return ans_iou
-
-
-def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
-    """NMS function GPU implementation (for BEV boxes). The overlap of two
-    boxes for IoU calculation is defined as the exact overlapping area of the
-    two boxes. In this function, one can also set ``pre_max_size`` and
-    ``post_max_size``.
-
-    Args:
-        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
-            ([x1, y1, x2, y2, ry]).
-        scores (torch.Tensor): Scores of boxes with the shape of [N].
-        thresh (float): Overlap threshold of NMS.
-        pre_max_size (int, optional): Max size of boxes before NMS.
-            Default: None.
-        post_max_size (int, optional): Max size of boxes after NMS.
-            Default: None.
-
-    Returns:
-        torch.Tensor: Indexes after NMS.
-    """
-    assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]'
-    order = scores.sort(0, descending=True)[1]
-
-    if pre_max_size is not None:
-        order = order[:pre_max_size]
-    boxes = boxes[order].contiguous()
-
-    keep = torch.zeros(boxes.size(0), dtype=torch.long)
-    num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh)
-    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
-    if post_max_size is not None:
-        keep = keep[:post_max_size]
-    return keep
-
-
-def nms_normal_bev(boxes, scores, thresh):
-    """Normal NMS function GPU implementation (for BEV boxes). The overlap of
-    two boxes for IoU calculation is defined as the exact overlapping area of
-    the two boxes WITH their yaw angle set to 0.
-
-    Args:
-        boxes (torch.Tensor): Input boxes with shape (N, 5).
-        scores (torch.Tensor): Scores of predicted boxes with shape (N).
-        thresh (float): Overlap threshold of NMS.
-
-    Returns:
-        torch.Tensor: Remaining indices with scores in descending order.
-    """
-    assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]'
-    order = scores.sort(0, descending=True)[1]
-
-    boxes = boxes[order].contiguous()
-
-    keep = torch.zeros(boxes.size(0), dtype=torch.long)
-    num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh)
-    return order[keep[:num_out].cuda(boxes.device)].contiguous()
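These ops dispatch to compiled CUDA extensions, so the following is an illustrative sketch only (assumes a CUDA build of mmcv and an available GPU):

```python
import torch

boxes = torch.rand(100, 5, device="cuda")   # [x1, y1, x2, y2, ry]
scores = torch.rand(100, device="cuda")

iou = boxes_iou_bev(boxes, boxes)            # (100, 100) pairwise BEV IoU
keep = nms_bev(boxes, scores, thresh=0.5, pre_max_size=50)
print(iou.shape, keep.shape)
```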
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/up_conv_block.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/up_conv_block.py
deleted file mode 100644
index 378469da76cb7bff6a639e7877b3c275d50490fb..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/up_conv_block.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule, build_upsample_layer
-
-
-class UpConvBlock(nn.Module):
-    """Upsample convolution block in decoder for UNet.
-
-    This upsample convolution block consists of one upsample module
-    followed by one convolution block. The upsample module expands the
-    high-level low-resolution feature map and the convolution block fuses
-    the upsampled high-level low-resolution feature map and the low-level
-    high-resolution feature map from encoder.
-
-    Args:
-        conv_block (nn.Sequential): Sequential of convolutional layers.
-        in_channels (int): Number of input channels of the high-level
-            low-resolution feature map.
-        skip_channels (int): Number of input channels of the low-level
-            high-resolution feature map from encoder.
-        out_channels (int): Number of output channels.
-        num_convs (int): Number of convolutional layers in the conv_block.
-            Default: 2.
-        stride (int): Stride of convolutional layer in conv_block. Default: 1.
-        dilation (int): Dilation rate of convolutional layer in conv_block.
-            Default: 1.
-        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-            memory while slowing down the training speed. Default: False.
-        conv_cfg (dict | None): Config dict for convolution layer.
-            Default: None.
-        norm_cfg (dict | None): Config dict for normalization layer.
-            Default: dict(type='BN').
-        act_cfg (dict | None): Config dict for activation layer in ConvModule.
-            Default: dict(type='ReLU').
-        upsample_cfg (dict): The upsample config of the upsample module in
-            decoder. Default: dict(type='InterpConv'). If the size of
-            high-level feature map is the same as that of skip feature map
-            (low-level feature map from encoder), it does not need to upsample the
-            high-level feature map and the upsample_cfg is None.
-        dcn (bool): Use deformable convolution in convolutional layer or not.
-            Default: None.
-        plugins (dict): plugins for convolutional layers. Default: None.
-    """
-
-    def __init__(self,
-                 conv_block,
-                 in_channels,
-                 skip_channels,
-                 out_channels,
-                 num_convs=2,
-                 stride=1,
-                 dilation=1,
-                 with_cp=False,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN'),
-                 act_cfg=dict(type='ReLU'),
-                 upsample_cfg=dict(type='InterpConv'),
-                 dcn=None,
-                 plugins=None):
-        super(UpConvBlock, self).__init__()
-        assert dcn is None, 'Not implemented yet.'
-        assert plugins is None, 'Not implemented yet.'
-
-        self.conv_block = conv_block(
-            in_channels=2 * skip_channels,
-            out_channels=out_channels,
-            num_convs=num_convs,
-            stride=stride,
-            dilation=dilation,
-            with_cp=with_cp,
-            conv_cfg=conv_cfg,
-            norm_cfg=norm_cfg,
-            act_cfg=act_cfg,
-            dcn=None,
-            plugins=None)
-        if upsample_cfg is not None:
-            self.upsample = build_upsample_layer(
-                cfg=upsample_cfg,
-                in_channels=in_channels,
-                out_channels=skip_channels,
-                with_cp=with_cp,
-                norm_cfg=norm_cfg,
-                act_cfg=act_cfg)
-        else:
-            self.upsample = ConvModule(
-                in_channels,
-                skip_channels,
-                kernel_size=1,
-                stride=1,
-                padding=0,
-                conv_cfg=conv_cfg,
-                norm_cfg=norm_cfg,
-                act_cfg=act_cfg)
-
-    def forward(self, skip, x):
-        """Forward function."""
-
-        x = self.upsample(x)
-        out = torch.cat([skip, x], dim=1)
-        out = self.conv_block(out)
-
-        return out
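A torch-only sketch of the shape flow this block implements, with hypothetical channel sizes and plain Upsample + Conv2d standing in for mmcv's InterpConv:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 128, 16, 16)    # high-level, low-resolution (in_channels=128)
skip = torch.randn(1, 64, 32, 32)  # low-level, high-resolution (skip_channels=64)

upsample = nn.Sequential(          # stand-in for the InterpConv upsample module
    nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
    nn.Conv2d(128, 64, kernel_size=1),
)
conv_block = nn.Conv2d(2 * 64, 32, kernel_size=3, padding=1)  # out_channels=32

out = conv_block(torch.cat([skip, upsample(x)], dim=1))
print(out.shape)  # torch.Size([1, 32, 32, 32])
```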
diff --git a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py b/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py
deleted file mode 100644
index bc931b059d6165c84e8ff4f09d5c62d19930cee9..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# MIT License
-
-# Copyright (c) 2022 Intelligent Systems Lab Org
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# File author: Shariq Farooq Bhat
-
-import itertools
-
-import torch
-import torch.nn as nn
-from ..depth_model import DepthModel
-from ..base_models.midas import MidasCore
-from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
-from ..layers.dist_layers import ConditionalLogBinomial
-from ..layers.localbins_layers import (Projector, SeedBinRegressor,
-                                            SeedBinRegressorUnnormed)
-from ..model_io import load_state_from_resource
-
-
-class ZoeDepth(DepthModel):
-    def __init__(self, core,  n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10,
-                 n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True,
-                 midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
-        """ZoeDepth model. This is the version of ZoeDepth that has a single metric head
-
-        Args:
-            core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
-            n_bins (int, optional): Number of bin centers. Defaults to 64.
-            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, a linear normalization trick is applied, which results in bounded bin centers.
-                                               For "softplus", softplus activation is used, so the bin centers are unbounded. Defaults to "softplus".
-            bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
-            min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
-            max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
-            n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
-            attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
-            attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
-            attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
-            attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
-            min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
-            max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
-            train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
-            midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10.
-            encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
-            pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
-        """
-        super().__init__()
-
-        self.core = core
-        self.max_depth = max_depth
-        self.min_depth = min_depth
-        self.min_temp = min_temp
-        self.bin_centers_type = bin_centers_type
-
-        self.midas_lr_factor = midas_lr_factor
-        self.encoder_lr_factor = encoder_lr_factor
-        self.pos_enc_lr_factor = pos_enc_lr_factor
-        self.train_midas = train_midas
-        self.inverse_midas = inverse_midas
-
-        if self.encoder_lr_factor <= 0:
-            self.core.freeze_encoder(
-                freeze_rel_pos=self.pos_enc_lr_factor <= 0)
-
-        N_MIDAS_OUT = 32
-        btlnck_features = self.core.output_channels[0]
-        num_out_features = self.core.output_channels[1:]
-
-        self.conv2 = nn.Conv2d(btlnck_features, btlnck_features,
-                               kernel_size=1, stride=1, padding=0)  # btlnck conv
-
-        if bin_centers_type == "normed":
-            SeedBinRegressorLayer = SeedBinRegressor
-            Attractor = AttractorLayer
-        elif bin_centers_type == "softplus":
-            SeedBinRegressorLayer = SeedBinRegressorUnnormed
-            Attractor = AttractorLayerUnnormed
-        elif bin_centers_type == "hybrid1":
-            SeedBinRegressorLayer = SeedBinRegressor
-            Attractor = AttractorLayerUnnormed
-        elif bin_centers_type == "hybrid2":
-            SeedBinRegressorLayer = SeedBinRegressorUnnormed
-            Attractor = AttractorLayer
-        else:
-            raise ValueError(
-                "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
-
-        self.seed_bin_regressor = SeedBinRegressorLayer(
-            btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
-        self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
-        self.projectors = nn.ModuleList([
-            Projector(num_out, bin_embedding_dim)
-            for num_out in num_out_features
-        ])
-        self.attractors = nn.ModuleList([
-            Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth,
-                      alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type)
-            for i in range(len(num_out_features))
-        ])
-
-        last_in = N_MIDAS_OUT + 1  # +1 for relative depth
-
-        # use log binomial instead of softmax
-        self.conditional_log_binomial = ConditionalLogBinomial(
-            last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp)
-
-    def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
-        """
-        Args:
-            x (torch.Tensor): Input image tensor of shape (B, C, H, W)
-            return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
-            denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False.
-            return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.
-        
-        Returns:
-            dict: Dictionary containing the following keys:
-                - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
-                - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
-                - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True
-                - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True
-
-        """
-        b, c, h, w = x.shape
-        # print("input shape ", x.shape)
-        self.orig_input_width = w
-        self.orig_input_height = h
-        rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
-        # print("output shapes", rel_depth.shape, out.shape)
-
-        outconv_activation = out[0]
-        btlnck = out[1]
-        x_blocks = out[2:]
-
-        x_d0 = self.conv2(btlnck)
-        x = x_d0
-        _, seed_b_centers = self.seed_bin_regressor(x)
-
-        if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
-            b_prev = (seed_b_centers - self.min_depth) / \
-                (self.max_depth - self.min_depth)
-        else:
-            b_prev = seed_b_centers
-
-        prev_b_embedding = self.seed_projector(x)
-
-        # unroll this loop for better performance
-        for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
-            b_embedding = projector(x)
-            b, b_centers = attractor(
-                b_embedding, b_prev, prev_b_embedding, interpolate=True)
-            b_prev = b.clone()
-            prev_b_embedding = b_embedding.clone()
-
-        last = outconv_activation
-
-        if self.inverse_midas:
-            # invert depth followed by normalization
-            rel_depth = 1.0 / (rel_depth + 1e-6)
-            rel_depth = (rel_depth - rel_depth.min()) / \
-                (rel_depth.max() - rel_depth.min())
-        # concat rel depth with last. First interpolate rel depth to last size
-        rel_cond = rel_depth.unsqueeze(1)
-        rel_cond = nn.functional.interpolate(
-            rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True)
-        last = torch.cat([last, rel_cond], dim=1)
-
-        b_embedding = nn.functional.interpolate(
-            b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
-        x = self.conditional_log_binomial(last, b_embedding)
-
-        # Now depth value is Sum(px * cx), where cx are the bin centers from the last bin tensor
-        # print(x.shape, b_centers.shape)
-        b_centers = nn.functional.interpolate(
-            b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
-        out = torch.sum(x * b_centers, dim=1, keepdim=True)
-
-        # Structure output dict
-        output = dict(metric_depth=out)
-        if return_final_centers or return_probs:
-            output['bin_centers'] = b_centers
-
-        if return_probs:
-            output['probs'] = x
-
-        return output
-
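The final reduction in forward ("Sum(px * cx)" above) is just an expectation of the bin centers under the predicted per-pixel distribution; a self-contained shape sketch:

```python
import torch

B, n_bins, H, W = 2, 64, 48, 64
probs = torch.softmax(torch.randn(B, n_bins, H, W), dim=1)  # px, sums to 1 over bins
centers = torch.rand(B, n_bins, H, W)                       # cx, per-pixel bin centers
depth = torch.sum(probs * centers, dim=1, keepdim=True)     # expected depth
print(depth.shape)  # torch.Size([2, 1, 48, 64])
```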
-    def get_lr_params(self, lr):
-        """
-        Learning rate configuration for different layers of the model
-        Args:
-            lr (float) : Base learning rate
-        Returns:
-            list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
-        """
-        param_conf = []
-        if self.train_midas:
-            if self.encoder_lr_factor > 0:
-                param_conf.append({'params': self.core.get_enc_params_except_rel_pos(
-                ), 'lr': lr / self.encoder_lr_factor})
-
-            if self.pos_enc_lr_factor > 0:
-                param_conf.append(
-                    {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor})
-
-            midas_params = self.core.core.scratch.parameters()
-            midas_lr_factor = self.midas_lr_factor
-            param_conf.append(
-                {'params': midas_params, 'lr': lr / midas_lr_factor})
-
-        remaining_modules = []
-        for name, child in self.named_children():
-            if name != 'core':
-                remaining_modules.append(child)
-        remaining_params = itertools.chain(
-            *[child.parameters() for child in remaining_modules])
-
-        param_conf.append({'params': remaining_params, 'lr': lr})
-
-        return param_conf
-
-    @staticmethod
-    def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
-        core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
-                               train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
-        model = ZoeDepth(core, **kwargs)
-        if pretrained_resource:
-            assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
-            model = load_state_from_resource(model, pretrained_resource)
-        return model
-
-    @staticmethod
-    def build_from_config(config):
-        return ZoeDepth.build(**config)
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py
deleted file mode 100644
index eda80935123cb5db7e18d7fb82fe5f71991d7af8..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from __future__ import annotations
-
-import os
-import re
-import sys
-from functools import lru_cache
-from typing import cast
-
-from .api import PlatformDirsABC
-
-
-class Android(PlatformDirsABC):
-    """
-    Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
-    `appname <platformdirs.api.PlatformDirsABC.appname>` and
-    `version <platformdirs.api.PlatformDirsABC.version>`.
-    """
-
-    @property
-    def user_data_dir(self) -> str:
-        """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
-        return self._append_app_name_and_version(cast(str, _android_folder()), "files")
-
-    @property
-    def site_data_dir(self) -> str:
-        """:return: data directory shared by users, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def user_config_dir(self) -> str:
-        """
-        :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
-        """
-        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
-
-    @property
-    def site_config_dir(self) -> str:
-        """:return: config directory shared by the users, same as `user_config_dir`"""
-        return self.user_config_dir
-
-    @property
-    def user_cache_dir(self) -> str:
-        """:return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
-        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
-
-    @property
-    def user_state_dir(self) -> str:
-        """:return: state directory tied to the user, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def user_log_dir(self) -> str:
-        """
-        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
-          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
-        """
-        path = self.user_cache_dir
-        if self.opinion:
-            path = os.path.join(path, "log")
-        return path
-
-    @property
-    def user_documents_dir(self) -> str:
-        """
-        :return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
-        """
-        return _android_documents_folder()
-
-    @property
-    def user_runtime_dir(self) -> str:
-        """
-        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
-          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
-        """
-        path = self.user_cache_dir
-        if self.opinion:
-            path = os.path.join(path, "tmp")
-        return path
-
-
-@lru_cache(maxsize=1)
-def _android_folder() -> str | None:
-    """:return: base folder for the Android OS or None if cannot be found"""
-    try:
-        # First try to get path to android app via pyjnius
-        from jnius import autoclass
-
-        Context = autoclass("android.content.Context")  # noqa: N806
-        result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
-    except Exception:
-        # if that fails, look for an Android-style folder path on sys.path
-        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
-        for path in sys.path:
-            if pattern.match(path):
-                result = path.split("/files")[0]
-                break
-        else:
-            result = None
-    return result
-
-
-@lru_cache(maxsize=1)
-def _android_documents_folder() -> str:
-    """:return: documents folder for the Android OS"""
-    # Get directories with pyjnius
-    try:
-        from jnius import autoclass
-
-        Context = autoclass("android.content.Context")  # noqa: N806
-        Environment = autoclass("android.os.Environment")  # noqa: N806
-        documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
-    except Exception:
-        documents_dir = "/storage/emulated/0/Documents"
-
-    return documents_dir
-
-
-__all__ = [
-    "Android",
-]
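The sys.path fallback in _android_folder is plain regex matching, so it can be checked off-device; a self-contained example with a hypothetical path:

```python
import re

pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
path = "/data/user/0/com.example.app/files/lib"  # hypothetical sys.path entry

if pattern.match(path):
    print(path.split("/files")[0])  # /data/user/0/com.example.app
```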
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
deleted file mode 100644
index cbb32e19ea518eee84941b20f58d1054e84d1937..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import dataclasses
-import logging
-from collections import abc
-from typing import Any
-
-from detectron2.utils.registry import _convert_target_to_string, locate
-
-__all__ = ["dump_dataclass", "instantiate"]
-
-
-def dump_dataclass(obj: Any):
-    """
-    Dump a dataclass recursively into a dict that can be later instantiated.
-
-    Args:
-        obj: a dataclass object
-
-    Returns:
-        dict
-    """
-    assert dataclasses.is_dataclass(obj) and not isinstance(
-        obj, type
-    ), "dump_dataclass() requires an instance of a dataclass."
-    ret = {"_target_": _convert_target_to_string(type(obj))}
-    for f in dataclasses.fields(obj):
-        v = getattr(obj, f.name)
-        if dataclasses.is_dataclass(v):
-            v = dump_dataclass(v)
-        if isinstance(v, (list, tuple)):
-            v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
-        ret[f.name] = v
-    return ret
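-
-# Example (illustrative, not part of the original file):
-#   @dataclasses.dataclass
-#   class Params:
-#       lr: float = 0.1
-#   dump_dataclass(Params())  # -> {"_target_": "<defining module>.Params", "lr": 0.1}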
-
-
-def instantiate(cfg):
-    """
-    Recursively instantiate objects defined in dictionaries by
-    "_target_" and arguments.
-
-    Args:
-        cfg: a dict-like object with "_target_" that defines the caller, and
-            other keys that define the arguments
-
-    Returns:
-        object instantiated by cfg
-    """
-    from omegaconf import ListConfig
-
-    if isinstance(cfg, ListConfig):
-        lst = [instantiate(x) for x in cfg]
-        return ListConfig(lst, flags={"allow_objects": True})
-    if isinstance(cfg, list):
-        # Specialize for list, because many classes take
-        # list[objects] as arguments, such as ResNet, DatasetMapper
-        return [instantiate(x) for x in cfg]
-
-    if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
-        # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
-        # but faster: https://github.com/facebookresearch/hydra/issues/1200
-        cfg = {k: instantiate(v) for k, v in cfg.items()}
-        cls = cfg.pop("_target_")
-        cls = instantiate(cls)
-
-        if isinstance(cls, str):
-            cls_name = cls
-            cls = locate(cls_name)
-            assert cls is not None, cls_name
-        else:
-            try:
-                cls_name = cls.__module__ + "." + cls.__qualname__
-            except Exception:
-                # target could be anything, so the above could fail
-                cls_name = str(cls)
-        assert callable(cls), f"_target_ {cls} does not define a callable object"
-        try:
-            return cls(**cfg)
-        except TypeError:
-            logger = logging.getLogger(__name__)
-            logger.error(f"Error when instantiating {cls_name}!")
-            raise
-    return cfg  # return as-is if we don't know what to do
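-
-
-# Example (illustrative, not part of the original file):
-#   instantiate({"_target_": "datetime.timedelta", "days": 1, "hours": 6})
-#   # -> datetime.timedelta(days=1, seconds=21600)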
diff --git a/spaces/Uday007/Oil-Price-Predictor/app.py b/spaces/Uday007/Oil-Price-Predictor/app.py
deleted file mode 100644
index cd5102c50c9343f37f1019a30b3ca59d73fca666..0000000000000000000000000000000000000000
--- a/spaces/Uday007/Oil-Price-Predictor/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import gradio as gr
-import pandas as pd
-import numpy as np
-from joblib import load
-
-def predict_price(
-    Mendacium,Depth
-):
-
-    model=load("oil_predictor.jb")
-
-    # Build a dict of single-element lists (one row) from the parameters
-    data={
-       "Mendacium":[Mendacium],
-       "Depth":[Depth]
-    }
-
-    xin=pd.DataFrame(data)
-    price=model.predict(xin)
-    return price[0]
-
-ui=gr.Interface(
-    fn=predict_price,
-    inputs=[ 
-        gr.inputs.Textbox(placeholder="Mendacium",numeric=True,label="MENDACIUM"),
-        gr.inputs.Textbox(placeholder="Depth",numeric=True,label="DEPTH")
-    ],
-
-    title="OIL PRICE PREDICTOR",
-    outputs="text",
-    examples=[[3.681,1958.027],[5.21,951.957],[11.612,2008.463]]
-)
-
-if __name__=="__main__":
-    ui.launch()
- 
\ No newline at end of file
diff --git a/spaces/Volkopat/arXivGPT/template.py b/spaces/Volkopat/arXivGPT/template.py
deleted file mode 100644
index 1f168a14ab9898893a4c72b5087676b642447db1..0000000000000000000000000000000000000000
--- a/spaces/Volkopat/arXivGPT/template.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from langchain.prompts.prompt import PromptTemplate
-
-_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
-Chat History:
-{chat_history}
-Follow Up Input: {question}
-Standalone question:"""
-CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
-
-template = """You are an AI assistant for answering questions about the contents of the research paper in Arxiv.
-You are given the following extracted parts of a long document and a question. Provide a conversational answer.
-If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
-Question: {question}
-=========
-{context}
-=========
-Answer in Markdown:"""
-QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
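-
-# Example (illustrative, not part of the original file):
-#   prompt = CONDENSE_QUESTION_PROMPT.format(
-#       chat_history="Human: What is the paper about?\nAI: It studies ...",
-#       question="Who are the authors?")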
diff --git a/spaces/XzJosh/nanami-Bert-VITS2/train_ms.py b/spaces/XzJosh/nanami-Bert-VITS2/train_ms.py
deleted file mode 100644
index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/nanami-Bert-VITS2/train_ms.py
+++ /dev/null
@@ -1,402 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-import shutil
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-from tqdm import tqdm
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import commons
-import utils
-from data_utils import (
-    TextAudioSpeakerLoader,
-    TextAudioSpeakerCollate,
-    DistributedBucketSampler
-)
-from models import (
-    SynthesizerTrn,
-    MultiPeriodDiscriminator,
-    DurationDiscriminator,
-)
-from losses import (
-    generator_loss,
-    discriminator_loss,
-    feature_loss,
-    kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-torch.set_float32_matmul_precision('medium')
-global_step = 0
-
-
-def main():
-    """Assume Single Node Multi GPUs Training Only"""
-    assert torch.cuda.is_available(), "CPU training is not allowed."
-
-    n_gpus = torch.cuda.device_count()
-    os.environ['MASTER_ADDR'] = 'localhost'
-    os.environ['MASTER_PORT'] = '65280'
-
-    hps = utils.get_hparams()
-    if not hps.cont:
-        shutil.copy('./pretrained_models/D_0.pth', './logs/OUTPUT_MODEL/D_0.pth')
-        shutil.copy('./pretrained_models/G_0.pth', './logs/OUTPUT_MODEL/G_0.pth')
-        shutil.copy('./pretrained_models/DUR_0.pth', './logs/OUTPUT_MODEL/DUR_0.pth')
-    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
-    global global_step
-    if rank == 0:
-        logger = utils.get_logger(hps.model_dir)
-        logger.info(hps)
-        utils.check_git_hash(hps.model_dir)
-        writer = SummaryWriter(log_dir=hps.model_dir)
-        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-    dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
-    torch.manual_seed(hps.train.seed)
-    torch.cuda.set_device(rank)
-
-    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
-    train_sampler = DistributedBucketSampler(
-        train_dataset,
-        hps.train.batch_size,
-        [32, 300, 400, 500, 600, 700, 800, 900, 1000],
-        num_replicas=n_gpus,
-        rank=rank,
-        shuffle=True)
-    collate_fn = TextAudioSpeakerCollate()
-    train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
-                              collate_fn=collate_fn, batch_sampler=train_sampler)
-    if rank == 0:
-        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
-        eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
-                                 batch_size=1, pin_memory=True,
-                                 drop_last=False, collate_fn=collate_fn)
-    if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True:
-        print("Using noise scaled MAS for VITS2")
-        use_noise_scaled_mas = True
-        mas_noise_scale_initial = 0.01
-        noise_scale_delta = 2e-6
-    else:
-        print("Using normal MAS for VITS1")
-        use_noise_scaled_mas = False
-        mas_noise_scale_initial = 0.0
-        noise_scale_delta = 0.0
-    if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True:
-        print("Using duration discriminator for VITS2")
-        use_duration_discriminator = True
-        net_dur_disc = DurationDiscriminator(
-         hps.model.hidden_channels, 
-         hps.model.hidden_channels, 
-         3, 
-         0.1, 
-         gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
-         ).cuda(rank)
-    if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True:
-        if hps.data.n_speakers == 0:
-            raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model")
-        use_spk_conditioned_encoder = True
-    else:
-        print("Using normal encoder for VITS1")
-        use_spk_conditioned_encoder = False
-
-    net_g = SynthesizerTrn(
-        len(symbols),
-        hps.data.filter_length // 2 + 1,
-        hps.train.segment_size // hps.data.hop_length,
-        n_speakers=hps.data.n_speakers,
-        mas_noise_scale_initial = mas_noise_scale_initial,
-        noise_scale_delta = noise_scale_delta,
-        **hps.model).cuda(rank)
-
-    freeze_enc = getattr(hps.model, "freeze_enc", False)
-    if freeze_enc:
-        print("freeze encoder !!!")
-        for param in net_g.enc_p.parameters():
-            param.requires_grad = False
-
-    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
-    optim_g = torch.optim.AdamW(
-        filter(lambda p: p.requires_grad, net_g.parameters()),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    optim_d = torch.optim.AdamW(
-        net_d.parameters(),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    if net_dur_disc is not None:
-        optim_dur_disc = torch.optim.AdamW(
-            net_dur_disc.parameters(),
-            hps.train.learning_rate,
-            betas=hps.train.betas,
-            eps=hps.train.eps)
-    else:
-        optim_dur_disc = None
-    net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
-    net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
-    if net_dur_disc is not None:
-        net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
-
-    pretrain_dir = None
-    if pretrain_dir is None:
-        try:
-            if net_dur_disc is not None:
-                _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont)
-            _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
-                                                   optim_g, skip_optimizer=not hps.cont)
-            _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
-                                                   optim_d, skip_optimizer=not hps.cont)
-            
-            epoch_str = max(epoch_str, 1)
-            global_step = (epoch_str - 1) * len(train_loader)
-        except Exception as e:
-            print(e)
-            epoch_str = 1
-            global_step = 0
-    else:
-        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
-                                                   optim_g, True)
-        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
-                                                   optim_d, True)
-
-
-
-    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
-    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
-    if net_dur_disc is not None:
-        scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-    else:
-        scheduler_dur_disc = None
-    scaler = GradScaler(enabled=hps.train.fp16_run)
-
-    for epoch in range(epoch_str, hps.train.epochs + 1):
-        if rank == 0:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
-        else:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
-        scheduler_g.step()
-        scheduler_d.step()
-        if net_dur_disc is not None:
-            scheduler_dur_disc.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
-    net_g, net_d, net_dur_disc = nets
-    optim_g, optim_d, optim_dur_disc = optims
-    scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
-    train_loader, eval_loader = loaders
-    if writers is not None:
-        writer, writer_eval = writers
-
-    train_loader.batch_sampler.set_epoch(epoch)
-    global global_step
-
-    net_g.train()
-    net_d.train()
-    if net_dur_disc is not None:
-        net_dur_disc.train()
-    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
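-        # Linearly anneal the MAS noise scale: initial - delta * global_step, floored at 0.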
-        if net_g.module.use_noise_scaled_mas:
-            current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
-            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
-        x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
-        spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-        y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-        speakers = speakers.cuda(rank, non_blocking=True)
-        tone = tone.cuda(rank, non_blocking=True)
-        language = language.cuda(rank, non_blocking=True)
-        bert = bert.cuda(rank, non_blocking=True)
-
-        with autocast(enabled=hps.train.fp16_run):
-            y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
-                (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
-            mel = spec_to_mel_torch(
-                spec,
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax)
-            y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
-            y_hat_mel = mel_spectrogram_torch(
-                y_hat.squeeze(1),
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.hop_length,
-                hps.data.win_length,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax
-            )
-
-            y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice
-
-            # Discriminator
-            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-            with autocast(enabled=False):
-                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
-                loss_disc_all = loss_disc
-            if net_dur_disc is not None:
-                y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
-                with autocast(enabled=False):
-                    # TODO: the loss should probably be averaged using the mask; for now it averages over everything.
-                    loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
-                    loss_dur_disc_all = loss_dur_disc
-                optim_dur_disc.zero_grad()
-                scaler.scale(loss_dur_disc_all).backward()
-                scaler.unscale_(optim_dur_disc)
-                grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
-                scaler.step(optim_dur_disc)
-
-        optim_d.zero_grad()
-        scaler.scale(loss_disc_all).backward()
-        scaler.unscale_(optim_d)
-        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
-        scaler.step(optim_d)
-
-        with autocast(enabled=hps.train.fp16_run):
-            # Generator
-            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
-            if net_dur_disc is not None:
-                y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
-            with autocast(enabled=False):
-                loss_dur = torch.sum(l_length.float())
-                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
-                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
-                loss_fm = feature_loss(fmap_r, fmap_g)
-                loss_gen, losses_gen = generator_loss(y_d_hat_g)
-                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
-                if net_dur_disc is not None:
-                    loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
-                    loss_gen_all += loss_dur_gen
-        optim_g.zero_grad()
-        scaler.scale(loss_gen_all).backward()
-        scaler.unscale_(optim_g)
-        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
-        scaler.step(optim_g)
-        scaler.update()
-
-        if rank == 0:
-            if global_step % hps.train.log_interval == 0:
-                lr = optim_g.param_groups[0]['lr']
-                losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
-                logger.info('Train Epoch: {} [{:.0f}%]'.format(
-                    epoch,
-                    100. * batch_idx / len(train_loader)))
-                logger.info([x.item() for x in losses] + [global_step, lr])
-
-                scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
-                               "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
-                scalar_dict.update(
-                    {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-                scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
-                scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
-                scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-          
-                image_dict = {
-                    "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
-                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
-                    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
-                    "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
-                }
-                utils.summarize(
-                    writer=writer,
-                    global_step=global_step,
-                    images=image_dict,
-                    scalars=scalar_dict)
-
-            if global_step % hps.train.eval_interval == 0:
-                evaluate(hps, net_g, eval_loader, writer_eval)
-                utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
-                                      os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
-                utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
-                                      os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
-                if net_dur_disc is not None:
-                    utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)))    
-                keep_ckpts = getattr(hps.train, 'keep_ckpts', 5)
-                if keep_ckpts > 0:
-                    utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
-
-
-        global_step += 1
-
-    if rank == 0:
-        logger.info('====> Epoch: {}'.format(epoch))
-
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
-    generator.eval()
-    image_dict = {}
-    audio_dict = {}
-    print("Evaluating ...")
-    with torch.no_grad():
-        for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader):
-            x, x_lengths = x.cuda(), x_lengths.cuda()
-            spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
-            y, y_lengths = y.cuda(), y_lengths.cuda()
-            speakers = speakers.cuda()
-            bert = bert.cuda()
-            tone = tone.cuda()
-            language = language.cuda()
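-            # Infer each sample twice: with the stochastic duration predictor (sdp_ratio=1.0) and without it (0.0).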
-            for use_sdp in [True, False]:
-                y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0)
-                y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
-
-                mel = spec_to_mel_torch(
-                    spec,
-                    hps.data.filter_length,
-                    hps.data.n_mel_channels,
-                    hps.data.sampling_rate,
-                    hps.data.mel_fmin,
-                    hps.data.mel_fmax)
-                y_hat_mel = mel_spectrogram_torch(
-                    y_hat.squeeze(1).float(),
-                    hps.data.filter_length,
-                    hps.data.n_mel_channels,
-                    hps.data.sampling_rate,
-                    hps.data.hop_length,
-                    hps.data.win_length,
-                    hps.data.mel_fmin,
-                    hps.data.mel_fmax
-                )
-                image_dict.update({
-                    f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
-                })
-                audio_dict.update({
-                    f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]]
-                })
-                image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
-                audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]})
-
-    utils.summarize(
-        writer=writer_eval,
-        global_step=global_step,
-        images=image_dict,
-        audios=audio_dict,
-        audio_sampling_rate=hps.data.sampling_rate
-    )
-    generator.train()
-
-if __name__ == "__main__":
-    main()
diff --git a/spaces/XzJosh/ranran-Bert-VITS2/bert_gen.py b/spaces/XzJosh/ranran-Bert-VITS2/bert_gen.py
deleted file mode 100644
index 44814715396ffc3abe84a12c74d66293c356eb4f..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/ranran-Bert-VITS2/bert_gen.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import torch
-from torch.utils.data import DataLoader
-from multiprocessing import Pool
-import commons
-import utils
-from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate
-from tqdm import tqdm
-import warnings
-
-from text import cleaned_text_to_sequence, get_bert
-
-config_path = 'configs/config.json'
-hps = utils.get_hparams_from_file(config_path)
-
-def process_line(line):
-    _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|")
-    phone = phones.split(" ")
-    tone = [int(i) for i in tone.split(" ")]
-    word2ph = [int(i) for i in word2ph.split(" ")]
-    word2ph = [i for i in word2ph]  # copy before the in-place scaling below
-    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-    if hps.data.add_blank:
-        phone = commons.intersperse(phone, 0)
-        tone = commons.intersperse(tone, 0)
-        language = commons.intersperse(language, 0)
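-        # Blanks double each word's phone span; the extra leading blank is credited to the first word.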
-        for i in range(len(word2ph)):
-            word2ph[i] = word2ph[i] * 2
-        word2ph[0] += 1
-    wav_path = f'{_id}'
-
-    bert_path = wav_path.replace(".wav", ".bert.pt")
-    try:
-        bert = torch.load(bert_path)
-        assert bert.shape[-1] == len(phone)
-    except Exception:
-        bert = get_bert(text, word2ph, language_str)
-        assert bert.shape[-1] == len(phone)
-        torch.save(bert, bert_path)
-
-
-if __name__ == '__main__':
-    lines = []
-    with open(hps.data.training_files, encoding='utf-8') as f:
-        lines.extend(f.readlines())
-
-    with open(hps.data.validation_files, encoding='utf-8') as f:
-        lines.extend(f.readlines())
-
-    with Pool(processes=12) as pool:  # suitable for an A100 40GB; if you hit OOM, reduce the number of processes.
-        for _ in tqdm(pool.imap_unordered(process_line, lines)):
-            pass
diff --git a/spaces/Yah216/Arabic-Sentiment-Analyser/README.md b/spaces/Yah216/Arabic-Sentiment-Analyser/README.md
deleted file mode 100644
index a2d2fd159ebfa4985c623ed6365c3ef3a8690b04..0000000000000000000000000000000000000000
--- a/spaces/Yah216/Arabic-Sentiment-Analyser/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Arabic sentiment analyzer
-emoji: 📖
-colorFrom: blue
-colorTo: indigo
-sdk: streamlit
-app_file: app.py
-pinned: true
----
-
-# Configuration
-
-`title`: _string_  
-Display title for the Space
-
-`emoji`: _string_  
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_  
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_  
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_  
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_  
-Only applicable for `streamlit` SDK.  
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_  
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).  
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_  
-Whether the Space stays on top of your list.
diff --git a/spaces/Yiqin/ChatVID/model/Vicuna.py b/spaces/Yiqin/ChatVID/model/Vicuna.py
deleted file mode 100644
index 3442b9abdec184f48fd69b05991f8b2c694af435..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/Vicuna.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from model.fastchat.conversation import (Conversation, SeparatorStyle,
-                                         compute_skip_echo_len)
-from model.fastchat.serve.inference import ChatIO, generate_stream, load_model
-
-
-class SimpleChatIO(ChatIO):
-
-    def prompt_for_input(self, role) -> str:
-        return input(f"{role}: ")
-
-    def prompt_for_output(self, role: str):
-        print(f"{role}: ", end="", flush=True)
-
-    def stream_output(self, output_stream, skip_echo_len: int):
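-        # Emit only the words completed since the last chunk; flush the trailing partial word at the end.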
-        pre = 0
-        for outputs in output_stream:
-            outputs = outputs[skip_echo_len:].strip()
-            outputs = outputs.split(" ")
-            now = len(outputs) - 1
-            if now > pre:
-                print(" ".join(outputs[pre:now]), end=" ", flush=True)
-                pre = now
-        print(" ".join(outputs[pre:]), flush=True)
-        return " ".join(outputs)
-
-
-class VicunaChatBot:
-
-    def __init__(
-        self,
-        model_path: str,
-        device: str,
-        num_gpus: str,
-        max_gpu_memory: str,
-        load_8bit: bool,
-        ChatIO: ChatIO,
-        debug: bool,
-    ):
-        self.model_path = model_path
-        self.device = device
-        self.chatio = ChatIO
-        self.debug = debug
-
-        self.model, self.tokenizer = load_model(self.model_path, device,
-                                                num_gpus, max_gpu_memory,
-                                                load_8bit, debug)
-
-    def chat(self, inp: str, temperature: float, max_new_tokens: int,
-             conv: Conversation):
-        """ Vicuna as a chatbot. """
-        conv.append_message(conv.roles[0], inp)
-        conv.append_message(conv.roles[1], None)
-
-        generate_stream_func = generate_stream
-        prompt = conv.get_prompt()
-
-        skip_echo_len = compute_skip_echo_len(self.model_path, conv, prompt)
-        stop_str = (
-            conv.sep if conv.sep_style
-            in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE] else None)
-        params = {
-            "model": self.model_path,
-            "prompt": prompt,
-            "temperature": temperature,
-            "max_new_tokens": max_new_tokens,
-            "stop": stop_str,
-        }
-        print(prompt)
-        self.chatio.prompt_for_output(conv.roles[1])
-        output_stream = generate_stream_func(self.model, self.tokenizer,
-                                             params, self.device)
-        outputs = self.chatio.stream_output(output_stream, skip_echo_len)
-        # NOTE: strip is important to align with the training data.
-        conv.messages[-1][-1] = outputs.strip()
-        return outputs, conv
-
-
-class VicunaHandler:
-    """ VicunaHandler is a class that handles the communication between the
-    frontend and the backend. """
-
-    def __init__(self, config):
-        self.config = config
-        self.chat_io = SimpleChatIO()
-        self.chatbot = VicunaChatBot(
-            self.config['model_path'],
-            self.config['device'],
-            self.config['num_gpus'],
-            self.config['max_gpu_memory'],
-            self.config['load_8bit'],
-            self.chat_io,
-            self.config['debug'],
-        )
-
-    def chat(self):
-        """ Chat with the Vicuna. """
-        pass
-
-    def gr_chatbot_init(self, caption: str):
-        """ Initialise the chatbot for gradio. """
-
-        template = self._construct_conversation(caption)
-        print("Chatbot initialised.")
-        return template.copy(), template.copy()
-
-    def gr_chat(self, inp, conv: Conversation):
-        """ Chat using gradio as the frontend. """
-        return self.chatbot.chat(inp, self.config['temperature'],
-                                 self.config['max_new_tokens'], conv)
-
-    def _construct_conversation(self, prompt):
-        """ Construct a conversation template.
-        Args:
-            prompt: the prompt for the conversation.
-        """
-
-        user_message = "The following text described what you have " +\
-            "seen, found, heard and notice from a consecutive video." +\
-            " Some of the texts may not be accurate. " +\
-            "Try to conclude what happens in the video, " +\
-            "then answer my question based on your conclusion.\n" +\
-            "<video begin>\n" + prompt + "<video end>\n" +\
-            "Example: Is this a Video?"
-
-        user_message = user_message.strip()
-
-        print(user_message)
-
-        return Conversation(
-            system=
-            "A chat between a curious user and an artificial intelligence assistant answering quetions on videos."
-            "The assistant answers the questions based on the given video captions and speech in time order.",
-            roles=("USER", "ASSISTANT"),
-            messages=(("USER", user_message), ("ASSISTANT", "yes")),
-            offset=0,
-            sep_style=SeparatorStyle.TWO,
-            sep=" ",
-            sep2="</s>",
-        )
diff --git a/spaces/Yiqin/ChatVID/model/summary/__init__.py b/spaces/Yiqin/ChatVID/model/summary/__init__.py
deleted file mode 100644
index 434d199a9b30a69be239b433aa888df4502fcf56..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/summary/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .TextSummarizer import TextSummarizer
\ No newline at end of file
diff --git a/spaces/Yuelili/RealNagrse/FAQ.md b/spaces/Yuelili/RealNagrse/FAQ.md
deleted file mode 100644
index caa8c08cfe4302eb8812c823569e8a0be30fa49c..0000000000000000000000000000000000000000
--- a/spaces/Yuelili/RealNagrse/FAQ.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# FAQ
-
-1. **What is the difference between `--netscale` and `outscale`?**
-
-A: TODO.
-
-1. **How to select models?**
-
-A: TODO.
diff --git a/spaces/abdulmeLINK/programmer-bloom/README.md b/spaces/abdulmeLINK/programmer-bloom/README.md
deleted file mode 100644
index a33e7beb53ee4021e628432f9709175a64154ff8..0000000000000000000000000000000000000000
--- a/spaces/abdulmeLINK/programmer-bloom/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Programmer Bloom
-emoji: 📈
-colorFrom: indigo
-colorTo: red
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
-models:
-  - bigscience/bloom
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abhishek/scikit-learn-tabular-playground/README.md b/spaces/abhishek/scikit-learn-tabular-playground/README.md
deleted file mode 100644
index 02616bc5601c08f4dfcafed2e7fb963dcc658cd5..0000000000000000000000000000000000000000
--- a/spaces/abhishek/scikit-learn-tabular-playground/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Scikit Learn Tabular Playground
-emoji: 🐢
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py
deleted file mode 100644
index caed51d89ffc1259d0b086954f03c3d4c0749cf2..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py
+++ /dev/null
@@ -1,1811 +0,0 @@
-import copy
-import inspect
-
-import mmcv
-import numpy as np
-from numpy import random
-
-from mmdet.core import PolygonMasks
-from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
-from ..builder import PIPELINES
-
-try:
-    from imagecorruptions import corrupt
-except ImportError:
-    corrupt = None
-
-try:
-    import albumentations
-    from albumentations import Compose
-except ImportError:
-    albumentations = None
-    Compose = None
-
-
-@PIPELINES.register_module()
-class Resize(object):
-    """Resize images & bbox & mask.
-
-    This transform resizes the input image to some scale. Bboxes and masks are
-    then resized with the same scale factor. If the input dict contains the key
-    "scale", then the scale in the input dict is used, otherwise the specified
-    scale in the init method is used. If the input dict contains the key
-    "scale_factor" (if MultiScaleFlipAug does not give img_scale but
-    scale_factor), the actual scale will be computed by image shape and
-    scale_factor.
-
-    `img_scale` can either be a tuple (single-scale) or a list of tuple
-    (multi-scale). There are 3 multiscale modes:
-
-    - ``ratio_range is not None``: randomly sample a ratio from the ratio \
-      range and multiply it with the image scale.
-    - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
-      sample a scale from the multiscale range.
-    - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
-      sample a scale from multiple scales.
-
-    Args:
-        img_scale (tuple or list[tuple]): Images scales for resizing.
-        multiscale_mode (str): Either "range" or "value".
-        ratio_range (tuple[float]): (min_ratio, max_ratio)
-        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
-            image.
-        bbox_clip_border (bool, optional): Whether clip the objects outside
-            the border of the image. Defaults to True.
-        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
-            These two backends generate slightly different results. Defaults
-            to 'cv2'.
-        override (bool, optional): Whether to override `scale` and
-            `scale_factor` so as to call resize twice. If True, after the
-            first resizing, the existing `scale` and `scale_factor` will be
-            ignored so that a second resize can be performed. This option is
-            a workaround for the multiple resizes needed in DETR. Defaults
-            to False.
-    """
-
-    def __init__(self,
-                 img_scale=None,
-                 multiscale_mode='range',
-                 ratio_range=None,
-                 keep_ratio=True,
-                 bbox_clip_border=True,
-                 backend='cv2',
-                 override=False):
-        if img_scale is None:
-            self.img_scale = None
-        else:
-            if isinstance(img_scale, list):
-                self.img_scale = img_scale
-            else:
-                self.img_scale = [img_scale]
-            assert mmcv.is_list_of(self.img_scale, tuple)
-
-        if ratio_range is not None:
-            # mode 1: given a scale and a range of image ratio
-            assert len(self.img_scale) == 1
-        else:
-            # mode 2: given multiple scales or a range of scales
-            assert multiscale_mode in ['value', 'range']
-
-        self.backend = backend
-        self.multiscale_mode = multiscale_mode
-        self.ratio_range = ratio_range
-        self.keep_ratio = keep_ratio
-        # TODO: refactor the override option in Resize
-        self.override = override
-        self.bbox_clip_border = bbox_clip_border
-
-    @staticmethod
-    def random_select(img_scales):
-        """Randomly select an img_scale from given candidates.
-
-        Args:
-            img_scales (list[tuple]): Images scales for selection.
-
-        Returns:
-            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
-                where ``img_scale`` is the selected image scale and \
-                ``scale_idx`` is the selected index in the given candidates.
-        """
-
-        assert mmcv.is_list_of(img_scales, tuple)
-        scale_idx = np.random.randint(len(img_scales))
-        img_scale = img_scales[scale_idx]
-        return img_scale, scale_idx
-
-    @staticmethod
-    def random_sample(img_scales):
-        """Randomly sample an img_scale when ``multiscale_mode=='range'``.
-
-        Args:
-            img_scales (list[tuple]): Images scale range for sampling.
-                There must be two tuples in img_scales, which specify the lower
-                and upper bound of image scales.
-
-        Returns:
-            (tuple, None): Returns a tuple ``(img_scale, None)``, where \
-                ``img_scale`` is sampled scale and None is just a placeholder \
-                to be consistent with :func:`random_select`.
-        """
-
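-        # e.g. img_scales=[(1333, 640), (1333, 800)]: the long edge is sampled
-        # from [1333, 1333] and the short edge from [640, 800].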
-        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
-        img_scale_long = [max(s) for s in img_scales]
-        img_scale_short = [min(s) for s in img_scales]
-        long_edge = np.random.randint(
-            min(img_scale_long),
-            max(img_scale_long) + 1)
-        short_edge = np.random.randint(
-            min(img_scale_short),
-            max(img_scale_short) + 1)
-        img_scale = (long_edge, short_edge)
-        return img_scale, None
-
-    @staticmethod
-    def random_sample_ratio(img_scale, ratio_range):
-        """Randomly sample an img_scale when ``ratio_range`` is specified.
-
-        A ratio will be randomly sampled from the range specified by
-        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
-        generate sampled scale.
-
-        Args:
-            img_scale (tuple): Images scale base to multiply with ratio.
-            ratio_range (tuple[float]): The minimum and maximum ratio to scale
-                the ``img_scale``.
-
-        Returns:
-            (tuple, None): Returns a tuple ``(scale, None)``, where \
-                ``scale`` is sampled ratio multiplied with ``img_scale`` and \
-                None is just a placeholder to be consistent with \
-                :func:`random_select`.
-        """
-
-        assert isinstance(img_scale, tuple) and len(img_scale) == 2
-        min_ratio, max_ratio = ratio_range
-        assert min_ratio <= max_ratio
-        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
-        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
-        return scale, None
-
-    def _random_scale(self, results):
-        """Randomly sample an img_scale according to ``ratio_range`` and
-        ``multiscale_mode``.
-
-        If ``ratio_range`` is specified, a ratio will be sampled and be
-        multiplied with ``img_scale``.
-        If multiple scales are specified by ``img_scale``, a scale will be
-        sampled according to ``multiscale_mode``.
-        Otherwise, single scale will be used.
-
-        Args:
-            results (dict): Result dict from :obj:`dataset`.
-
-        Returns:
-            dict: Two new keys 'scale' and 'scale_idx' are added into \
-                ``results``, which would be used by subsequent pipelines.
-        """
-
-        if self.ratio_range is not None:
-            scale, scale_idx = self.random_sample_ratio(
-                self.img_scale[0], self.ratio_range)
-        elif len(self.img_scale) == 1:
-            scale, scale_idx = self.img_scale[0], 0
-        elif self.multiscale_mode == 'range':
-            scale, scale_idx = self.random_sample(self.img_scale)
-        elif self.multiscale_mode == 'value':
-            scale, scale_idx = self.random_select(self.img_scale)
-        else:
-            raise NotImplementedError
-
-        results['scale'] = scale
-        results['scale_idx'] = scale_idx
-
-    def _resize_img(self, results):
-        """Resize images with ``results['scale']``."""
-        for key in results.get('img_fields', ['img']):
-            if self.keep_ratio:
-                img, scale_factor = mmcv.imrescale(
-                    results[key],
-                    results['scale'],
-                    return_scale=True,
-                    backend=self.backend)
-                # the w_scale and h_scale may differ slightly;
-                # a real fix should be done in mmcv.imrescale in the future
-                new_h, new_w = img.shape[:2]
-                h, w = results[key].shape[:2]
-                w_scale = new_w / w
-                h_scale = new_h / h
-            else:
-                img, w_scale, h_scale = mmcv.imresize(
-                    results[key],
-                    results['scale'],
-                    return_scale=True,
-                    backend=self.backend)
-            results[key] = img
-
-            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
-                                    dtype=np.float32)
-            results['img_shape'] = img.shape
-            # in case that there is no padding
-            results['pad_shape'] = img.shape
-            results['scale_factor'] = scale_factor
-            results['keep_ratio'] = self.keep_ratio
-
-    def _resize_bboxes(self, results):
-        """Resize bounding boxes with ``results['scale_factor']``."""
-        for key in results.get('bbox_fields', []):
-            bboxes = results[key] * results['scale_factor']
-            if self.bbox_clip_border:
-                img_shape = results['img_shape']
-                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
-                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
-            results[key] = bboxes
-
-    def _resize_masks(self, results):
-        """Resize masks with ``results['scale']``"""
-        for key in results.get('mask_fields', []):
-            if results[key] is None:
-                continue
-            if self.keep_ratio:
-                results[key] = results[key].rescale(results['scale'])
-            else:
-                results[key] = results[key].resize(results['img_shape'][:2])
-
-    def _resize_seg(self, results):
-        """Resize semantic segmentation map with ``results['scale']``."""
-        for key in results.get('seg_fields', []):
-            if self.keep_ratio:
-                gt_seg = mmcv.imrescale(
-                    results[key],
-                    results['scale'],
-                    interpolation='nearest',
-                    backend=self.backend)
-            else:
-                gt_seg = mmcv.imresize(
-                    results[key],
-                    results['scale'],
-                    interpolation='nearest',
-                    backend=self.backend)
-            results['gt_semantic_seg'] = gt_seg
-
-    def __call__(self, results):
-        """Call function to resize images, bounding boxes, masks, semantic
-        segmentation map.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
-                'keep_ratio' keys are added into result dict.
-        """
-
-        if 'scale' not in results:
-            if 'scale_factor' in results:
-                img_shape = results['img'].shape[:2]
-                scale_factor = results['scale_factor']
-                assert isinstance(scale_factor, float)
-                results['scale'] = tuple(
-                    [int(x * scale_factor) for x in img_shape][::-1])
-            else:
-                self._random_scale(results)
-        else:
-            if not self.override:
-                assert 'scale_factor' not in results, (
-                    'scale and scale_factor cannot be both set.')
-            else:
-                results.pop('scale')
-                if 'scale_factor' in results:
-                    results.pop('scale_factor')
-                self._random_scale(results)
-
-        self._resize_img(results)
-        self._resize_bboxes(results)
-        self._resize_masks(results)
-        self._resize_seg(results)
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(img_scale={self.img_scale}, '
-        repr_str += f'multiscale_mode={self.multiscale_mode}, '
-        repr_str += f'ratio_range={self.ratio_range}, '
-        repr_str += f'keep_ratio={self.keep_ratio}, '
-        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
-        return repr_str
-
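-# Example pipeline entry (illustrative, not part of the original file):
-#   dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
-#        multiscale_mode='range', keep_ratio=True)
-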
-
-@PIPELINES.register_module()
-class RandomFlip(object):
-    """Flip the image & bbox & mask.
-
-    If the input dict contains the key "flip", then the flag will be used,
-    otherwise it will be randomly decided by a ratio specified in the init
-    method.
-
-    When random flip is enabled, ``flip_ratio``/``direction`` can either be a
-    float/string or tuple of float/string. There are 3 flip modes:
-
-    - ``flip_ratio`` is float, ``direction`` is string: the image will be
-        ``direction``ly flipped with probability of ``flip_ratio``.
-        E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
-        then image will be horizontally flipped with probability of 0.5.
-    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
-        be ``direction[i]``ly flipped with probability of
-        ``flip_ratio/len(direction)``.
-        E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
-        then image will be horizontally flipped with probability of 0.25,
-        vertically with probability of 0.25.
-    - ``flip_ratio`` is list of float, ``direction`` is list of string:
-        given ``len(flip_ratio) == len(direction)``, the image will
-        be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
-        E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
-        'vertical']``, then image will be horizontally flipped with
-        probability of 0.3, vertically with probability of 0.5.
-
-    Args:
-        flip_ratio (float | list[float], optional): The flipping probability.
-            Default: None.
-        direction(str | list[str], optional): The flipping direction. Options
-            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
-            If input is a list, the length must equal ``flip_ratio``. Each
-            element in ``flip_ratio`` indicates the flip probability of
-            corresponding direction.
-    """
-
-    def __init__(self, flip_ratio=None, direction='horizontal'):
-        if isinstance(flip_ratio, list):
-            assert mmcv.is_list_of(flip_ratio, float)
-            assert 0 <= sum(flip_ratio) <= 1
-        elif isinstance(flip_ratio, float):
-            assert 0 <= flip_ratio <= 1
-        elif flip_ratio is None:
-            pass
-        else:
-            raise ValueError('flip_ratios must be None, float, '
-                             'or list of float')
-        self.flip_ratio = flip_ratio
-
-        valid_directions = ['horizontal', 'vertical', 'diagonal']
-        if isinstance(direction, str):
-            assert direction in valid_directions
-        elif isinstance(direction, list):
-            assert mmcv.is_list_of(direction, str)
-            assert set(direction).issubset(set(valid_directions))
-        else:
-            raise ValueError('direction must be either str or list of str')
-        self.direction = direction
-
-        if isinstance(flip_ratio, list):
-            assert len(self.flip_ratio) == len(self.direction)
-
-    def bbox_flip(self, bboxes, img_shape, direction):
-        """Flip bboxes horizontally.
-
-        Args:
-            bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
-            img_shape (tuple[int]): Image shape (height, width)
-            direction (str): Flip direction. Options are 'horizontal',
-                'vertical'.
-
-        Returns:
-            numpy.ndarray: Flipped bounding boxes.
-        """
-
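-        # e.g. with w=100, box (10, 20, 30, 40) flips horizontally to (70, 20, 90, 40).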
-        assert bboxes.shape[-1] % 4 == 0
-        flipped = bboxes.copy()
-        if direction == 'horizontal':
-            w = img_shape[1]
-            flipped[..., 0::4] = w - bboxes[..., 2::4]
-            flipped[..., 2::4] = w - bboxes[..., 0::4]
-        elif direction == 'vertical':
-            h = img_shape[0]
-            flipped[..., 1::4] = h - bboxes[..., 3::4]
-            flipped[..., 3::4] = h - bboxes[..., 1::4]
-        elif direction == 'diagonal':
-            w = img_shape[1]
-            h = img_shape[0]
-            flipped[..., 0::4] = w - bboxes[..., 2::4]
-            flipped[..., 1::4] = h - bboxes[..., 3::4]
-            flipped[..., 2::4] = w - bboxes[..., 0::4]
-            flipped[..., 3::4] = h - bboxes[..., 1::4]
-        else:
-            raise ValueError(f"Invalid flipping direction '{direction}'")
-        return flipped
-
-    def __call__(self, results):
-        """Call function to flip bounding boxes, masks, semantic segmentation
-        maps.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Flipped results, 'flip', 'flip_direction' keys are added \
-                into result dict.
-        """
-
-        cur_dir = None  # keep bound even when 'flip' is preset upstream
-        if 'flip' not in results:
-            if isinstance(self.direction, list):
-                # None means non-flip
-                direction_list = self.direction + [None]
-            else:
-                # None means non-flip
-                direction_list = [self.direction, None]
-
-            if isinstance(self.flip_ratio, list):
-                non_flip_ratio = 1 - sum(self.flip_ratio)
-                flip_ratio_list = self.flip_ratio + [non_flip_ratio]
-            else:
-                non_flip_ratio = 1 - self.flip_ratio
-                # exclude non-flip
-                single_ratio = self.flip_ratio / (len(direction_list) - 1)
-                flip_ratio_list = [single_ratio] * (len(direction_list) -
-                                                    1) + [non_flip_ratio]
-
-            cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
-
-            results['flip'] = cur_dir is not None
-        if 'flip_direction' not in results:
-            results['flip_direction'] = cur_dir
-        if results['flip']:
-            # flip image
-            for key in results.get('img_fields', ['img']):
-                results[key] = mmcv.imflip(
-                    results[key], direction=results['flip_direction'])
-            # flip bboxes
-            for key in results.get('bbox_fields', []):
-                results[key] = self.bbox_flip(results[key],
-                                              results['img_shape'],
-                                              results['flip_direction'])
-            # flip masks
-            for key in results.get('mask_fields', []):
-                results[key] = results[key].flip(results['flip_direction'])
-
-            # flip segs
-            for key in results.get('seg_fields', []):
-                results[key] = mmcv.imflip(
-                    results[key], direction=results['flip_direction'])
-        return results
-
-    def __repr__(self):
-        return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
-
-
-@PIPELINES.register_module()
-class Pad(object):
-    """Pad the image & mask.
-
-    There are two padding modes: (1) pad to a fixed size and (2) pad to the
-    minimum size that is divisible by some number.
-    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
-
-    Args:
-        size (tuple, optional): Fixed padding size.
-        size_divisor (int, optional): The divisor of padded size.
-        pad_val (float, optional): Padding value, 0 by default.
-    """
-
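-    # e.g. size_divisor=32 pads a 750x1333 image up to 768x1344.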
-    def __init__(self, size=None, size_divisor=None, pad_val=0):
-        self.size = size
-        self.size_divisor = size_divisor
-        self.pad_val = pad_val
-        # only one of size and size_divisor should be valid
-        assert size is not None or size_divisor is not None
-        assert size is None or size_divisor is None
-
-    def _pad_img(self, results):
-        """Pad images according to ``self.size``."""
-        for key in results.get('img_fields', ['img']):
-            if self.size is not None:
-                padded_img = mmcv.impad(
-                    results[key], shape=self.size, pad_val=self.pad_val)
-            elif self.size_divisor is not None:
-                padded_img = mmcv.impad_to_multiple(
-                    results[key], self.size_divisor, pad_val=self.pad_val)
-            results[key] = padded_img
-        results['pad_shape'] = padded_img.shape
-        results['pad_fixed_size'] = self.size
-        results['pad_size_divisor'] = self.size_divisor
-
-    def _pad_masks(self, results):
-        """Pad masks according to ``results['pad_shape']``."""
-        pad_shape = results['pad_shape'][:2]
-        for key in results.get('mask_fields', []):
-            results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
-
-    def _pad_seg(self, results):
-        """Pad semantic segmentation map according to
-        ``results['pad_shape']``."""
-        for key in results.get('seg_fields', []):
-            results[key] = mmcv.impad(
-                results[key], shape=results['pad_shape'][:2])
-
-    def __call__(self, results):
-        """Call function to pad images, masks, semantic segmentation maps.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Updated result dict.
-        """
-        self._pad_img(results)
-        self._pad_masks(results)
-        self._pad_seg(results)
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(size={self.size}, '
-        repr_str += f'size_divisor={self.size_divisor}, '
-        repr_str += f'pad_val={self.pad_val})'
-        return repr_str
-
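-# Illustrative usage sketch (hypothetical values): exactly one of ``size`` and
-# ``size_divisor`` may be given, per the asserts in ``__init__``, e.g.
-#     dict(type='Pad', size_divisor=32)    # pad H and W up to a multiple of 32
-#     dict(type='Pad', size=(800, 1344))   # or pad to a fixed (h, w)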
-
-@PIPELINES.register_module()
-class Normalize(object):
-    """Normalize the image.
-
-    Added key is "img_norm_cfg".
-
-    Args:
-        mean (sequence): Mean values of 3 channels.
-        std (sequence): Std values of 3 channels.
-        to_rgb (bool): Whether to convert the image from BGR to RGB.
-            Default: True.
-    """
-
-    def __init__(self, mean, std, to_rgb=True):
-        self.mean = np.array(mean, dtype=np.float32)
-        self.std = np.array(std, dtype=np.float32)
-        self.to_rgb = to_rgb
-
-    def __call__(self, results):
-        """Call function to normalize images.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Normalized results, 'img_norm_cfg' key is added into
-                result dict.
-        """
-        for key in results.get('img_fields', ['img']):
-            results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
-                                            self.to_rgb)
-        results['img_norm_cfg'] = dict(
-            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
-        return repr_str
-
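-# Illustrative usage sketch (the widely used ImageNet statistics, shown here
-# only as an example):
-#     dict(type='Normalize',
-#          mean=[123.675, 116.28, 103.53],
-#          std=[58.395, 57.12, 57.375],
-#          to_rgb=True)
-# The statistics are also recorded in results['img_norm_cfg'] so that later
-# stages can recover or undo the normalization.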
-
-@PIPELINES.register_module()
-class RandomCrop(object):
-    """Random crop the image & bboxes & masks.
-
-    The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
-    then the cropped results are generated.
-
-    Args:
-        crop_size (tuple): The relative ratio or absolute pixels of
-            height and width.
-        crop_type (str, optional): one of "relative_range", "relative",
-            "absolute", "absolute_range". "relative" randomly crops
-            (h * crop_size[0], w * crop_size[1]) part from an input of size
-            (h, w). "relative_range" uniformly samples relative crop size from
-            range [crop_size[0], 1] and [crop_size[1], 1] for height and width
-            respectively. "absolute" crops from an input with absolute size
-            (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
-            crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
-            in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
-        allow_negative_crop (bool, optional): Whether to allow a crop that does
-            not contain any bbox area. Default False.
-        bbox_clip_border (bool, optional): Whether clip the objects outside
-            the border of the image. Defaults to True.
-
-    Note:
-        - If the image is smaller than the absolute crop size, return the
-          original image.
-        - The keys for bboxes, labels and masks must be aligned. That is,
-          `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
-          `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
-          `gt_masks_ignore`.
-        - If the crop does not contain any gt-bbox region and
-          `allow_negative_crop` is set to False, skip this image.
-    """
-
-    def __init__(self,
-                 crop_size,
-                 crop_type='absolute',
-                 allow_negative_crop=False,
-                 bbox_clip_border=True):
-        if crop_type not in [
-                'relative_range', 'relative', 'absolute', 'absolute_range'
-        ]:
-            raise ValueError(f'Invalid crop_type {crop_type}.')
-        if crop_type in ['absolute', 'absolute_range']:
-            assert crop_size[0] > 0 and crop_size[1] > 0
-            assert isinstance(crop_size[0], int) and isinstance(
-                crop_size[1], int)
-        else:
-            assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
-        self.crop_size = crop_size
-        self.crop_type = crop_type
-        self.allow_negative_crop = allow_negative_crop
-        self.bbox_clip_border = bbox_clip_border
-        # The key correspondence from bboxes to labels and masks.
-        self.bbox2label = {
-            'gt_bboxes': 'gt_labels',
-            'gt_bboxes_ignore': 'gt_labels_ignore'
-        }
-        self.bbox2mask = {
-            'gt_bboxes': 'gt_masks',
-            'gt_bboxes_ignore': 'gt_masks_ignore'
-        }
-
-    def _crop_data(self, results, crop_size, allow_negative_crop):
-        """Function to randomly crop images, bounding boxes, masks, semantic
-        segmentation maps.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-            crop_size (tuple): Expected absolute size after cropping, (h, w).
-            allow_negative_crop (bool): Whether to allow a crop that does not
-                contain any bbox area. Default to False.
-
-        Returns:
-            dict: Randomly cropped results, 'img_shape' key in result dict is
-                updated according to crop size.
-        """
-        assert crop_size[0] > 0 and crop_size[1] > 0
-        for key in results.get('img_fields', ['img']):
-            img = results[key]
-            margin_h = max(img.shape[0] - crop_size[0], 0)
-            margin_w = max(img.shape[1] - crop_size[1], 0)
-            offset_h = np.random.randint(0, margin_h + 1)
-            offset_w = np.random.randint(0, margin_w + 1)
-            crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
-            crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
-
-            # crop the image
-            img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
-            img_shape = img.shape
-            results[key] = img
-        results['img_shape'] = img_shape
-
-        # crop bboxes accordingly and clip to the image boundary
-        for key in results.get('bbox_fields', []):
-            # e.g. gt_bboxes and gt_bboxes_ignore
-            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
-                                   dtype=np.float32)
-            bboxes = results[key] - bbox_offset
-            if self.bbox_clip_border:
-                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
-                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
-            valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
-                bboxes[:, 3] > bboxes[:, 1])
-            # If the crop does not contain any gt-bbox area and
-            # allow_negative_crop is False, skip this image.
-            if (key == 'gt_bboxes' and not valid_inds.any()
-                    and not allow_negative_crop):
-                return None
-            results[key] = bboxes[valid_inds, :]
-            # label fields. e.g. gt_labels and gt_labels_ignore
-            label_key = self.bbox2label.get(key)
-            if label_key in results:
-                results[label_key] = results[label_key][valid_inds]
-
-            # mask fields, e.g. gt_masks and gt_masks_ignore
-            mask_key = self.bbox2mask.get(key)
-            if mask_key in results:
-                results[mask_key] = results[mask_key][
-                    valid_inds.nonzero()[0]].crop(
-                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
-
-        # crop semantic seg
-        for key in results.get('seg_fields', []):
-            results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
-
-        return results
-
-    def _get_crop_size(self, image_size):
-        """Randomly generates the absolute crop size based on `crop_type` and
-        `image_size`.
-
-        Args:
-            image_size (tuple): (h, w).
-
-        Returns:
-            crop_size (tuple): (crop_h, crop_w) in absolute pixels.
-        """
-        h, w = image_size
-        if self.crop_type == 'absolute':
-            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
-        elif self.crop_type == 'absolute_range':
-            assert self.crop_size[0] <= self.crop_size[1]
-            crop_h = np.random.randint(
-                min(h, self.crop_size[0]),
-                min(h, self.crop_size[1]) + 1)
-            crop_w = np.random.randint(
-                min(w, self.crop_size[0]),
-                min(w, self.crop_size[1]) + 1)
-            return crop_h, crop_w
-        elif self.crop_type == 'relative':
-            crop_h, crop_w = self.crop_size
-            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
-        elif self.crop_type == 'relative_range':
-            crop_size = np.asarray(self.crop_size, dtype=np.float32)
-            crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
-            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
-
-    def __call__(self, results):
-        """Call function to randomly crop images, bounding boxes, masks,
-        semantic segmentation maps.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Randomly cropped results, 'img_shape' key in result dict is
-                updated according to crop size.
-        """
-        image_size = results['img'].shape[:2]
-        crop_size = self._get_crop_size(image_size)
-        results = self._crop_data(results, crop_size, self.allow_negative_crop)
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(crop_size={self.crop_size}, '
-        repr_str += f'crop_type={self.crop_type}, '
-        repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
-        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
-        return repr_str
-
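-# Illustrative sketch of how ``crop_type`` maps to an absolute crop size for
-# an input of shape (h=400, w=600) (hypothetical numbers):
-#     'absolute'        crop_size=(256, 256) -> crop exactly (256, 256)
-#     'absolute_range'  crop_size=(256, 384) -> crop_h and crop_w each sampled
-#                                               uniformly from [256, 384]
-#     'relative'        crop_size=(0.5, 0.5) -> crop (200, 300)
-#     'relative_range'  crop_size=(0.5, 0.5) -> ratios sampled from [0.5, 1],
-#                                               so crop_h in [200, 400] and
-#                                               crop_w in [300, 600]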
-
-@PIPELINES.register_module()
-class SegRescale(object):
-    """Rescale semantic segmentation maps.
-
-    Args:
-        scale_factor (float): The scale factor of the final output.
-        backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
-            These two backends generate slightly different results. Defaults
-            to 'cv2'.
-    """
-
-    def __init__(self, scale_factor=1, backend='cv2'):
-        self.scale_factor = scale_factor
-        self.backend = backend
-
-    def __call__(self, results):
-        """Call function to scale the semantic segmentation map.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Result dict with semantic segmentation map scaled.
-        """
-
-        for key in results.get('seg_fields', []):
-            if self.scale_factor != 1:
-                results[key] = mmcv.imrescale(
-                    results[key],
-                    self.scale_factor,
-                    interpolation='nearest',
-                    backend=self.backend)
-        return results
-
-    def __repr__(self):
-        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
-
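-# Illustrative usage sketch (hypothetical value): rescaling gt semantic maps
-# to 1/8 resolution for a semantic head would look like
-#     dict(type='SegRescale', scale_factor=1 / 8)
-# Nearest-neighbour interpolation is used, so label values are preserved.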
-
-@PIPELINES.register_module()
-class PhotoMetricDistortion(object):
-    """Apply photometric distortion to image sequentially, every transformation
-    is applied with a probability of 0.5. The position of random contrast is in
-    second or second to last.
-
-    1. random brightness
-    2. random contrast (mode 0)
-    3. convert color from BGR to HSV
-    4. random saturation
-    5. random hue
-    6. convert color from HSV to BGR
-    7. random contrast (mode 1)
-    8. randomly swap channels
-
-    Args:
-        brightness_delta (int): delta of brightness.
-        contrast_range (tuple): range of contrast.
-        saturation_range (tuple): range of saturation.
-        hue_delta (int): delta of hue.
-    """
-
-    def __init__(self,
-                 brightness_delta=32,
-                 contrast_range=(0.5, 1.5),
-                 saturation_range=(0.5, 1.5),
-                 hue_delta=18):
-        self.brightness_delta = brightness_delta
-        self.contrast_lower, self.contrast_upper = contrast_range
-        self.saturation_lower, self.saturation_upper = saturation_range
-        self.hue_delta = hue_delta
-
-    def __call__(self, results):
-        """Call function to perform photometric distortion on images.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Result dict with images distorted.
-        """
-
-        if 'img_fields' in results:
-            assert results['img_fields'] == ['img'], \
-                'Only single img_fields is allowed'
-        img = results['img']
-        assert img.dtype == np.float32, \
-            'PhotoMetricDistortion needs the input image of dtype np.float32,'\
-            ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
-        # random brightness
-        if random.randint(2):
-            delta = random.uniform(-self.brightness_delta,
-                                   self.brightness_delta)
-            img += delta
-
-        # mode == 0 --> do random contrast first
-        # mode == 1 --> do random contrast last
-        mode = random.randint(2)
-        if mode == 1:
-            if random.randint(2):
-                alpha = random.uniform(self.contrast_lower,
-                                       self.contrast_upper)
-                img *= alpha
-
-        # convert color from BGR to HSV
-        img = mmcv.bgr2hsv(img)
-
-        # random saturation
-        if random.randint(2):
-            img[..., 1] *= random.uniform(self.saturation_lower,
-                                          self.saturation_upper)
-
-        # random hue
-        if random.randint(2):
-            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
-            img[..., 0][img[..., 0] > 360] -= 360
-            img[..., 0][img[..., 0] < 0] += 360
-
-        # convert color from HSV to BGR
-        img = mmcv.hsv2bgr(img)
-
-        # random contrast
-        if mode == 0:
-            if random.randint(2):
-                alpha = random.uniform(self.contrast_lower,
-                                       self.contrast_upper)
-                img *= alpha
-
-        # randomly swap channels
-        if random.randint(2):
-            img = img[..., random.permutation(3)]
-
-        results['img'] = img
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
-        repr_str += 'contrast_range='
-        repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
-        repr_str += 'saturation_range='
-        repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
-        repr_str += f'hue_delta={self.hue_delta})'
-        return repr_str
-
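-# Illustrative usage sketch (default values shown): this transform requires a
-# float32 image, so the loading step must set ``to_float32=True``, e.g.
-#     dict(type='LoadImageFromFile', to_float32=True),
-#     dict(type='PhotoMetricDistortion',
-#          brightness_delta=32,
-#          contrast_range=(0.5, 1.5),
-#          saturation_range=(0.5, 1.5),
-#          hue_delta=18)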
-
-@PIPELINES.register_module()
-class Expand(object):
-    """Random expand the image & bboxes.
-
-    Randomly place the original image on a canvas of 'ratio' x original image
-    size filled with mean values. The ratio is in the range of ratio_range.
-
-    Args:
-        mean (tuple): mean value of the dataset.
-        to_rgb (bool): whether to reverse the order of mean to align with RGB.
-        ratio_range (tuple): range of expand ratio.
-        seg_ignore_label (int, optional): label used to fill the expanded
-            area of semantic segmentation maps. Default: None.
-        prob (float): probability of applying this transformation.
-    """
-
-    def __init__(self,
-                 mean=(0, 0, 0),
-                 to_rgb=True,
-                 ratio_range=(1, 4),
-                 seg_ignore_label=None,
-                 prob=0.5):
-        self.to_rgb = to_rgb
-        self.ratio_range = ratio_range
-        if to_rgb:
-            self.mean = mean[::-1]
-        else:
-            self.mean = mean
-        self.min_ratio, self.max_ratio = ratio_range
-        self.seg_ignore_label = seg_ignore_label
-        self.prob = prob
-
-    def __call__(self, results):
-        """Call function to expand images, bounding boxes.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Result dict with images and bounding boxes expanded.
-        """
-
-        if random.uniform(0, 1) > self.prob:
-            return results
-
-        if 'img_fields' in results:
-            assert results['img_fields'] == ['img'], \
-                'Only single img_fields is allowed'
-        img = results['img']
-
-        h, w, c = img.shape
-        ratio = random.uniform(self.min_ratio, self.max_ratio)
-        # speed up expand for large images
-        if np.all(self.mean == self.mean[0]):
-            expand_img = np.empty((int(h * ratio), int(w * ratio), c),
-                                  img.dtype)
-            expand_img.fill(self.mean[0])
-        else:
-            expand_img = np.full((int(h * ratio), int(w * ratio), c),
-                                 self.mean,
-                                 dtype=img.dtype)
-        left = int(random.uniform(0, w * ratio - w))
-        top = int(random.uniform(0, h * ratio - h))
-        expand_img[top:top + h, left:left + w] = img
-
-        results['img'] = expand_img
-        # expand bboxes
-        for key in results.get('bbox_fields', []):
-            results[key] = results[key] + np.tile(
-                (left, top), 2).astype(results[key].dtype)
-
-        # expand masks
-        for key in results.get('mask_fields', []):
-            results[key] = results[key].expand(
-                int(h * ratio), int(w * ratio), top, left)
-
-        # expand segs
-        for key in results.get('seg_fields', []):
-            gt_seg = results[key]
-            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
-                                    self.seg_ignore_label,
-                                    dtype=gt_seg.dtype)
-            expand_gt_seg[top:top + h, left:left + w] = gt_seg
-            results[key] = expand_gt_seg
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
-        repr_str += f'ratio_range={self.ratio_range}, '
-        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
-        repr_str += f'prob={self.prob})'
-        return repr_str
-
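-# Illustrative usage sketch (hypothetical values): pairing ``mean`` with the
-# same statistics used by Normalize keeps the expanded canvas consistent:
-#     dict(type='Expand',
-#          mean=[123.675, 116.28, 103.53],
-#          to_rgb=True,
-#          ratio_range=(1, 4))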
-
-@PIPELINES.register_module()
-class MinIoURandomCrop(object):
-    """Random crop the image & bboxes, the cropped patches have minimum IoU
-    requirement with original image & bboxes, the IoU threshold is randomly
-    selected from min_ious.
-
-    Args:
-        min_ious (tuple): minimum IoU threshold for all intersections with
-            bounding boxes.
-        min_crop_size (float): minimum crop size (i.e. h, w := a*h, a*w,
-            where a >= min_crop_size).
-        bbox_clip_border (bool, optional): Whether clip the objects outside
-            the border of the image. Defaults to True.
-
-    Note:
-        The keys for bboxes, labels and masks should be paired. That is, \
-        `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
-        `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
-    """
-
-    def __init__(self,
-                 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
-                 min_crop_size=0.3,
-                 bbox_clip_border=True):
-        # 1: return ori img
-        self.min_ious = min_ious
-        self.sample_mode = (1, *min_ious, 0)
-        self.min_crop_size = min_crop_size
-        self.bbox_clip_border = bbox_clip_border
-        self.bbox2label = {
-            'gt_bboxes': 'gt_labels',
-            'gt_bboxes_ignore': 'gt_labels_ignore'
-        }
-        self.bbox2mask = {
-            'gt_bboxes': 'gt_masks',
-            'gt_bboxes_ignore': 'gt_masks_ignore'
-        }
-
-    def __call__(self, results):
-        """Call function to crop images and bounding boxes with minimum IoU
-        constraint.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Result dict with images and bounding boxes cropped, \
-                'img_shape' key is updated.
-        """
-
-        if 'img_fields' in results:
-            assert results['img_fields'] == ['img'], \
-                'Only single img_fields is allowed'
-        img = results['img']
-        assert 'bbox_fields' in results
-        boxes = [results[key] for key in results['bbox_fields']]
-        boxes = np.concatenate(boxes, 0)
-        h, w, c = img.shape
-        while True:
-            mode = random.choice(self.sample_mode)
-            self.mode = mode
-            if mode == 1:
-                return results
-
-            min_iou = mode
-            for i in range(50):
-                new_w = random.uniform(self.min_crop_size * w, w)
-                new_h = random.uniform(self.min_crop_size * h, h)
-
-                # h / w in [0.5, 2]
-                if new_h / new_w < 0.5 or new_h / new_w > 2:
-                    continue
-
-                left = random.uniform(w - new_w)
-                top = random.uniform(h - new_h)
-
-                patch = np.array(
-                    (int(left), int(top), int(left + new_w), int(top + new_h)))
-                # Line or point crop is not allowed
-                if patch[2] == patch[0] or patch[3] == patch[1]:
-                    continue
-                overlaps = bbox_overlaps(
-                    patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
-                if len(overlaps) > 0 and overlaps.min() < min_iou:
-                    continue
-
-                # centers of boxes should be inside the cropped img
-                # only adjust boxes and instance masks when the gt is not empty
-                if len(overlaps) > 0:
-                    # adjust boxes
-                    def is_center_of_bboxes_in_patch(boxes, patch):
-                        center = (boxes[:, :2] + boxes[:, 2:]) / 2
-                        mask = ((center[:, 0] > patch[0]) *
-                                (center[:, 1] > patch[1]) *
-                                (center[:, 0] < patch[2]) *
-                                (center[:, 1] < patch[3]))
-                        return mask
-
-                    mask = is_center_of_bboxes_in_patch(boxes, patch)
-                    if not mask.any():
-                        continue
-                    for key in results.get('bbox_fields', []):
-                        boxes = results[key].copy()
-                        mask = is_center_of_bboxes_in_patch(boxes, patch)
-                        boxes = boxes[mask]
-                        if self.bbox_clip_border:
-                            boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
-                            boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
-                        boxes -= np.tile(patch[:2], 2)
-
-                        results[key] = boxes
-                        # labels
-                        label_key = self.bbox2label.get(key)
-                        if label_key in results:
-                            results[label_key] = results[label_key][mask]
-
-                        # mask fields
-                        mask_key = self.bbox2mask.get(key)
-                        if mask_key in results:
-                            results[mask_key] = results[mask_key][
-                                mask.nonzero()[0]].crop(patch)
-                # adjust the img no matter whether the gt is empty before crop
-                img = img[patch[1]:patch[3], patch[0]:patch[2]]
-                results['img'] = img
-                results['img_shape'] = img.shape
-
-                # seg fields
-                for key in results.get('seg_fields', []):
-                    results[key] = results[key][patch[1]:patch[3],
-                                                patch[0]:patch[2]]
-                return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(min_ious={self.min_ious}, '
-        repr_str += f'min_crop_size={self.min_crop_size}, '
-        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
-        return repr_str
-
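-# Illustrative usage sketch (default values shown):
-#     dict(type='MinIoURandomCrop',
-#          min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
-#          min_crop_size=0.3)
-# Each call first picks a mode from (1, *min_ious, 0); mode 1 returns the
-# original image, and otherwise up to 50 random patches are tried per mode.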
-
-@PIPELINES.register_module()
-class Corrupt(object):
-    """Corruption augmentation.
-
-    Corruption transforms implemented based on
-    `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
-
-    Args:
-        corruption (str): Corruption name.
-        severity (int, optional): The severity of corruption. Default: 1.
-    """
-
-    def __init__(self, corruption, severity=1):
-        self.corruption = corruption
-        self.severity = severity
-
-    def __call__(self, results):
-        """Call function to corrupt image.
-
-        Args:
-            results (dict): Result dict from loading pipeline.
-
-        Returns:
-            dict: Result dict with images corrupted.
-        """
-
-        if corrupt is None:
-            raise RuntimeError('imagecorruptions is not installed')
-        if 'img_fields' in results:
-            assert results['img_fields'] == ['img'], \
-                'Only single img_fields is allowed'
-        results['img'] = corrupt(
-            results['img'].astype(np.uint8),
-            corruption_name=self.corruption,
-            severity=self.severity)
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(corruption={self.corruption}, '
-        repr_str += f'severity={self.severity})'
-        return repr_str
-
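-# Illustrative usage sketch (hypothetical values): corruption names come from
-# the imagecorruptions package, e.g.
-#     dict(type='Corrupt', corruption='gaussian_noise', severity=3)
-# where severity is an integer in [1, 5].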
-
-@PIPELINES.register_module()
-class Albu(object):
-    """Albumentation augmentation.
-
-    Adds custom transformations from the Albumentations library.
-    Please visit `https://albumentations.readthedocs.io`
-    for more information.
-
-    An example of ``transforms`` is as followed:
-
-    .. code-block::
-
-        [
-            dict(
-                type='ShiftScaleRotate',
-                shift_limit=0.0625,
-                scale_limit=0.0,
-                rotate_limit=0,
-                interpolation=1,
-                p=0.5),
-            dict(
-                type='RandomBrightnessContrast',
-                brightness_limit=[0.1, 0.3],
-                contrast_limit=[0.1, 0.3],
-                p=0.2),
-            dict(type='ChannelShuffle', p=0.1),
-            dict(
-                type='OneOf',
-                transforms=[
-                    dict(type='Blur', blur_limit=3, p=1.0),
-                    dict(type='MedianBlur', blur_limit=3, p=1.0)
-                ],
-                p=0.1),
-        ]
-
-    Args:
-        transforms (list[dict]): A list of albu transformations.
-        bbox_params (dict): Bbox_params for albumentation `Compose`.
-        keymap (dict): Contains {'input key': 'albumentation-style key'}.
-        update_pad_shape (bool): Whether to update 'pad_shape' in results to
-            the shape of the augmented image. Default: False.
-        skip_img_without_anno (bool): Whether to skip the image if no
-            annotations are left after augmentation. Default: False.
-    """
-
-    def __init__(self,
-                 transforms,
-                 bbox_params=None,
-                 keymap=None,
-                 update_pad_shape=False,
-                 skip_img_without_anno=False):
-        if Compose is None:
-            raise RuntimeError('albumentations is not installed')
-
-        # Args will be modified later; copying them is safer
-        transforms = copy.deepcopy(transforms)
-        if bbox_params is not None:
-            bbox_params = copy.deepcopy(bbox_params)
-        if keymap is not None:
-            keymap = copy.deepcopy(keymap)
-        self.transforms = transforms
-        self.filter_lost_elements = False
-        self.update_pad_shape = update_pad_shape
-        self.skip_img_without_anno = skip_img_without_anno
-
-        # A simple workaround to remove masks without boxes
-        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
-                and 'filter_lost_elements' in bbox_params):
-            self.filter_lost_elements = True
-            self.origin_label_fields = bbox_params['label_fields']
-            bbox_params['label_fields'] = ['idx_mapper']
-            del bbox_params['filter_lost_elements']
-
-        self.bbox_params = (
-            self.albu_builder(bbox_params) if bbox_params else None)
-        self.aug = Compose([self.albu_builder(t) for t in self.transforms],
-                           bbox_params=self.bbox_params)
-
-        if not keymap:
-            self.keymap_to_albu = {
-                'img': 'image',
-                'gt_masks': 'masks',
-                'gt_bboxes': 'bboxes'
-            }
-        else:
-            self.keymap_to_albu = keymap
-        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
-
-    def albu_builder(self, cfg):
-        """Import a module from albumentations.
-
-        It inherits some of :func:`build_from_cfg` logic.
-
-        Args:
-            cfg (dict): Config dict. It should at least contain the key "type".
-
-        Returns:
-            obj: The constructed object.
-        """
-
-        assert isinstance(cfg, dict) and 'type' in cfg
-        args = cfg.copy()
-
-        obj_type = args.pop('type')
-        if mmcv.is_str(obj_type):
-            if albumentations is None:
-                raise RuntimeError('albumentations is not installed')
-            obj_cls = getattr(albumentations, obj_type)
-        elif inspect.isclass(obj_type):
-            obj_cls = obj_type
-        else:
-            raise TypeError(
-                f'type must be a str or valid type, but got {type(obj_type)}')
-
-        if 'transforms' in args:
-            args['transforms'] = [
-                self.albu_builder(transform)
-                for transform in args['transforms']
-            ]
-
-        return obj_cls(**args)
-
-    @staticmethod
-    def mapper(d, keymap):
-        """Dictionary mapper. Renames keys according to keymap provided.
-
-        Args:
-            d (dict): old dict
-            keymap (dict): {'old_key':'new_key'}
-        Returns:
-            dict: new dict.
-        """
-
-        updated_dict = {}
-        for k, v in d.items():
-            new_k = keymap.get(k, k)
-            updated_dict[new_k] = v
-        return updated_dict
-
-    def __call__(self, results):
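-        """Call function to apply albumentations transforms on the results.
-
-        Args:
-            results (dict): Result dict from the loading pipeline.
-
-        Returns:
-            dict | None: Transformed results, or None if the image is skipped
-                because no annotations are left after augmentation.
-        """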
-        # dict to albumentations format
-        results = self.mapper(results, self.keymap_to_albu)
-        # TODO: add bbox_fields
-        if 'bboxes' in results:
-            # to list of boxes
-            if isinstance(results['bboxes'], np.ndarray):
-                results['bboxes'] = [x for x in results['bboxes']]
-            # add pseudo-field for filtration
-            if self.filter_lost_elements:
-                results['idx_mapper'] = np.arange(len(results['bboxes']))
-
-        # TODO: Support mask structure in albu
-        if 'masks' in results:
-            if isinstance(results['masks'], PolygonMasks):
-                raise NotImplementedError(
-                    'Albu only supports BitMap masks now')
-            ori_masks = results['masks']
-            if albumentations.__version__ < '0.5':
-                results['masks'] = results['masks'].masks
-            else:
-                results['masks'] = [mask for mask in results['masks'].masks]
-
-        results = self.aug(**results)
-
-        if 'bboxes' in results:
-            if isinstance(results['bboxes'], list):
-                results['bboxes'] = np.array(
-                    results['bboxes'], dtype=np.float32)
-            results['bboxes'] = results['bboxes'].reshape(-1, 4)
-
-            # filter label_fields
-            if self.filter_lost_elements:
-
-                for label in self.origin_label_fields:
-                    results[label] = np.array(
-                        [results[label][i] for i in results['idx_mapper']])
-                if 'masks' in results:
-                    results['masks'] = np.array(
-                        [results['masks'][i] for i in results['idx_mapper']])
-                    results['masks'] = ori_masks.__class__(
-                        results['masks'], results['image'].shape[0],
-                        results['image'].shape[1])
-
-                if (not len(results['idx_mapper'])
-                        and self.skip_img_without_anno):
-                    return None
-
-        if 'gt_labels' in results:
-            if isinstance(results['gt_labels'], list):
-                results['gt_labels'] = np.array(results['gt_labels'])
-            results['gt_labels'] = results['gt_labels'].astype(np.int64)
-
-        # back to the original format
-        results = self.mapper(results, self.keymap_back)
-
-        # update final shape
-        if self.update_pad_shape:
-            results['pad_shape'] = results['img'].shape
-
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
-        return repr_str
-
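-# Illustrative usage sketch (hypothetical values): wiring Albu into a
-# detection pipeline usually pairs ``transforms`` with ``bbox_params`` and a
-# ``keymap`` back to mmdetection's keys, e.g.
-#     dict(type='Albu',
-#          transforms=[dict(type='RandomBrightnessContrast', p=0.2)],
-#          bbox_params=dict(type='BboxParams',
-#                           format='pascal_voc',
-#                           label_fields=['gt_labels'],
-#                           min_visibility=0.0,
-#                           filter_lost_elements=True),
-#          keymap=dict(img='image', gt_bboxes='bboxes'),
-#          skip_img_without_anno=True)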
-
-@PIPELINES.register_module()
-class RandomCenterCropPad(object):
-    """Random center crop and random around padding for CornerNet.
-
-    This operation generates a randomly cropped image from the original image
-    and pads it simultaneously. Different from :class:`RandomCrop`, the output
-    shape may not strictly equal ``crop_size``: a random value is chosen from
-    ``ratios``, so the output shape can be larger or smaller than
-    ``crop_size``. The padding operation also differs from :class:`Pad` in
-    that around padding is used instead of right-bottom padding.
-
-    The relation between output image (padding image) and original image:
-
-    .. code:: text
-
-                        output image
-
-               +----------------------------+
-               |          padded area       |
-        +------|----------------------------|----------+
-        |      |         cropped area       |          |
-        |      |         +---------------+  |          |
-        |      |         |    .   center |  |          | original image
-        |      |         |        range  |  |          |
-        |      |         +---------------+  |          |
-        +------|----------------------------|----------+
-               |          padded area       |
-               +----------------------------+
-
-    There are 5 main areas in the figure:
-
-    - output image: the output image of this operation, also called the
-      padding image in the following description.
-    - original image: the input image of this operation.
-    - padded area: the non-intersecting area of the output image and the
-      original image.
-    - cropped area: the overlap of the output image and the original image.
-    - center range: a smaller area from which the random center is chosen.
-      The center range is computed from ``border`` and the original image's
-      shape so that the random center is not too close to the original
-      image's border.
-
-    This operation also acts differently in train and test mode; the two
-    pipelines are summarized below.
-
-    Train pipeline:
-
-    1. Choose a ``random_ratio`` from ``ratios``; the shape of the padding
-       image will be ``random_ratio * crop_size``.
-    2. Choose a ``random_center`` in the center range.
-    3. Generate the padding image with its center matching ``random_center``.
-    4. Initialize the padding image with pixel values equal to ``mean``.
-    5. Copy the cropped area to the padding image.
-    6. Refine annotations.
-
-    Test pipeline:
-
-    1. Compute output shape according to ``test_pad_mode``.
-    2. Generate the padding image with its center matching the original
-       image center.
-    3. Initialize the padding image with pixel values equal to ``mean``.
-    4. Copy the ``cropped area`` to the padding image.
-
-    Args:
-        crop_size (tuple | None): expected size after crop; the final size is
-            computed according to the ratio. Requires (h, w) in train mode,
-            and None in test mode.
-        ratios (tuple): randomly select a ratio from the tuple and crop the
-            image to (crop_size[0] * ratio, crop_size[1] * ratio).
-            Only available in train mode.
-        border (int): max distance from center select area to image border.
-            Only available in train mode.
-        mean (sequence): Mean values of 3 channels.
-        std (sequence): Std values of 3 channels.
-        to_rgb (bool): Whether to convert the image from BGR to RGB.
-        test_mode (bool): whether to involve random variables in the
-            transform. In train mode, crop_size is fixed while the center
-            coords and ratio are randomly selected from predefined lists. In
-            test mode, crop_size is the image's original shape and the center
-            coords and ratio are fixed.
-        test_pad_mode (tuple): padding method and padding shape value, only
-            available in test mode. Default is using 'logical_or' with
-            127 as padding shape value.
-
-            - 'logical_or': final_shape = input_shape | padding_shape_value
-            - 'size_divisor': final_shape = int(
-              ceil(input_shape / padding_shape_value) * padding_shape_value)
-        bbox_clip_border (bool, optional): Whether clip the objects outside
-            the border of the image. Defaults to True.
-    """
-
-    def __init__(self,
-                 crop_size=None,
-                 ratios=(0.9, 1.0, 1.1),
-                 border=128,
-                 mean=None,
-                 std=None,
-                 to_rgb=None,
-                 test_mode=False,
-                 test_pad_mode=('logical_or', 127),
-                 bbox_clip_border=True):
-        if test_mode:
-            assert crop_size is None, 'crop_size must be None in test mode'
-            assert ratios is None, 'ratios must be None in test mode'
-            assert border is None, 'border must be None in test mode'
-            assert isinstance(test_pad_mode, (list, tuple))
-            assert test_pad_mode[0] in ['logical_or', 'size_divisor']
-        else:
-            assert isinstance(crop_size, (list, tuple))
-            assert crop_size[0] > 0 and crop_size[1] > 0, (
-                'crop_size must > 0 in train mode')
-            assert isinstance(ratios, (list, tuple))
-            assert test_pad_mode is None, (
-                'test_pad_mode must be None in train mode')
-
-        self.crop_size = crop_size
-        self.ratios = ratios
-        self.border = border
-        # We do not set default values for mean, std and to_rgb because these
-        # hyper-parameters are easy to forget yet can affect performance.
-        # Please use the same setting as Normalize for performance assurance.
-        assert mean is not None and std is not None and to_rgb is not None
-        self.to_rgb = to_rgb
-        self.input_mean = mean
-        self.input_std = std
-        if to_rgb:
-            self.mean = mean[::-1]
-            self.std = std[::-1]
-        else:
-            self.mean = mean
-            self.std = std
-        self.test_mode = test_mode
-        self.test_pad_mode = test_pad_mode
-        self.bbox_clip_border = bbox_clip_border
-
-    def _get_border(self, border, size):
-        """Get final border for the target size.
-
-        This function generates a ``final_border`` according to the image's
-        shape. The area between ``final_border`` and ``size - final_border``
-        is the ``center range``. The center is randomly chosen from this
-        range so that it is not too close to the original image's border.
-        The ``center range`` should also be larger than 0.
-
-        Args:
-            border (int): The initial border, default is 128.
-            size (int): The width or height of original image.
-        Returns:
-            int: The final border.
-        """
-        k = 2 * border / size
-        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
-        return border // i
-
-    def _filter_boxes(self, patch, boxes):
-        """Check whether the center of each box is in the patch.
-
-        Args:
-            patch (list[int]): The cropped area, [left, top, right, bottom].
-            boxes (numpy array, (N x 4)): Ground truth boxes.
-
-        Returns:
-            mask (numpy array, (N,)): A boolean mask indicating whether the
-                center of each box is inside the patch.
-        """
-        center = (boxes[:, :2] + boxes[:, 2:]) / 2
-        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
-            center[:, 0] < patch[2]) * (
-                center[:, 1] < patch[3])
-        return mask
-
-    def _crop_image_and_paste(self, image, center, size):
-        """Crop image with a given center and size, then paste the cropped
-        image to a blank image with two centers align.
-
-        This function is equivalent to generating a blank image with ``size``
-        as its shape. Then cover it on the original image with two centers (
-        the center of blank image and the random center of original image)
-        aligned. The overlap area is paste from the original image and the
-        outside area is filled with ``mean pixel``.
-
-        Args:
-            image (np array, H x W x C): Original image.
-            center (list[int]): Target crop center coord.
-            size (list[int]): Target crop size. [target_h, target_w]
-
-        Returns:
-            cropped_img (np array, target_h x target_w x C): Cropped image.
-            border (np array, 4): The distance of four border of
-                ``cropped_img`` to the original image area, [top, bottom,
-                left, right]
-            patch (list[int]): The cropped area, [left, top, right, bottom].
-        """
-        center_y, center_x = center
-        target_h, target_w = size
-        img_h, img_w, img_c = image.shape
-
-        x0 = max(0, center_x - target_w // 2)
-        x1 = min(center_x + target_w // 2, img_w)
-        y0 = max(0, center_y - target_h // 2)
-        y1 = min(center_y + target_h // 2, img_h)
-        patch = np.array((int(x0), int(y0), int(x1), int(y1)))
-
-        left, right = center_x - x0, x1 - center_x
-        top, bottom = center_y - y0, y1 - center_y
-
-        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
-        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
-        for i in range(img_c):
-            cropped_img[:, :, i] += self.mean[i]
-        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
-        x_slice = slice(cropped_center_x - left, cropped_center_x + right)
-        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
-
-        border = np.array([
-            cropped_center_y - top, cropped_center_y + bottom,
-            cropped_center_x - left, cropped_center_x + right
-        ],
-                          dtype=np.float32)
-
-        return cropped_img, border, patch
-
-    def _train_aug(self, results):
-        """Random crop and around padding the original image.
-
-        Args:
-            results (dict): Image information in the augmentation pipeline.
-
-        Returns:
-            results (dict): The updated dict.
-        """
-        img = results['img']
-        h, w, c = img.shape
-        boxes = results['gt_bboxes']
-        while True:
-            scale = random.choice(self.ratios)
-            new_h = int(self.crop_size[0] * scale)
-            new_w = int(self.crop_size[1] * scale)
-            h_border = self._get_border(self.border, h)
-            w_border = self._get_border(self.border, w)
-
-            for i in range(50):
-                center_x = random.randint(low=w_border, high=w - w_border)
-                center_y = random.randint(low=h_border, high=h - h_border)
-
-                cropped_img, border, patch = self._crop_image_and_paste(
-                    img, [center_y, center_x], [new_h, new_w])
-
-                mask = self._filter_boxes(patch, boxes)
-                # if the image has no valid bbox, any crop patch is valid.
-                if not mask.any() and len(boxes) > 0:
-                    continue
-
-                results['img'] = cropped_img
-                results['img_shape'] = cropped_img.shape
-                results['pad_shape'] = cropped_img.shape
-
-                x0, y0, x1, y1 = patch
-
-                left_w, top_h = center_x - x0, center_y - y0
-                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
-
-                # crop bboxes accordingly and clip to the image boundary
-                for key in results.get('bbox_fields', []):
-                    mask = self._filter_boxes(patch, results[key])
-                    bboxes = results[key][mask]
-                    bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
-                    bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
-                    if self.bbox_clip_border:
-                        bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
-                        bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
-                    keep = (bboxes[:, 2] > bboxes[:, 0]) & (
-                        bboxes[:, 3] > bboxes[:, 1])
-                    bboxes = bboxes[keep]
-                    results[key] = bboxes
-                    if key in ['gt_bboxes']:
-                        if 'gt_labels' in results:
-                            labels = results['gt_labels'][mask]
-                            labels = labels[keep]
-                            results['gt_labels'] = labels
-                        if 'gt_masks' in results:
-                            raise NotImplementedError(
-                                'RandomCenterCropPad only supports bbox.')
-
-                # crop semantic seg
-                for key in results.get('seg_fields', []):
-                    raise NotImplementedError(
-                        'RandomCenterCropPad only supports bbox.')
-                return results
-
-    def _test_aug(self, results):
-        """Around padding the original image without cropping.
-
-        The padding mode and value are from ``test_pad_mode``.
-
-        Args:
-            results (dict): Image information in the augmentation pipeline.
-
-        Returns:
-            results (dict): The updated dict.
-        """
-        img = results['img']
-        h, w, c = img.shape
-        results['img_shape'] = img.shape
-        if self.test_pad_mode[0] in ['logical_or']:
-            target_h = h | self.test_pad_mode[1]
-            target_w = w | self.test_pad_mode[1]
-        elif self.test_pad_mode[0] in ['size_divisor']:
-            divisor = self.test_pad_mode[1]
-            target_h = int(np.ceil(h / divisor)) * divisor
-            target_w = int(np.ceil(w / divisor)) * divisor
-        else:
-            raise NotImplementedError(
-                'RandomCenterCropPad only supports two testing pad modes: '
-                'logical_or and size_divisor.')
-
-        cropped_img, border, _ = self._crop_image_and_paste(
-            img, [h // 2, w // 2], [target_h, target_w])
-        results['img'] = cropped_img
-        results['pad_shape'] = cropped_img.shape
-        results['border'] = border
-        return results
-
-    def __call__(self, results):
-        img = results['img']
-        assert img.dtype == np.float32, (
-            'RandomCenterCropPad needs the input image of dtype np.float32,'
-            ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
-        h, w, c = img.shape
-        assert c == len(self.mean)
-        if self.test_mode:
-            return self._test_aug(results)
-        else:
-            return self._train_aug(results)
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(crop_size={self.crop_size}, '
-        repr_str += f'ratios={self.ratios}, '
-        repr_str += f'border={self.border}, '
-        repr_str += f'mean={self.input_mean}, '
-        repr_str += f'std={self.input_std}, '
-        repr_str += f'to_rgb={self.to_rgb}, '
-        repr_str += f'test_mode={self.test_mode}, '
-        repr_str += f'test_pad_mode={self.test_pad_mode}, '
-        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
-        return repr_str
-
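-# Illustrative usage sketch (hypothetical CornerNet-style values):
-# train:
-#     dict(type='RandomCenterCropPad',
-#          crop_size=(511, 511),
-#          ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
-#          mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True,
-#          test_mode=False, test_pad_mode=None)
-# test:
-#     dict(type='RandomCenterCropPad',
-#          crop_size=None, ratios=None, border=None,
-#          mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True,
-#          test_mode=True, test_pad_mode=('logical_or', 127))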
-
-@PIPELINES.register_module()
-class CutOut(object):
-    """CutOut operation.
-
-    Randomly drop some regions of image used in
-    `Cutout <https://arxiv.org/abs/1708.04552>`_.
-
-    Args:
-        n_holes (int | tuple[int, int]): Number of regions to be dropped.
-            If it is given as a list, number of holes will be randomly
-            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
-        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
-            shape of dropped regions. It can be `tuple[int, int]` to use a
-            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
-            shape from the list.
-        cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
-            candidate ratio of dropped regions. It can be `tuple[float, float]`
-            to use a fixed ratio or `list[tuple[float, float]]` to randomly
-            choose ratio from the list. Please note that `cutout_shape`
-            and `cutout_ratio` cannot be both given at the same time.
-        fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
-            of pixel to fill in the dropped regions. Default: (0, 0, 0).
-    """
-
-    def __init__(self,
-                 n_holes,
-                 cutout_shape=None,
-                 cutout_ratio=None,
-                 fill_in=(0, 0, 0)):
-
-        assert (cutout_shape is None) ^ (cutout_ratio is None), \
-            'Either cutout_shape or cutout_ratio should be specified.'
-        assert (isinstance(cutout_shape, (list, tuple))
-                or isinstance(cutout_ratio, (list, tuple)))
-        if isinstance(n_holes, tuple):
-            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
-        else:
-            n_holes = (n_holes, n_holes)
-        self.n_holes = n_holes
-        self.fill_in = fill_in
-        self.with_ratio = cutout_ratio is not None
-        self.candidates = cutout_ratio if self.with_ratio else cutout_shape
-        if not isinstance(self.candidates, list):
-            self.candidates = [self.candidates]
-
-    def __call__(self, results):
-        """Call function to drop some regions of image."""
-        h, w, c = results['img'].shape
-        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
-        for _ in range(n_holes):
-            x1 = np.random.randint(0, w)
-            y1 = np.random.randint(0, h)
-            index = np.random.randint(0, len(self.candidates))
-            if not self.with_ratio:
-                cutout_w, cutout_h = self.candidates[index]
-            else:
-                cutout_w = int(self.candidates[index][0] * w)
-                cutout_h = int(self.candidates[index][1] * h)
-
-            x2 = np.clip(x1 + cutout_w, 0, w)
-            y2 = np.clip(y1 + cutout_h, 0, h)
-            results['img'][y1:y2, x1:x2, :] = self.fill_in
-
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(n_holes={self.n_holes}, '
-        repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
-                     else f'cutout_shape={self.candidates}, ')
-        repr_str += f'fill_in={self.fill_in})'
-        return repr_str
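-
-# Illustrative usage sketch (hypothetical values): either a fixed pixel shape
-# or a relative ratio may be given, but not both, e.g.
-#     dict(type='CutOut', n_holes=(1, 3), cutout_shape=(32, 32))
-#     dict(type='CutOut', n_holes=2, cutout_ratio=[(0.1, 0.1), (0.2, 0.2)])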
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/utils/flops_counter.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/utils/flops_counter.py
deleted file mode 100644
index d10af5feca7f4b8c0ba359b7b1c826f754e048be..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/utils/flops_counter.py
+++ /dev/null
@@ -1,599 +0,0 @@
-# Modified from flops-counter.pytorch by Vladislav Sovrasov
-# original repo: https://github.com/sovrasov/flops-counter.pytorch
-
-# MIT License
-
-# Copyright (c) 2018 Vladislav Sovrasov
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-import sys
-from functools import partial
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-import annotator.uniformer.mmcv as mmcv
-
-
-def get_model_complexity_info(model,
-                              input_shape,
-                              print_per_layer_stat=True,
-                              as_strings=True,
-                              input_constructor=None,
-                              flush=False,
-                              ost=sys.stdout):
-    """Get complexity information of a model.
-
-    This method can calculate FLOPs and parameter counts of a model with
-    corresponding input shape. It can also print complexity information for
-    each layer in a model.
-
-    Supported layers are listed below:
-        - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
-        - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
-            ``nn.ReLU6``.
-        - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
-            ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
-            ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
-            ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
-            ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
-        - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
-            ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
-            ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
-        - Linear: ``nn.Linear``.
-        - Deconvolution: ``nn.ConvTranspose2d``.
-        - Upsample: ``nn.Upsample``.
-
-    Args:
-        model (nn.Module): The model for complexity calculation.
-        input_shape (tuple): Input shape used for calculation.
-        print_per_layer_stat (bool): Whether to print complexity information
-            for each layer in a model. Default: True.
-        as_strings (bool): Output FLOPs and params counts in a string form.
-            Default: True.
-        input_constructor (None | callable): If specified, it takes a callable
-            method that generates the input. Otherwise, a random tensor with
-            the given input shape will be generated to calculate FLOPs.
-            Default: None.
-        flush (bool): same as that in :func:`print`. Default: False.
-        ost (stream): same as ``file`` param in :func:`print`.
-            Default: sys.stdout.
-
-    Returns:
-        tuple[float | str]: If ``as_strings`` is set to True, it will return
-            FLOPs and parameter counts in a string format. Otherwise, it will
-            return them as floats.
-    """
-    assert type(input_shape) is tuple
-    assert len(input_shape) >= 1
-    assert isinstance(model, nn.Module)
-    flops_model = add_flops_counting_methods(model)
-    flops_model.eval()
-    flops_model.start_flops_count()
-    if input_constructor:
-        input = input_constructor(input_shape)
-        _ = flops_model(**input)
-    else:
-        try:
-            batch = torch.ones(()).new_empty(
-                (1, *input_shape),
-                dtype=next(flops_model.parameters()).dtype,
-                device=next(flops_model.parameters()).device)
-        except StopIteration:
-            # Avoid StopIteration for models which have no parameters,
-            # like `nn.Relu()`, `nn.AvgPool2d`, etc.
-            batch = torch.ones(()).new_empty((1, *input_shape))
-
-        _ = flops_model(batch)
-
-    flops_count, params_count = flops_model.compute_average_flops_cost()
-    if print_per_layer_stat:
-        print_model_with_flops(
-            flops_model, flops_count, params_count, ost=ost, flush=flush)
-    flops_model.stop_flops_count()
-
-    if as_strings:
-        return flops_to_string(flops_count), params_to_string(params_count)
-
-    return flops_count, params_count
-
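-# Illustrative usage sketch (hypothetical model): for a 3-channel 224x224
-# input,
-#     from torchvision.models import resnet18
-#     flops, params = get_model_complexity_info(
-#         resnet18(), (3, 224, 224), print_per_layer_stat=False)
-# would return strings roughly like ('1.82 GFLOPs', '11.69 M'), since
-# as_strings defaults to True and one multiply-add counts as one FLOP here.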
-
-def flops_to_string(flops, units='GFLOPs', precision=2):
-    """Convert FLOPs number into a string.
-
-    Note that here one multiply-add is counted as one FLOP.
-
-    Args:
-        flops (float): FLOPs number to be converted.
-        units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
-            'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
-            choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
-        precision (int): Number of digits after the decimal point. Default: 2.
-
-    Returns:
-        str: The converted FLOPs number with units.
-
-    Examples:
-        >>> flops_to_string(1e9)
-        '1.0 GFLOPs'
-        >>> flops_to_string(2e5, 'MFLOPs')
-        '0.2 MFLOPs'
-        >>> flops_to_string(3e-9, None)
-        '3e-09 FLOPs'
-    """
-    if units is None:
-        if flops // 10**9 > 0:
-            return str(round(flops / 10.**9, precision)) + ' GFLOPs'
-        elif flops // 10**6 > 0:
-            return str(round(flops / 10.**6, precision)) + ' MFLOPs'
-        elif flops // 10**3 > 0:
-            return str(round(flops / 10.**3, precision)) + ' KFLOPs'
-        else:
-            return str(flops) + ' FLOPs'
-    else:
-        if units == 'GFLOPs':
-            return str(round(flops / 10.**9, precision)) + ' ' + units
-        elif units == 'MFLOPs':
-            return str(round(flops / 10.**6, precision)) + ' ' + units
-        elif units == 'KFLOPs':
-            return str(round(flops / 10.**3, precision)) + ' ' + units
-        else:
-            return str(flops) + ' FLOPs'
-
-
-def params_to_string(num_params, units=None, precision=2):
-    """Convert parameter number into a string.
-
-    Args:
-        num_params (float): Parameter number to be converted.
-        units (str | None): Converted parameter units. Options are None, 'M',
-            'K' and ''. If set to None, it will automatically choose the most
-            suitable unit for the parameter number. Default: None.
-        precision (int): Number of digits after the decimal point. Default: 2.
-
-    Returns:
-        str: The converted parameter number with units.
-
-    Examples:
-        >>> params_to_string(1e9)
-        '1000.0 M'
-        >>> params_to_string(2e5)
-        '200.0 k'
-        >>> params_to_string(3e-9)
-        '3e-09'
-    """
-    if units is None:
-        if num_params // 10**6 > 0:
-            return str(round(num_params / 10**6, precision)) + ' M'
-        elif num_params // 10**3 > 0:
-            return str(round(num_params / 10**3, precision)) + ' k'
-        else:
-            return str(num_params)
-    else:
-        if units == 'M':
-            return str(round(num_params / 10.**6, precision)) + ' ' + units
-        elif units == 'K':
-            return str(round(num_params / 10.**3, precision)) + ' ' + units
-        else:
-            return str(num_params)
-
-
-def print_model_with_flops(model,
-                           total_flops,
-                           total_params,
-                           units='GFLOPs',
-                           precision=3,
-                           ost=sys.stdout,
-                           flush=False):
-    """Print a model with FLOPs for each layer.
-
-    Args:
-        model (nn.Module): The model to be printed.
-        total_flops (float): Total FLOPs of the model.
-        total_params (float): Total parameter counts of the model.
-        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
-        precision (int): Number of digits after the decimal point. Default: 3.
-        ost (stream): same as `file` param in :func:`print`.
-            Default: sys.stdout.
-        flush (bool): same as that in :func:`print`. Default: False.
-
-    Example:
-        >>> class ExampleModel(nn.Module):
-
-        >>> def __init__(self):
-        >>>     super().__init__()
-        >>>     self.conv1 = nn.Conv2d(3, 8, 3)
-        >>>     self.conv2 = nn.Conv2d(8, 256, 3)
-        >>>     self.conv3 = nn.Conv2d(256, 8, 3)
-        >>>     self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
-        >>>     self.flatten = nn.Flatten()
-        >>>     self.fc = nn.Linear(8, 1)
-
-        >>> def forward(self, x):
-        >>>     x = self.conv1(x)
-        >>>     x = self.conv2(x)
-        >>>     x = self.conv3(x)
-        >>>     x = self.avg_pool(x)
-        >>>     x = self.flatten(x)
-        >>>     x = self.fc(x)
-        >>>     return x
-
-        >>> model = ExampleModel()
-        >>> x = (3, 16, 16)
-        To print the complexity information for each layer, you can use
-        >>> get_model_complexity_info(model, x)
-        or call this function directly:
-        >>> print_model_with_flops(model, 4579784.0, 37361)
-        ExampleModel(
-          0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
-          (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1))  # noqa: E501
-          (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
-          (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
-          (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
-          (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
-          (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
-        )
-    """
-
-    def accumulate_params(self):
-        if is_supported_instance(self):
-            return self.__params__
-        else:
-            total = 0
-            for m in self.children():
-                total += m.accumulate_params()
-            return total
-
-    def accumulate_flops(self):
-        if is_supported_instance(self):
-            return self.__flops__ / model.__batch_counter__
-        else:
-            total = 0
-            for m in self.children():
-                total += m.accumulate_flops()
-            return total
-
-    def flops_repr(self):
-        accumulated_num_params = self.accumulate_params()
-        accumulated_flops_cost = self.accumulate_flops()
-        return ', '.join([
-            params_to_string(
-                accumulated_num_params, units='M', precision=precision),
-            '{:.3%} Params'.format(accumulated_num_params / total_params),
-            flops_to_string(
-                accumulated_flops_cost, units=units, precision=precision),
-            '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
-            self.original_extra_repr()
-        ])
-
-    def add_extra_repr(m):
-        m.accumulate_flops = accumulate_flops.__get__(m)
-        m.accumulate_params = accumulate_params.__get__(m)
-        flops_extra_repr = flops_repr.__get__(m)
-        if m.extra_repr != flops_extra_repr:
-            m.original_extra_repr = m.extra_repr
-            m.extra_repr = flops_extra_repr
-            assert m.extra_repr != m.original_extra_repr
-
-    def del_extra_repr(m):
-        if hasattr(m, 'original_extra_repr'):
-            m.extra_repr = m.original_extra_repr
-            del m.original_extra_repr
-        if hasattr(m, 'accumulate_flops'):
-            del m.accumulate_flops
-        if hasattr(m, 'accumulate_params'):
-            del m.accumulate_params
-
-    model.apply(add_extra_repr)
-    print(model, file=ost, flush=flush)
-    model.apply(del_extra_repr)
-
-
-def get_model_parameters_number(model):
-    """Calculate parameter number of a model.
-
-    Args:
-        model (nn.module): The model for parameter number calculation.
-
-    Returns:
-        float: Parameter number of the model.
-    """
-    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
-    return num_params
-
-
-def add_flops_counting_methods(net_main_module):
-    # adding additional methods to the existing module object,
-    # this is done this way so that each function has access to self object
-    net_main_module.start_flops_count = start_flops_count.__get__(
-        net_main_module)
-    net_main_module.stop_flops_count = stop_flops_count.__get__(
-        net_main_module)
-    net_main_module.reset_flops_count = reset_flops_count.__get__(
-        net_main_module)
-    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(  # noqa: E501
-        net_main_module)
-
-    net_main_module.reset_flops_count()
-
-    return net_main_module
-
-
-def compute_average_flops_cost(self):
-    """Compute average FLOPs cost.
-
-    A method to compute the average FLOPs cost, which will be available after
-    ``add_flops_counting_methods()`` is called on a desired net object.
-
-    Returns:
-        tuple[float, float]: Mean FLOPs consumption per image and the total
-            parameter count of the model.
-    """
-    batches_count = self.__batch_counter__
-    flops_sum = 0
-    for module in self.modules():
-        if is_supported_instance(module):
-            flops_sum += module.__flops__
-    params_sum = get_model_parameters_number(self)
-    return flops_sum / batches_count, params_sum
-
-
-def start_flops_count(self):
-    """Activate the computation of mean flops consumption per image.
-
-    A method to activate the computation of mean flops consumption per image,
-    which will be available after ``add_flops_counting_methods()`` is called on
-    a desired net object. It should be called before running the network.
-    """
-    add_batch_counter_hook_function(self)
-
-    def add_flops_counter_hook_function(module):
-        if is_supported_instance(module):
-            if hasattr(module, '__flops_handle__'):
-                return
-
-            handle = module.register_forward_hook(
-                get_modules_mapping()[type(module)])
-
-            module.__flops_handle__ = handle
-
-    self.apply(add_flops_counter_hook_function)
-
-
-def stop_flops_count(self):
-    """Stop computing the mean flops consumption per image.
-
-    A method to stop computing the mean flops consumption per image, which will
-    be available after ``add_flops_counting_methods()`` is called on a desired
-    net object. It can be called at any time to pause the computation.
-    """
-    remove_batch_counter_hook_function(self)
-    self.apply(remove_flops_counter_hook_function)
-
-
-def reset_flops_count(self):
-    """Reset statistics computed so far.
-
-    A method to reset the computed statistics, which will be available after
-    ``add_flops_counting_methods()`` is called on a desired net object.
-    """
-    add_batch_counter_variables_or_reset(self)
-    self.apply(add_flops_counter_variable_or_reset)
-
-
-# ---- Internal functions
-def empty_flops_counter_hook(module, input, output):
-    module.__flops__ += 0
-
-
-def upsample_flops_counter_hook(module, input, output):
-    output_size = output[0]
-    batch_size = output_size.shape[0]
-    output_elements_count = batch_size
-    for val in output_size.shape[1:]:
-        output_elements_count *= val
-    module.__flops__ += int(output_elements_count)
-
-
-def relu_flops_counter_hook(module, input, output):
-    active_elements_count = output.numel()
-    module.__flops__ += int(active_elements_count)
-
-
-def linear_flops_counter_hook(module, input, output):
-    input = input[0]
-    output_last_dim = output.shape[
-        -1]  # pytorch checks dimensions, so here we don't care much
-    module.__flops__ += int(np.prod(input.shape) * output_last_dim)
-
-
-def pool_flops_counter_hook(module, input, output):
-    input = input[0]
-    module.__flops__ += int(np.prod(input.shape))
-
-
-def norm_flops_counter_hook(module, input, output):
-    input = input[0]
-
-    batch_flops = np.prod(input.shape)
-    if (getattr(module, 'affine', False)
-            or getattr(module, 'elementwise_affine', False)):
-        batch_flops *= 2
-    module.__flops__ += int(batch_flops)
-
-
-def deconv_flops_counter_hook(conv_module, input, output):
-    # Can have multiple inputs, getting the first one
-    input = input[0]
-
-    batch_size = input.shape[0]
-    input_height, input_width = input.shape[2:]
-
-    kernel_height, kernel_width = conv_module.kernel_size
-    in_channels = conv_module.in_channels
-    out_channels = conv_module.out_channels
-    groups = conv_module.groups
-
-    filters_per_channel = out_channels // groups
-    conv_per_position_flops = (
-        kernel_height * kernel_width * in_channels * filters_per_channel)
-
-    active_elements_count = batch_size * input_height * input_width
-    overall_conv_flops = conv_per_position_flops * active_elements_count
-    bias_flops = 0
-    if conv_module.bias is not None:
-        output_height, output_width = output.shape[2:]
-        bias_flops = out_channels * batch_size * output_height * output_width
-    overall_flops = overall_conv_flops + bias_flops
-
-    conv_module.__flops__ += int(overall_flops)
-
-
-def conv_flops_counter_hook(conv_module, input, output):
-    # Can have multiple inputs, getting the first one
-    input = input[0]
-
-    batch_size = input.shape[0]
-    output_dims = list(output.shape[2:])
-
-    kernel_dims = list(conv_module.kernel_size)
-    in_channels = conv_module.in_channels
-    out_channels = conv_module.out_channels
-    groups = conv_module.groups
-
-    filters_per_channel = out_channels // groups
-    conv_per_position_flops = int(
-        np.prod(kernel_dims)) * in_channels * filters_per_channel
-
-    active_elements_count = batch_size * int(np.prod(output_dims))
-
-    overall_conv_flops = conv_per_position_flops * active_elements_count
-
-    bias_flops = 0
-
-    if conv_module.bias is not None:
-
-        bias_flops = out_channels * active_elements_count
-
-    overall_flops = overall_conv_flops + bias_flops
-
-    conv_module.__flops__ += int(overall_flops)
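-
-# Worked example (illustrative numbers, not from this file): for a
-# Conv2d(3, 8, kernel_size=3) on a (1, 3, 16, 16) input, the output is
-# (1, 8, 14, 14), so
-#   conv_per_position_flops = 3 * 3 * 3 * 8 = 216
-#   active_elements_count   = 1 * 14 * 14  = 196
-#   overall_conv_flops      = 216 * 196    = 42336
-# plus bias_flops = 8 * 196 = 1568 when a bias term is present.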
-
-
-def batch_counter_hook(module, input, output):
-    batch_size = 1
-    if len(input) > 0:
-        # Can have multiple inputs, getting the first one
-        input = input[0]
-        batch_size = len(input)
-    else:
-        print('Warning! No positional inputs found for a module, '
-              'assuming batch size is 1.')
-    module.__batch_counter__ += batch_size
-
-
-def add_batch_counter_variables_or_reset(module):
-
-    module.__batch_counter__ = 0
-
-
-def add_batch_counter_hook_function(module):
-    if hasattr(module, '__batch_counter_handle__'):
-        return
-
-    handle = module.register_forward_hook(batch_counter_hook)
-    module.__batch_counter_handle__ = handle
-
-
-def remove_batch_counter_hook_function(module):
-    if hasattr(module, '__batch_counter_handle__'):
-        module.__batch_counter_handle__.remove()
-        del module.__batch_counter_handle__
-
-
-def add_flops_counter_variable_or_reset(module):
-    if is_supported_instance(module):
-        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
-            print('Warning: variables __flops__ or __params__ are already '
-                  'defined for the module ' + type(module).__name__ +
-                  '. ptflops can affect your code!')
-        module.__flops__ = 0
-        module.__params__ = get_model_parameters_number(module)
-
-
-def is_supported_instance(module):
-    return type(module) in get_modules_mapping()
-
-
-def remove_flops_counter_hook_function(module):
-    if is_supported_instance(module):
-        if hasattr(module, '__flops_handle__'):
-            module.__flops_handle__.remove()
-            del module.__flops_handle__
-
-
-def get_modules_mapping():
-    return {
-        # convolutions
-        nn.Conv1d: conv_flops_counter_hook,
-        nn.Conv2d: conv_flops_counter_hook,
-        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
-        nn.Conv3d: conv_flops_counter_hook,
-        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
-        # activations
-        nn.ReLU: relu_flops_counter_hook,
-        nn.PReLU: relu_flops_counter_hook,
-        nn.ELU: relu_flops_counter_hook,
-        nn.LeakyReLU: relu_flops_counter_hook,
-        nn.ReLU6: relu_flops_counter_hook,
-        # poolings
-        nn.MaxPool1d: pool_flops_counter_hook,
-        nn.AvgPool1d: pool_flops_counter_hook,
-        nn.AvgPool2d: pool_flops_counter_hook,
-        nn.MaxPool2d: pool_flops_counter_hook,
-        mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
-        nn.MaxPool3d: pool_flops_counter_hook,
-        mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
-        nn.AvgPool3d: pool_flops_counter_hook,
-        nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
-        nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
-        nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
-        nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
-        nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
-        nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
-        # normalizations
-        nn.BatchNorm1d: norm_flops_counter_hook,
-        nn.BatchNorm2d: norm_flops_counter_hook,
-        nn.BatchNorm3d: norm_flops_counter_hook,
-        nn.GroupNorm: norm_flops_counter_hook,
-        nn.InstanceNorm1d: norm_flops_counter_hook,
-        nn.InstanceNorm2d: norm_flops_counter_hook,
-        nn.InstanceNorm3d: norm_flops_counter_hook,
-        nn.LayerNorm: norm_flops_counter_hook,
-        # FC
-        nn.Linear: linear_flops_counter_hook,
-        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
-        # Upscale
-        nn.Upsample: upsample_flops_counter_hook,
-        # Deconvolution
-        nn.ConvTranspose2d: deconv_flops_counter_hook,
-        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
-    }
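-
-# Putting the low-level API together (a sketch; ``MyNet`` stands for any
-# ``nn.Module`` built from the supported layers above):
-#   net = add_flops_counting_methods(MyNet())
-#   net.eval()
-#   net.start_flops_count()
-#   _ = net(torch.ones((1, 3, 224, 224)))
-#   flops, params = net.compute_average_flops_cost()
-#   net.stop_flops_count()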
diff --git a/spaces/abidlabs/image-classifier/app.py b/spaces/abidlabs/image-classifier/app.py
deleted file mode 100644
index b0b7f8262adc35cc8ac59ea680a9fdbf3cf2dc68..0000000000000000000000000000000000000000
--- a/spaces/abidlabs/image-classifier/app.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from transformers import ViTFeatureExtractor, ViTForImageClassification
-from PIL import Image
-import torch
-import torch.nn.functional as F
-import time
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
-model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(device)
-
-def predict(image):
-    inputs = feature_extractor(images=image, return_tensors="pt").to(device)
-    outputs = model(**inputs)
-    logits = outputs.logits
-    predicted_class_prob = F.softmax(logits, dim=-1).detach().cpu().numpy().max()
-    predicted_class_idx = logits.argmax(-1).item()
-    label = model.config.id2label[predicted_class_idx].split(",")[0]
-    time.sleep(2)
-    return {label: float(predicted_class_prob)}
- 
-import gradio as gr
-
-gr.Interface(predict, gr.Image(type="pil"), "label").queue(concurrency_count=1).launch()
\ No newline at end of file
diff --git a/spaces/adirik/stylemc-demo/encoder4editing/criteria/moco_loss.py b/spaces/adirik/stylemc-demo/encoder4editing/criteria/moco_loss.py
deleted file mode 100644
index 8fb13fbd426202cff9014c876c85b0d5c4ec6a9d..0000000000000000000000000000000000000000
--- a/spaces/adirik/stylemc-demo/encoder4editing/criteria/moco_loss.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from configs.paths_config import model_paths
-
-
-class MocoLoss(nn.Module):
-
-    def __init__(self, opts):
-        super(MocoLoss, self).__init__()
-        print("Loading MOCO model from path: {}".format(model_paths["moco"]))
-        self.model = self.__load_model()
-        self.model.eval()
-        for param in self.model.parameters():
-            param.requires_grad = False
-
-    @staticmethod
-    def __load_model():
-        import torchvision.models as models
-        model = models.__dict__["resnet50"]()
-        # freeze all layers but the last fc
-        for name, param in model.named_parameters():
-            if name not in ['fc.weight', 'fc.bias']:
-                param.requires_grad = False
-        checkpoint = torch.load(model_paths['moco'], map_location="cpu")
-        state_dict = checkpoint['state_dict']
-        # rename moco pre-trained keys
-        for k in list(state_dict.keys()):
-            # retain only encoder_q up to before the embedding layer
-            if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
-                # remove prefix
-                state_dict[k[len("module.encoder_q."):]] = state_dict[k]
-            # delete renamed or unused k
-            del state_dict[k]
-        msg = model.load_state_dict(state_dict, strict=False)
-        assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
-        # remove output layer
-        model = nn.Sequential(*list(model.children())[:-1]).cuda()
-        return model
-
-    def extract_feats(self, x):
-        x = F.interpolate(x, size=224)
-        x_feats = self.model(x)
-        x_feats = nn.functional.normalize(x_feats, dim=1)
-        x_feats = x_feats.squeeze()
-        return x_feats
-
-    def forward(self, y_hat, y, x):
-        n_samples = x.shape[0]
-        x_feats = self.extract_feats(x)
-        y_feats = self.extract_feats(y)
-        y_hat_feats = self.extract_feats(y_hat)
-        y_feats = y_feats.detach()
-        loss = 0
-        sim_improvement = 0
-        sim_logs = []
-        count = 0
-        for i in range(n_samples):
-            diff_target = y_hat_feats[i].dot(y_feats[i])
-            diff_input = y_hat_feats[i].dot(x_feats[i])
-            diff_views = y_feats[i].dot(x_feats[i])
-            sim_logs.append({'diff_target': float(diff_target),
-                             'diff_input': float(diff_input),
-                             'diff_views': float(diff_views)})
-            loss += 1 - diff_target
-            sim_diff = float(diff_target) - float(diff_views)
-            sim_improvement += sim_diff
-            count += 1
-
-        return loss / count, sim_improvement / count, sim_logs
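-
-# Usage sketch (shapes are illustrative; requires the MOCO checkpoint at
-# ``model_paths['moco']`` and a CUDA device, since the encoder is moved to GPU):
-#   loss_fn = MocoLoss(opts=None)
-#   y_hat, y, x = (torch.randn(4, 3, 256, 256).cuda() for _ in range(3))
-#   loss, sim_improvement, sim_logs = loss_fn(y_hat, y, x)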
diff --git a/spaces/akhaliq/BlendGAN/op/upfirdn2d.py b/spaces/akhaliq/BlendGAN/op/upfirdn2d.py
deleted file mode 100644
index 464005c7aa9fd6a45550ac231a150acb3b30163e..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/BlendGAN/op/upfirdn2d.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import os
-
-import torch
-from torch.nn import functional as F
-from torch.autograd import Function
-from torch.utils.cpp_extension import load
-
-
-module_path = os.path.dirname(__file__)
-
-cuda_available = torch.cuda.is_available()
-
-if cuda_available:
-    upfirdn2d_op = load(
-        "upfirdn2d",
-        sources=[
-            os.path.join(module_path, "upfirdn2d.cpp"),
-            os.path.join(module_path, "upfirdn2d_kernel.cu"),
-        ],
-    )
-else:
-    upfirdn2d_op = None
-    print("upfirdn2d.py is running on cpu")
-
-
-class UpFirDn2dBackward(Function):
-    @staticmethod
-    def forward(
-        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
-    ):
-
-        up_x, up_y = up
-        down_x, down_y = down
-        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
-        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
-        grad_input = upfirdn2d_op.upfirdn2d(
-            grad_output,
-            grad_kernel,
-            down_x,
-            down_y,
-            up_x,
-            up_y,
-            g_pad_x0,
-            g_pad_x1,
-            g_pad_y0,
-            g_pad_y1,
-        )
-        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
-
-        ctx.save_for_backward(kernel)
-
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        ctx.up_x = up_x
-        ctx.up_y = up_y
-        ctx.down_x = down_x
-        ctx.down_y = down_y
-        ctx.pad_x0 = pad_x0
-        ctx.pad_x1 = pad_x1
-        ctx.pad_y0 = pad_y0
-        ctx.pad_y1 = pad_y1
-        ctx.in_size = in_size
-        ctx.out_size = out_size
-
-        return grad_input
-
-    @staticmethod
-    def backward(ctx, gradgrad_input):
-        kernel, = ctx.saved_tensors
-
-        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
-
-        gradgrad_out = upfirdn2d_op.upfirdn2d(
-            gradgrad_input,
-            kernel,
-            ctx.up_x,
-            ctx.up_y,
-            ctx.down_x,
-            ctx.down_y,
-            ctx.pad_x0,
-            ctx.pad_x1,
-            ctx.pad_y0,
-            ctx.pad_y1,
-        )
-        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
-        gradgrad_out = gradgrad_out.view(
-            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
-        )
-
-        return gradgrad_out, None, None, None, None, None, None, None, None
-
-
-class UpFirDn2d(Function):
-    @staticmethod
-    def forward(ctx, input, kernel, up, down, pad):
-        up_x, up_y = up
-        down_x, down_y = down
-        pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-        kernel_h, kernel_w = kernel.shape
-        batch, channel, in_h, in_w = input.shape
-        ctx.in_size = input.shape
-
-        input = input.reshape(-1, in_h, in_w, 1)
-
-        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
-        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-        ctx.out_size = (out_h, out_w)
-
-        ctx.up = (up_x, up_y)
-        ctx.down = (down_x, down_y)
-        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
-        g_pad_x0 = kernel_w - pad_x0 - 1
-        g_pad_y0 = kernel_h - pad_y0 - 1
-        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
-        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
-        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
-        out = upfirdn2d_op.upfirdn2d(
-            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-        )
-        # out = out.view(major, out_h, out_w, minor)
-        out = out.view(-1, channel, out_h, out_w)
-
-        return out
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        kernel, grad_kernel = ctx.saved_tensors
-
-        grad_input = UpFirDn2dBackward.apply(
-            grad_output,
-            kernel,
-            grad_kernel,
-            ctx.up,
-            ctx.down,
-            ctx.pad,
-            ctx.g_pad,
-            ctx.in_size,
-            ctx.out_size,
-        )
-
-        return grad_input, None, None, None, None
-
-
-def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
-    if input.device.type == "cpu":
-        out = upfirdn2d_native(
-            input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
-        )
-
-    else:
-        out = UpFirDn2d.apply(
-            input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
-        )
-
-    return out
-
-
-def upfirdn2d_native(
-    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-):
-    _, channel, in_h, in_w = input.shape
-    input = input.reshape(-1, in_h, in_w, 1)
-
-    _, in_h, in_w, minor = input.shape
-    kernel_h, kernel_w = kernel.shape
-
-    out = input.view(-1, in_h, 1, in_w, 1, minor)
-    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
-    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
-    out = F.pad(
-        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
-    )
-    out = out[
-        :,
-        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
-        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
-        :,
-    ]
-
-    out = out.permute(0, 3, 1, 2)
-    out = out.reshape(
-        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
-    )
-    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
-    out = F.conv2d(out, w)
-    out = out.reshape(
-        -1,
-        minor,
-        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
-        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
-    )
-    out = out.permute(0, 2, 3, 1)
-    out = out[:, ::down_y, ::down_x, :]
-
-    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
-    return out.view(-1, channel, out_h, out_w)
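-
-# Usage sketch (a 2x upsample with a separable FIR blur kernel; the kernel
-# values are illustrative, not taken from this repo):
-#   k = torch.tensor([1., 3., 3., 1.])
-#   kernel = k[None, :] * k[:, None]
-#   kernel = kernel / kernel.sum()
-#   x = torch.randn(1, 3, 64, 64)
-#   y = upfirdn2d(x, kernel, up=2, down=1, pad=(2, 1))  # -> (1, 3, 128, 128)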
diff --git a/spaces/akhaliq/GPEN/face_detect/layers/functions/prior_box.py b/spaces/akhaliq/GPEN/face_detect/layers/functions/prior_box.py
deleted file mode 100644
index 80c7f858371ed71f39ed609eb44b423d8693bf61..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/GPEN/face_detect/layers/functions/prior_box.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import torch
-from itertools import product as product
-import numpy as np
-from math import ceil
-
-
-class PriorBox(object):
-    def __init__(self, cfg, image_size=None, phase='train'):
-        super(PriorBox, self).__init__()
-        self.min_sizes = cfg['min_sizes']
-        self.steps = cfg['steps']
-        self.clip = cfg['clip']
-        self.image_size = image_size
-        self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
-        self.name = "s"
-
-    def forward(self):
-        anchors = []
-        for k, f in enumerate(self.feature_maps):
-            min_sizes = self.min_sizes[k]
-            for i, j in product(range(f[0]), range(f[1])):
-                for min_size in min_sizes:
-                    s_kx = min_size / self.image_size[1]
-                    s_ky = min_size / self.image_size[0]
-                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
-                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
-                    for cy, cx in product(dense_cy, dense_cx):
-                        anchors += [cx, cy, s_kx, s_ky]
-
-        # back to torch land
-        output = torch.Tensor(anchors).view(-1, 4)
-        if self.clip:
-            output.clamp_(max=1, min=0)
-        return output
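-
-# Usage sketch (the cfg values mirror common RetinaFace settings and are an
-# assumption, not taken from this file):
-#   cfg = {'min_sizes': [[16, 32], [64, 128], [256, 512]],
-#          'steps': [8, 16, 32], 'clip': False}
-#   priors = PriorBox(cfg, image_size=(640, 640)).forward()  # (num_anchors, 4)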
diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/models/resunet_subbandtime.py b/spaces/akhaliq/Music_Source_Separation/bytesep/models/resunet_subbandtime.py
deleted file mode 100644
index d5ac3c5cd5aa2e3d49b7513b2e577ba148c80d7e..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Music_Source_Separation/bytesep/models/resunet_subbandtime.py
+++ /dev/null
@@ -1,545 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torchlibrosa.stft import ISTFT, STFT, magphase
-
-from bytesep.models.pytorch_modules import Base, init_bn, init_layer
-from bytesep.models.subband_tools.pqmf import PQMF
-
-
-class ConvBlockRes(nn.Module):
-    def __init__(self, in_channels, out_channels, kernel_size, activation, momentum):
-        r"""Residual block."""
-        super(ConvBlockRes, self).__init__()
-
-        self.activation = activation
-        padding = [kernel_size[0] // 2, kernel_size[1] // 2]
-
-        self.bn1 = nn.BatchNorm2d(in_channels, momentum=momentum)
-        self.bn2 = nn.BatchNorm2d(out_channels, momentum=momentum)
-
-        self.conv1 = nn.Conv2d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=(1, 1),
-            dilation=(1, 1),
-            padding=padding,
-            bias=False,
-        )
-
-        self.conv2 = nn.Conv2d(
-            in_channels=out_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=(1, 1),
-            dilation=(1, 1),
-            padding=padding,
-            bias=False,
-        )
-
-        if in_channels != out_channels:
-            self.shortcut = nn.Conv2d(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=(1, 1),
-                stride=(1, 1),
-                padding=(0, 0),
-            )
-            self.is_shortcut = True
-        else:
-            self.is_shortcut = False
-
-        self.init_weights()
-
-    def init_weights(self):
-        init_bn(self.bn1)
-        init_bn(self.bn2)
-        init_layer(self.conv1)
-        init_layer(self.conv2)
-
-        if self.is_shortcut:
-            init_layer(self.shortcut)
-
-    def forward(self, x):
-        origin = x
-        x = self.conv1(F.leaky_relu_(self.bn1(x), negative_slope=0.01))
-        x = self.conv2(F.leaky_relu_(self.bn2(x), negative_slope=0.01))
-
-        if self.is_shortcut:
-            return self.shortcut(origin) + x
-        else:
-            return origin + x
-
-
-class EncoderBlockRes4B(nn.Module):
-    def __init__(
-        self, in_channels, out_channels, kernel_size, downsample, activation, momentum
-    ):
-        r"""Encoder block, contains 8 convolutional layers."""
-        super(EncoderBlockRes4B, self).__init__()
-
-        self.conv_block1 = ConvBlockRes(
-            in_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block2 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block3 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block4 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.downsample = downsample
-
-    def forward(self, x):
-        encoder = self.conv_block1(x)
-        encoder = self.conv_block2(encoder)
-        encoder = self.conv_block3(encoder)
-        encoder = self.conv_block4(encoder)
-        encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
-        return encoder_pool, encoder
-
-
-class DecoderBlockRes4B(nn.Module):
-    def __init__(
-        self, in_channels, out_channels, kernel_size, upsample, activation, momentum
-    ):
-        r"""Decoder block, contains 1 transpose convolutional and 8 convolutional layers."""
-        super(DecoderBlockRes4B, self).__init__()
-        self.kernel_size = kernel_size
-        self.stride = upsample
-        self.activation = activation
-
-        self.conv1 = torch.nn.ConvTranspose2d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=self.stride,
-            stride=self.stride,
-            padding=(0, 0),
-            bias=False,
-            dilation=(1, 1),
-        )
-
-        self.bn1 = nn.BatchNorm2d(in_channels, momentum=momentum)
-        self.conv_block2 = ConvBlockRes(
-            out_channels * 2, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block3 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block4 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-        self.conv_block5 = ConvBlockRes(
-            out_channels, out_channels, kernel_size, activation, momentum
-        )
-
-        self.init_weights()
-
-    def init_weights(self):
-        init_bn(self.bn1)
-        init_layer(self.conv1)
-
-    def forward(self, input_tensor, concat_tensor):
-        x = self.conv1(F.relu_(self.bn1(input_tensor)))
-        x = torch.cat((x, concat_tensor), dim=1)
-        x = self.conv_block2(x)
-        x = self.conv_block3(x)
-        x = self.conv_block4(x)
-        x = self.conv_block5(x)
-        return x
-
-
-class ResUNet143_Subbandtime(nn.Module, Base):
-    def __init__(self, input_channels, target_sources_num):
-        super(ResUNet143_Subbandtime, self).__init__()
-
-        self.input_channels = input_channels
-        self.target_sources_num = target_sources_num
-
-        window_size = 512
-        hop_size = 110
-        center = True
-        pad_mode = "reflect"
-        window = "hann"
-        activation = "leaky_relu"
-        momentum = 0.01
-
-        self.subbands_num = 4
-        self.K = 4  # outputs: |M|, cos∠M, sin∠M, Q
-
-        self.downsample_ratio = 2 ** 5  # This number equals 2^{#encoder_blocks}
-
-        self.pqmf = PQMF(
-            N=self.subbands_num,
-            M=64,
-            project_root='bytesep/models/subband_tools/filters',
-        )
-
-        self.stft = STFT(
-            n_fft=window_size,
-            hop_length=hop_size,
-            win_length=window_size,
-            window=window,
-            center=center,
-            pad_mode=pad_mode,
-            freeze_parameters=True,
-        )
-
-        self.istft = ISTFT(
-            n_fft=window_size,
-            hop_length=hop_size,
-            win_length=window_size,
-            window=window,
-            center=center,
-            pad_mode=pad_mode,
-            freeze_parameters=True,
-        )
-
-        self.bn0 = nn.BatchNorm2d(window_size // 2 + 1, momentum=momentum)
-
-        self.encoder_block1 = EncoderBlockRes4B(
-            in_channels=input_channels * self.subbands_num,
-            out_channels=32,
-            kernel_size=(3, 3),
-            downsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.encoder_block2 = EncoderBlockRes4B(
-            in_channels=32,
-            out_channels=64,
-            kernel_size=(3, 3),
-            downsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.encoder_block3 = EncoderBlockRes4B(
-            in_channels=64,
-            out_channels=128,
-            kernel_size=(3, 3),
-            downsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.encoder_block4 = EncoderBlockRes4B(
-            in_channels=128,
-            out_channels=256,
-            kernel_size=(3, 3),
-            downsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.encoder_block5 = EncoderBlockRes4B(
-            in_channels=256,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.encoder_block6 = EncoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(1, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.conv_block7a = EncoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(1, 1),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.conv_block7b = EncoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(1, 1),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.conv_block7c = EncoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(1, 1),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.conv_block7d = EncoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            downsample=(1, 1),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block1 = DecoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            upsample=(1, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block2 = DecoderBlockRes4B(
-            in_channels=384,
-            out_channels=384,
-            kernel_size=(3, 3),
-            upsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block3 = DecoderBlockRes4B(
-            in_channels=384,
-            out_channels=256,
-            kernel_size=(3, 3),
-            upsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block4 = DecoderBlockRes4B(
-            in_channels=256,
-            out_channels=128,
-            kernel_size=(3, 3),
-            upsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block5 = DecoderBlockRes4B(
-            in_channels=128,
-            out_channels=64,
-            kernel_size=(3, 3),
-            upsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-        self.decoder_block6 = DecoderBlockRes4B(
-            in_channels=64,
-            out_channels=32,
-            kernel_size=(3, 3),
-            upsample=(2, 2),
-            activation=activation,
-            momentum=momentum,
-        )
-
-        self.after_conv_block1 = EncoderBlockRes4B(
-            in_channels=32,
-            out_channels=32,
-            kernel_size=(3, 3),
-            downsample=(1, 1),
-            activation=activation,
-            momentum=momentum,
-        )
-
-        self.after_conv2 = nn.Conv2d(
-            in_channels=32,
-            out_channels=target_sources_num
-            * input_channels
-            * self.K
-            * self.subbands_num,
-            kernel_size=(1, 1),
-            stride=(1, 1),
-            padding=(0, 0),
-            bias=True,
-        )
-
-        self.init_weights()
-
-    def init_weights(self):
-        init_bn(self.bn0)
-        init_layer(self.after_conv2)
-
-    def feature_maps_to_wav(
-        self,
-        input_tensor: torch.Tensor,
-        sp: torch.Tensor,
-        sin_in: torch.Tensor,
-        cos_in: torch.Tensor,
-        audio_length: int,
-    ) -> torch.Tensor:
-        r"""Convert feature maps to waveform.
-
-        Args:
-            input_tensor: (batch_size, target_sources_num * input_channels * self.K, time_steps, freq_bins)
-            sp: (batch_size, target_sources_num * input_channels, time_steps, freq_bins)
-            sin_in: (batch_size, target_sources_num * input_channels, time_steps, freq_bins)
-            cos_in: (batch_size, target_sources_num * input_channels, time_steps, freq_bins)
-
-        Outputs:
-            waveform: (batch_size, target_sources_num * input_channels, segment_samples)
-        """
-        batch_size, _, time_steps, freq_bins = input_tensor.shape
-
-        x = input_tensor.reshape(
-            batch_size,
-            self.target_sources_num,
-            self.input_channels,
-            self.K,
-            time_steps,
-            freq_bins,
-        )
-        # x: (batch_size, target_sources_num, input_channels, K, time_steps, freq_bins)
-
-        mask_mag = torch.sigmoid(x[:, :, :, 0, :, :])
-        _mask_real = torch.tanh(x[:, :, :, 1, :, :])
-        _mask_imag = torch.tanh(x[:, :, :, 2, :, :])
-        linear_mag = torch.tanh(x[:, :, :, 3, :, :])
-        _, mask_cos, mask_sin = magphase(_mask_real, _mask_imag)
-        # mask_cos, mask_sin: (batch_size, target_sources_num, input_channels, time_steps, freq_bins)
-
-        # Y = |Y|cos∠Y + j|Y|sin∠Y
-        #   = |Y|cos(∠X + ∠M) + j|Y|sin(∠X + ∠M)
-        #   = |Y|(cos∠X cos∠M - sin∠X sin∠M) + j|Y|(sin∠X cos∠M + cos∠X sin∠M)
-        out_cos = (
-            cos_in[:, None, :, :, :] * mask_cos - sin_in[:, None, :, :, :] * mask_sin
-        )
-        out_sin = (
-            sin_in[:, None, :, :, :] * mask_cos + cos_in[:, None, :, :, :] * mask_sin
-        )
-        # out_cos: (batch_size, target_sources_num, input_channels, time_steps, freq_bins)
-        # out_sin: (batch_size, target_sources_num, input_channels, time_steps, freq_bins)
-
-        # Calculate |Y|.
-        out_mag = F.relu_(sp[:, None, :, :, :] * mask_mag + linear_mag)
-        # out_mag: (batch_size, target_sources_num, input_channels, time_steps, freq_bins)
-
-        # Calculate Y_{real} and Y_{imag} for ISTFT.
-        out_real = out_mag * out_cos
-        out_imag = out_mag * out_sin
-        # out_real, out_imag: (batch_size, target_sources_num, input_channels, time_steps, freq_bins)
-
-        # Reformat shape to (n, 1, time_steps, freq_bins) for ISTFT.
-        shape = (
-            batch_size * self.target_sources_num * self.input_channels,
-            1,
-            time_steps,
-            freq_bins,
-        )
-        out_real = out_real.reshape(shape)
-        out_imag = out_imag.reshape(shape)
-
-        # ISTFT.
-        x = self.istft(out_real, out_imag, audio_length)
-        # (batch_size * target_sources_num * input_channels, segments_num)
-
-        # Reshape.
-        waveform = x.reshape(
-            batch_size, self.target_sources_num * self.input_channels, audio_length
-        )
-        # (batch_size, target_sources_num * input_channels, segments_num)
-
-        return waveform
-
-    def forward(self, input_dict):
-        r"""Forward data into the module.
-
-        Args:
-            input_dict: dict, e.g., {
-                waveform: (batch_size, input_channels, segment_samples),
-                ...,
-            }
-
-        Outputs:
-            output_dict: dict, e.g., {
-                'waveform': (batch_size, input_channels, segment_samples),
-                ...,
-            }
-        """
-        mixtures = input_dict['waveform']
-        # (batch_size, input_channels, segment_samples)
-
-        subband_x = self.pqmf.analysis(mixtures)
-        # subband_x: (batch_size, input_channels * subbands_num, segment_samples)
-
-        mag, cos_in, sin_in = self.wav_to_spectrogram_phase(subband_x)
-        # mag, cos_in, sin_in: (batch_size, input_channels * subbands_num, time_steps, freq_bins)
-
-        # Batch normalize on individual frequency bins.
-        x = mag.transpose(1, 3)
-        x = self.bn0(x)
-        x = x.transpose(1, 3)
-        # (batch_size, input_channels * subbands_num, time_steps, freq_bins)
-
-        # Pad spectrogram to be evenly divided by downsample ratio.
-        origin_len = x.shape[2]
-        pad_len = (
-            int(np.ceil(x.shape[2] / self.downsample_ratio)) * self.downsample_ratio
-            - origin_len
-        )
-        x = F.pad(x, pad=(0, 0, 0, pad_len))
-        # x: (batch_size, input_channels * subbands_num, padded_time_steps, freq_bins)
-
-        # Let frequency bins be evenly divided by 2, e.g., 257 -> 256
-        x = x[..., 0 : x.shape[-1] - 1]  # (bs, input_channels, T, F)
-        # x: (batch_size, input_channels * subbands_num, padded_time_steps, freq_bins)
-
-        # UNet
-        (x1_pool, x1) = self.encoder_block1(x)  # x1_pool: (bs, 32, T / 2, F / 2)
-        (x2_pool, x2) = self.encoder_block2(x1_pool)  # x2_pool: (bs, 64, T / 4, F / 4)
-        (x3_pool, x3) = self.encoder_block3(x2_pool)  # x3_pool: (bs, 128, T / 8, F / 8)
-        (x4_pool, x4) = self.encoder_block4(
-            x3_pool
-        )  # x4_pool: (bs, 256, T / 16, F / 16)
-        (x5_pool, x5) = self.encoder_block5(
-            x4_pool
-        )  # x5_pool: (bs, 384, T / 32, F / 32)
-        (x6_pool, x6) = self.encoder_block6(
-            x5_pool
-        )  # x6_pool: (bs, 384, T / 32, F / 64)
-        (x_center, _) = self.conv_block7a(x6_pool)  # (bs, 384, T / 32, F / 64)
-        (x_center, _) = self.conv_block7b(x_center)  # (bs, 384, T / 32, F / 64)
-        (x_center, _) = self.conv_block7c(x_center)  # (bs, 384, T / 32, F / 64)
-        (x_center, _) = self.conv_block7d(x_center)  # (bs, 384, T / 32, F / 64)
-        x7 = self.decoder_block1(x_center, x6)  # (bs, 384, T / 32, F / 32)
-        x8 = self.decoder_block2(x7, x5)  # (bs, 384, T / 16, F / 16)
-        x9 = self.decoder_block3(x8, x4)  # (bs, 256, T / 8, F / 8)
-        x10 = self.decoder_block4(x9, x3)  # (bs, 128, T / 4, F / 4)
-        x11 = self.decoder_block5(x10, x2)  # (bs, 64, T / 2, F / 2)
-        x12 = self.decoder_block6(x11, x1)  # (bs, 32, T, F)
-        (x, _) = self.after_conv_block1(x12)  # (bs, 32, T, F)
-
-        x = self.after_conv2(x)
-        # (batch_size, subbands_num * target_sources_num * input_channels * self.K, T, F')
-
-        # Recover shape
-        x = F.pad(x, pad=(0, 1))  # Pad frequency, e.g., 256 -> 257.
-
-        x = x[:, :, 0:origin_len, :]
-        # (batch_size, subbands_num * target_sources_num * input_channels * self.K, T, F')
-
-        audio_length = subband_x.shape[2]
-
-        # Convert each subband spectrogram back to a subband waveform, then
-        # synthesize the subband waveforms into a full-band waveform.
-        C1 = x.shape[1] // self.subbands_num
-        C2 = mag.shape[1] // self.subbands_num
-
-        separated_subband_audio = torch.cat(
-            [
-                self.feature_maps_to_wav(
-                    input_tensor=x[:, j * C1 : (j + 1) * C1, :, :],
-                    sp=mag[:, j * C2 : (j + 1) * C2, :, :],
-                    sin_in=sin_in[:, j * C2 : (j + 1) * C2, :, :],
-                    cos_in=cos_in[:, j * C2 : (j + 1) * C2, :, :],
-                    audio_length=audio_length,
-                )
-                for j in range(self.subbands_num)
-            ],
-            dim=1,
-        )
-        # (batch_size, subbands_num * target_sources_num * input_channels, segment_samples)
-
-        separated_audio = self.pqmf.synthesis(separated_subband_audio)
-        # (batch_size, input_channels, segment_samples)
-
-        output_dict = {'waveform': separated_audio}
-
-        return output_dict
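-
-# Shape sketch (illustrative; assumes the PQMF filter files exist under
-# ``bytesep/models/subband_tools/filters``):
-#   model = ResUNet143_Subbandtime(input_channels=2, target_sources_num=1)
-#   out = model({'waveform': torch.randn(1, 2, 44100)})
-#   # out['waveform']: (1, 2, ~44100), the separated source waveform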
diff --git a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/utils/_cmudict.py b/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/utils/_cmudict.py
deleted file mode 100644
index 2cef1f896d4fb78478884fe8e810956998d5e3b3..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/utils/_cmudict.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-
-valid_symbols = [
-  "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2",
-  "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2",
-  "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY",
-  "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1",
-  "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0",
-  "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW",
-  "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH"
-]
-
-_valid_symbol_set = set(valid_symbols)
-
-
-class CMUDict:
-  """Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict"""
-  def __init__(self, file_or_path, keep_ambiguous=True):
-    if isinstance(file_or_path, str):
-      with open(file_or_path, encoding="latin-1") as f:
-        entries = _parse_cmudict(f)
-    else:
-      entries = _parse_cmudict(file_or_path)
-    if not keep_ambiguous:
-      entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
-    self._entries = entries
-
-
-  def __len__(self):
-    return len(self._entries)
-
-
-  def lookup(self, word):
-    """Returns list of ARPAbet pronunciations of the given word."""
-    return self._entries.get(word.upper())
-
-
-
-_alt_re = re.compile(r"\([0-9]+\)")
-
-
-def _parse_cmudict(file):
-  cmudict = {}
-  for line in file:
-    if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"):
-      parts = line.split("  ")
-      word = re.sub(_alt_re, "", parts[0])
-      pronunciation = _get_pronunciation(parts[1])
-      if pronunciation:
-        if word in cmudict:
-          cmudict[word].append(pronunciation)
-        else:
-          cmudict[word] = [pronunciation]
-  return cmudict
-
-
-def _get_pronunciation(s):
-  parts = s.strip().split(" ")
-  for part in parts:
-    if part not in _valid_symbol_set:
-      return None
-  return " ".join(parts)
diff --git a/spaces/akshatsanghvi/spam-email-detection/README.md b/spaces/akshatsanghvi/spam-email-detection/README.md
deleted file mode 100644
index 6fe48d8f892618391ded4242bfcb7f72e683b2e1..0000000000000000000000000000000000000000
--- a/spaces/akshatsanghvi/spam-email-detection/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Spam Email Detection
-emoji: 💌
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: artistic-2.0
----
-# spam-mail-detection
-A simple text classifier in Python that uses the Naive Bayes model to classify
-e-mails as spam or ham, i.e. to detect whether an email or message is spam or not.
-### What is a spam message?
-Spam is any kind of unwanted, unsolicited digital communication that gets sent out in bulk. 
-Often spam is sent via email, but it can also be distributed via text messages, phone calls, or social media.
-
-Dataset downloaded from Kaggle 👉 https://www.kaggle.com/datasets/mfaisalqureshi/spam-email?resource=download
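-
-### Minimal sketch of the approach
-The app's own code is not shown in this README; the snippet below is a generic
-sketch of a Naive Bayes spam classifier built with scikit-learn (the column
-names assume the Kaggle CSV layout and are not confirmed by this repo):
-
-```python
-import pandas as pd
-from sklearn.feature_extraction.text import CountVectorizer
-from sklearn.naive_bayes import MultinomialNB
-
-df = pd.read_csv("spam.csv")                  # columns: Category, Message (assumed)
-vec = CountVectorizer()
-X = vec.fit_transform(df["Message"])          # bag-of-words features
-clf = MultinomialNB().fit(X, df["Category"])  # spam/ham classifier
-print(clf.predict(vec.transform(["Win a free prize now!"])))
-```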
diff --git a/spaces/ali-ghamdan/deoldify/fastai/version.py b/spaces/ali-ghamdan/deoldify/fastai/version.py
deleted file mode 100644
index f533a81bcb2dbae1879f59695714ec82f95f384b..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/deoldify/fastai/version.py
+++ /dev/null
@@ -1,2 +0,0 @@
-__all__ = ['__version__']
-__version__ = '1.0.56.dev0'
diff --git a/spaces/aliabd/SummerTime/model/defaults.py b/spaces/aliabd/SummerTime/model/defaults.py
deleted file mode 100644
index b9acbf3ca368d343c760a4bf48a475d87fcf7ace..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/model/defaults.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .single_doc import PegasusModel
-
-
-class summarizer(PegasusModel):
-    def __init__(self, device="cpu"):
-        super(summarizer, self).__init__(device)
-
-    def show_capability(self):
-        print("Pegasus is the default singe-document summarization model.")
-        super(summarizer, self).show_capability()
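-
-# Usage sketch (illustrative):
-#   model = summarizer()  # Pegasus on CPU by default
-#   model.show_capability()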
diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/datasets.html b/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/datasets.html
deleted file mode 100644
index bcd7bcb81e9e2e6c0700fbf10d31fdc35f8576ee..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/datasets.html
+++ /dev/null
@@ -1,242 +0,0 @@
-<!doctype html>
-<html lang="en">
-<head>
-<meta charset="utf-8">
-<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
-<meta name="generator" content="pdoc 0.7.5" />
-<title>infinibatch.datasets API documentation</title>
-<meta name="description" content="" />
-<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
-<link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>
-<link href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css" rel="stylesheet">
-<style>.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{font-weight:bold}#index h4 + ul{margin-bottom:.6em}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
-<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
-<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
-</head>
-<body>
-<main>
-<article id="content">
-<header>
-<h1 class="title">Module <code>infinibatch.datasets</code></h1>
-</header>
-<section id="section-intro">
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">from .iterators import create_source_iterator, SelectManyIterator, PrefetchIterator, BufferedShuffleIterator, BlockwiseShuffleIterator, MapIterator
-from typing import List, Union, Iterable, Iterator, Callable, Any, Optional, Dict
-import os, sys
-
-&#34;&#34;&#34;
-This module contains common datasets, which are implemented as convenience functions that compose underlying Infinibatch iterators.
-&#34;&#34;&#34;
-
-
-def bump_seed(seed: Optional[int], step = 1):
-    &#34;&#34;&#34;
-    Helper to bump a random seed if not None.
-    &#34;&#34;&#34;
-    return None if seed is None else seed + step
-
-
-def chunked_dataset_iterator(chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int,
-                             train: bool=True,
-                             seed: Optional[int]=None, shuffle: bool=True, use_windowed: bool=False,
-                             transform: Callable[[Any],Any]=None,
-                             prefetch: bool=True,
-                             num_instances: int=1, instance_rank: int=0):
-    &#34;&#34;&#34;
-    Dataset reading data from gzipped chunks.
-
-If train=True, the chunks are assigned to the instances in a strided fashion and the data is repeated infinitely in permutations.
-    Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated.
-    This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds
-    to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU
-    and then concatenate these lists in order of increasing rank.
-    When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU,
-    followed by flattening the lists back into a single list.
-
-    Args:
-        chunk_refs: references (such as path names) to chunk files
-        read_chunk_fn: function(chunk_ref) -&gt; Iterator to read a chunk&#39;s content into an iterator over its items, e.g. read a file and split into text lines
-        train: see above
-        shuffle: if true, the data is shuffled. If train is False then shuffle must be False as well.
-        buffer_size: size of the buffer in number of samples / data items used for shuffling (default: 2**20)
-        transform: transform to be applied to each data item (transform(Any) -&gt; Any)
-        prefetch: if True, insert a prefetch iterator with buffer_size
-        seed: random seed (or None)
-        num_instances: number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training.
-        instance_rank: rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training.
-        use_windowed: temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don&#39;t need it anymore.
-    &#34;&#34;&#34;
-    if not train and shuffle:
-        raise ValueError(&#39;shuffling is not supported when train=False&#39;)
-    # set up the chunk reader
-    chunk_refs = create_source_iterator(chunk_refs, train=train, seed=seed, shuffle=shuffle, num_instances=num_instances, instance_rank=instance_rank)
-    # set up the item reader
-    samples = SelectManyIterator(source_iterator=chunk_refs, collection_selector=read_chunk_fn)
-    # wrap the I/O operation in a prefetch iterator
-    if prefetch:
-        samples = PrefetchIterator(samples, buffer_size)
-    # set up the item randomizer
-    if shuffle:
-        if use_windowed:
-            samples = BufferedShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
-        else:
-            samples = BlockwiseShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
-    # apply transform, if given
-    if transform is not None:
-        samples = MapIterator(samples, transform)
-    # this is what we are serving out
-    return samples</code></pre>
-</details>
-</section>
-<section>
-</section>
-<section>
-</section>
-<section>
-<h2 class="section-title" id="header-functions">Functions</h2>
-<dl>
-<dt id="infinibatch.datasets.bump_seed"><code class="name flex">
-<span>def <span class="ident">bump_seed</span></span>(<span>seed: Union[int, NoneType], step=1)</span>
-</code></dt>
-<dd>
-<section class="desc"><p>Helper to bump a random seed if not None.</p></section>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def bump_seed(seed: Optional[int], step = 1):
-    &#34;&#34;&#34;
-    Helper to bump a random seed if not None.
-    &#34;&#34;&#34;
-    return None if seed is None else seed + step
-</details>
-</dd>
-<dt id="infinibatch.datasets.chunked_dataset_iterator"><code class="name flex">
-<span>def <span class="ident">chunked_dataset_iterator</span></span>(<span>chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int, train: bool = True, seed: Union[int, NoneType] = None, shuffle: bool = True, use_windowed: bool = False, transform: Callable[[Any], Any] = None, prefetch: bool = True, num_instances: int = 1, instance_rank: int = 0)</span>
-</code></dt>
-<dd>
-<section class="desc"><p>Dataset reading data from gzipped chunks.</p>
-<p>If train=True, the chunks are assigned to the instances in a strided fashion and the data is repeated infinitely in permutations.
-Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated.
-This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds
-to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU
-and then concatenate these lists in order of increasing rank.
-When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU,
-followed by flattening the lists back into a single list.</p>
-<h2 id="args">Args</h2>
-<dl>
-<dt><strong><code>chunk_refs</code></strong></dt>
-<dd>references (such as path names) to chunk files</dd>
-<dt><strong><code>read_chunk_fn</code></strong></dt>
-<dd>function(chunk_ref) -&gt; Iterator to read a chunk's content into an iterator over its items, e.g. read a file and split into text lines</dd>
-<dt><strong><code>train</code></strong></dt>
-<dd>see above</dd>
-<dt><strong><code>shuffle</code></strong></dt>
-<dd>if true, the data is shuffled. If train is False then shuffle must be False as well.</dd>
-<dt><strong><code>buffer_size</code></strong></dt>
-<dd>size of the buffer in number of samples / data items used for shuffling (default: 2**20)</dd>
-<dt><strong><code>transform</code></strong></dt>
-<dd>transform to be applied to each data item (transform(Any) -&gt; Any)</dd>
-<dt><strong><code>prefetch</code></strong></dt>
-<dd>if True, insert a prefetch iterator with buffer_size</dd>
-<dt><strong><code>seed</code></strong></dt>
-<dd>random seed (or None)</dd>
-<dt><strong><code>num_instances</code></strong></dt>
-<dd>number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training.</dd>
-<dt><strong><code>instance_rank</code></strong></dt>
-<dd>rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training.</dd>
-<dt><strong><code>use_windowed</code></strong></dt>
-<dd>temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don't need it anymore.</dd>
-</dl></section>
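-<p>A minimal usage sketch (the chunk paths and the <code>read_lines</code> helper below are illustrative assumptions, not part of this module):</p>
-<pre><code class="python">import glob, gzip, itertools
-from infinibatch.datasets import chunked_dataset_iterator
-
-def read_lines(path):  # read one gzipped chunk into an iterator over its lines
-    with gzip.open(path, &#34;rt&#34;, encoding=&#34;utf-8&#34;) as f:
-        return iter(f.readlines())
-
-ds = chunked_dataset_iterator(
-    chunk_refs=sorted(glob.glob(&#34;data/*.txt.gz&#34;)),  # hypothetical chunk files
-    read_chunk_fn=read_lines,
-    buffer_size=2**16,
-    seed=1)
-for line in itertools.islice(ds, 3):  # infinite when train=True, so take a slice
-    print(line)</code></pre>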
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def chunked_dataset_iterator(chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int,
-                             train: bool=True,
-                             seed: Optional[int]=None, shuffle: bool=True, use_windowed: bool=False,
-                             transform: Callable[[Any],Any]=None,
-                             prefetch: bool=True,
-                             num_instances: int=1, instance_rank: int=0):
-    &#34;&#34;&#34;
-    Dataset reading data from gzipped chunks.
-
-    If train=True, the chunks are assigned to the instances in a strided fashion and the data is repeated infinitely in permutations.
-    Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated.
-    This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds
-    to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU
-    and then concatenate these lists in order of increasing rank.
-    When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU,
-    followed by flattening the lists back into a single list.
-
-    Args:
-        chunk_refs: references (such as path names) to chunk files
-        read_chunk_fn: function(chunk_ref) -&gt; Iterator to read a chunk&#39;s content into an iterator over its items, e.g. read a file and split into text lines
-        train: see above
-        shuffle: if true, the data is shuffled. If train is False then shuffle must be False as well.
-        buffer_size: size of the buffer in number of samples / data items used for shuffling (default: 2**20)
-        transform: transform to be applied to each data item (transform(Any) -&gt; Any)
-        prefetch: if True, insert a prefetch iterator with buffer_size
-        seed: random seed (or None)
-        num_instances: number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training.
-        instance_rank: rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training.
-        use_windowed: temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don&#39;t need it anymore.
-    &#34;&#34;&#34;
-    if not train and shuffle:
-        raise ValueError(&#39;shuffling is not supported when train=False&#39;)
-    # set up the chunk reader
-    chunk_refs = create_source_iterator(chunk_refs, train=train, seed=seed, shuffle=shuffle, num_instances=num_instances, instance_rank=instance_rank)
-    # set up the item reader
-    samples = SelectManyIterator(source_iterator=chunk_refs, collection_selector=read_chunk_fn)
-    # wrap the I/O operation in a prefetch iterator
-    if prefetch:
-        samples = PrefetchIterator(samples, buffer_size)
-    # set up the item randomizer
-    if shuffle:
-        if use_windowed:
-            samples = BufferedShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
-        else:
-            samples = BlockwiseShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
-    # apply transform, if given
-    if transform is not None:
-        samples = MapIterator(samples, transform)
-    # this is what we are serving out
-    return samples</code></pre>
-</details>
-</dd>
-</dl>
-</section>
-<section>
-</section>
-</article>
-<nav id="sidebar">
-<h1>Index</h1>
-<div class="toc">
-<ul></ul>
-</div>
-<ul id="index">
-<li><h3>Super-module</h3>
-<ul>
-<li><code><a title="infinibatch" href="index.html">infinibatch</a></code></li>
-</ul>
-</li>
-<li><h3><a href="#header-functions">Functions</a></h3>
-<ul class="">
-<li><code><a title="infinibatch.datasets.bump_seed" href="#infinibatch.datasets.bump_seed">bump_seed</a></code></li>
-<li><code><a title="infinibatch.datasets.chunked_dataset_iterator" href="#infinibatch.datasets.chunked_dataset_iterator">chunked_dataset_iterator</a></code></li>
-</ul>
-</li>
-</ul>
-</nav>
-</main>
-<footer id="footer">
-<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
-</footer>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
-<script>hljs.initHighlightingOnLoad()</script>
-</body>
-</html>
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test32/app.py b/spaces/allknowingroger/Image-Models-Test32/app.py
deleted file mode 100644
index 0f62bcd1c58615be8f0070bfa52c86436c115e07..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test32/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-# import os
-# import sys
-# from pathlib import Path
-import time
-
-models =[
-    "Yntec/RainbowClassicAnime",
-    "WALIDALI/toonyoudif",
-    "digiplay/Realisian_v4",
-    "digiplay/Realisian_v5",
-    "Kendong/Joyance_teddy",
-    "bongo2112/sdxl-db-diamondplatnumz-portrait",
-    "digiplay/hellopure_v2.24Beta",
-    "Yntec/DreamAnything",
-    "dwancin/memoji",
-]
-
-
-model_functions = {}
-model_idx = 1
-for model_path in models:
-    try:
-        model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
-    except Exception as error:
-        def the_fn(txt):
-            return None
-        model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
-    model_idx+=1
-
-
-def send_it_idx(idx):
-    def send_it_fn(prompt):
-        # model_functions is keyed by int indices, so look up with ints
-        # (falling back to the first model if the index is missing)
-        output = (model_functions.get(idx) or model_functions.get(1))(prompt)
-        return output
-    return send_it_fn
-
-def get_prompts(prompt_text):
-    return prompt_text
-
-def clear_it(val):
-    # both branches reset the value, so simply return 0
-    return 0
-
-def all_task_end(cnt,t_stamp):
-    to = t_stamp + 60
-    et = time.time()
-    if et > to and t_stamp != 0:
-        d = gr.update(value=0)
-        tog = gr.update(value=1)
-        #print(f'to: {to}  et: {et}')
-    else:
-        if cnt != 0:
-            d = gr.update(value=et)
-        else:
-            d = gr.update(value=0)
-        tog = gr.update(value=0)
-        #print (f'passing:  to: {to}  et: {et}')
-        pass
-    return d, tog
-
-def all_task_start():
-    print("\n\n\n\n\n\n\n")
-    t_stamp = time.time()
-    return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
-
-def clear_fn():
-    # one None for the prompt box plus one per model output
-    return (None,) * (len(models) + 1)
-
-
-
-with gr.Blocks(title="SD Models") as my_interface:
-    with gr.Column(scale=12):
-        # with gr.Row():
-        #     gr.Markdown("""- Primary prompt: what you want to draw (English words, e.g. a cat; English commas work better; click the Improve button to refine)\n- Real prompt: the refined prompt; once it appears, click the Run button on the right to start""")
-        with gr.Row():
-            with gr.Row(scale=6):
-                primary_prompt=gr.Textbox(label="Prompt", value="")
-                # real_prompt=gr.Textbox(label="Real prompt")
-            with gr.Row(scale=6):
-                # improve_prompts_btn=gr.Button("Improve")
-                with gr.Row():
-                    run=gr.Button("Run",variant="primary")
-                    clear_btn=gr.Button("Clear")
-        with gr.Row():
-            sd_outputs = {}
-            model_idx = 1
-            for model_path in models:
-                with gr.Column(scale=3, min_width=320):
-                    with gr.Box():
-                        sd_outputs[model_idx] = gr.Image(label=model_path)
-                    pass
-                model_idx += 1
-                pass
-            pass
-
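-        # Hidden state: start_box/end_box carry timestamps and tog_box is a
-        # timeout flag. all_task_end (polled once per second via the
-        # start_box.change event below) sets tog_box to 1 about 60s after a run
-        # starts, and the tog_box.change handler then cancels pending jobs.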
-        with gr.Row(visible=False):
-            start_box=gr.Number(interactive=False)
-            end_box=gr.Number(interactive=False)
-            tog_box=gr.Textbox(value=0,interactive=False)
-
-        start_box.change(
-            all_task_end,
-            [start_box, end_box],
-            [start_box, tog_box],
-            every=1,
-            show_progress=False)
-
-        primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
-        run.click(all_task_start, None, [start_box, end_box, tog_box])
-        runs_dict = {}
-        model_idx = 1
-        for model_path in models:
-            runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
-            model_idx += 1
-            pass
-        pass
-
-        # improve_prompts_btn_clicked=improve_prompts_btn.click(
-        #     get_prompts,
-        #     inputs=[primary_prompt],
-        #     outputs=[primary_prompt],
-        #     cancels=list(runs_dict.values()))
-        clear_btn.click(
-            clear_fn,
-            None,
-            [primary_prompt, *list(sd_outputs.values())],
-            cancels=[*list(runs_dict.values())])
-        tog_box.change(
-            clear_it,
-            tog_box,
-            tog_box,
-            cancels=[*list(runs_dict.values())])
-
-my_interface.queue(concurrency_count=600, status_update_rate=1)
-my_interface.launch(inline=True, show_api=False)
- 
\ No newline at end of file
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/i686-w64-mingw32.cmake b/spaces/amarchheda/ChordDuplicate/portaudio/i686-w64-mingw32.cmake
deleted file mode 100644
index c3331b67a1b5bd16d35dd8a384e21670581d9023..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/i686-w64-mingw32.cmake
+++ /dev/null
@@ -1,17 +0,0 @@
-# CMake Toolchain file for cross-compiling PortAudio to i686-w64-mingw32
-# Inspired from: https://gitlab.kitware.com/cmake/community/-/wikis/doc/cmake/cross_compiling/Mingw
-# Example usage: $ cmake -DCMAKE_TOOLCHAIN_FILE=i686-w64-mingw32.cmake .
-# i686-w64-mingw32 needs to be installed for this to work. On Debian-based
-# distributions the package is typically named `mingw-w64`.
-
-set(CMAKE_SYSTEM_NAME Windows)
-
-set(CMAKE_C_COMPILER i686-w64-mingw32-gcc)
-set(CMAKE_CXX_COMPILER i686-w64-mingw32-g++)
-set(CMAKE_RC_COMPILER i686-w64-mingw32-windres)
-
-set(CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32)
-
-set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
-set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
-set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/pablio/test_w_saw8.c b/spaces/amarchheda/ChordDuplicate/portaudio/pablio/test_w_saw8.c
deleted file mode 100644
index 70686c19b200957e72051c177e1c599acda0bc53..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/pablio/test_w_saw8.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * $Id$
- * test_w_saw8.c
- * Generate stereo 8 bit sawtooth waveforms.
- *
- * Author: Phil Burk, http://www.softsynth.com
- *
- * This program uses PABLIO, the Portable Audio Blocking I/O Library.
- * PABLIO is built on top of PortAudio, the Portable Audio Library.
- *
- * For more information see: http://www.portaudio.com
- * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however, 
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also 
- * requested that these non-binding requests be included along with the 
- * license above.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include "pablio.h"
-#include <string.h>
-
-#define SAMPLE_RATE         (22050)
-#define NUM_SECONDS             (6)
-#define SAMPLES_PER_FRAME       (2)
-
-
-#define FRAMES_PER_BLOCK    (100)
-
-unsigned char   samples[FRAMES_PER_BLOCK][SAMPLES_PER_FRAME];
-unsigned char   phases[SAMPLES_PER_FRAME];
-
-/*******************************************************************/
-int main(void);
-int main(void)
-{
-    int             i,j;
-    PaError         err;
-    PABLIO_Stream  *aOutStream;
-
-    printf("Generate unsigned 8 bit sawtooth waves using PABLIO.\n");
-    fflush(stdout);
-
-    /* Open simplified blocking I/O layer on top of PortAudio. */
-    err = OpenAudioStream( &aOutStream, SAMPLE_RATE, paUInt8,
-                           (PABLIO_WRITE | PABLIO_STEREO) );
-    if( err != paNoError ) goto error;
-
-    /* Initialize oscillator phases to "ground" level for paUInt8. */
-    phases[0] = 128;
-    phases[1] = 128;
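-    /* With per-frame phase increments of 1 and 3 (see the loop below), the
-     * 8-bit phases wrap every 256 and ~85 frames, i.e. sawtooth frequencies of
-     * about SAMPLE_RATE/256 = 86 Hz on channel 0 and 3*SAMPLE_RATE/256 = 258 Hz
-     * on channel 1. */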
-
-    for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i += FRAMES_PER_BLOCK )
-    {
-        /* Generate sawtooth waveforms in a block for efficiency. */
-        for( j=0; j<FRAMES_PER_BLOCK; j++ )
-        {
-            /* Generate a sawtooth wave by incrementing a variable. */
-            phases[0] += 1;
-            /* We don't have to do anything special to wrap when using paUInt8 because
-             * 8 bit arithmetic automatically wraps. */
-            samples[j][0] = phases[0];
-
-            /* On the second channel, generate a higher sawtooth wave. */
-            phases[1] += 3;
-            samples[j][1] = phases[1];
-        }
-
-        /* Write samples to output. */
-        WriteAudioStream( aOutStream, samples, FRAMES_PER_BLOCK );
-    }
-
-    CloseAudioStream( aOutStream );
-
-    printf("Sawtooth sound test complete.\n" );
-    fflush(stdout);
-    return 0;
-
-error:
-    fprintf( stderr, "An error occurred while using PABLIO\n" );
-    fprintf( stderr, "Error number: %d\n", err );
-    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
-    return -1;
-}
diff --git a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_pack.py b/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_pack.py
deleted file mode 100644
index 10504fe5ccbd9c2f9e60408037f971523036614f..0000000000000000000000000000000000000000
--- a/spaces/amitjamadagni/qs-benchmarks/plot_scripts/plot_display_com_pack.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import numpy as np
-import h5py
-import os
-
-import mercury as mr
-
-import sys
-sys.path.append('/plot_scripts/')
-from map_packages_colors_all import *
-from plot_scripts_all import *
-
-# print(" Tasks to choose from : ")
-# print(" Heisenberg dynamics (hdyn), Random Quantum Circuits (rqc), Quantum Fourier Transform (qft)")
-# print("###############################")
-# print(" Compute capability choices for packages :")
-# print(" singlethread, multithread, gpu")
-# print("###############################")
-# print(" Precision choices for different compute capabilities :")
-# print(" sp (single precision), dp (double precision)")
-# print("###############################")
-
-# task_1 = input(" Enter the task to compare : ")
-# package_1 = input(" Enter the choice of the package, package 1 : ")
-# p_com_cap = input(" Enter the choice of the compute capability : ")
-# p_prec = input(" Enter the choice of the precision : ")
-
-package_str = ['qiskit' , 'cirq', 'qsimcirq', 'pennylane', 'pennylane_l', 'qibo', 'qibojit', 'yao', 'quest', 'qulacs', 'intel_qs_cpp', 'projectq', 'svsim',  'hybridq', 'hiq', 'qcgpu', 'qrack_sch', 'cuquantum_qiskit', 'cuquantum_qsimcirq', 'qpanda',  'qpp', 'myqlm', 'myqlm_cpp', 'braket']
-
-# N = 36
-ngates_hdyn = [5544, 7722, 9900, 12078, 14256, 16434, 18612, 20790, 22968, 25146, 27324, 29502, 31680, 33858, 36036, 38214]
-ngates_rqc = [3300, 3850, 4400, 5158, 5760, 6362, 7120, 7670, 8220, 8978, 9580, 10182, 10940]
-ngates_qft = [186, 344, 550, 804, 1106, 1456, 1854, 2300, 2794, 3336, 3926, 4564, 5250, 5984, 6766, 7596]
-
-def abs_time_pack(t1, t2, cc, pr, N_end):
-
-    if t1 == "Heisenberg dynamics":
-        t1 = "hdyn"
-        ng1 = ngates_hdyn
-    elif t1 == "Random Quantum Circuit":
-        t1 = "rqc"
-        ng1 = ngates_rqc
-    elif t1 == "Quantum Fourier Transform":
-        t1 = "qft"
-        ng1 = ngates_qft
-
-    if t2 == "Heisenberg dynamics":
-        t2 = "hdyn"
-        ng2 = ngates_hdyn
-    elif t2 == "Random Quantum Circuit":
-        t2 = "rqc"
-        ng2 = ngates_rqc
-    elif t2 == "Quantum Fourier Transform":
-        t2 = "qft"
-        ng2 = ngates_qft
-
-    if cc == "Singlethread":
-        cc = "singlethread"
-    elif cc == "Multithread":
-        cc = "multithread"
-    elif cc == "GPU":
-        cc = "gpu"
-
-    if pr == "Single":
-        pr = "sp"
-    elif pr == "Double":
-        pr = "dp"
-
-    fig, ax = plt.subplots()
-
-    dir = os.getcwd()
-
-    if t1 == 'hdyn' or t1 == 'qft':
-        N_arr_t1 = np.arange(6, N_end, 2)
-    elif t1 == 'rqc':
-        N_arr_t1 = np.arange(12, N_end, 2)
-
-    if t2 == 'hdyn' or t2 == 'qft':
-        N_arr_t2 = np.arange(6, N_end, 2)
-    elif t2 == 'rqc':
-        N_arr_t2 = np.arange(12, N_end, 2)
-
-    # pack_list = []
-
-    for package in package_str:
-        dat_f1 = dir + '/data/{}/{}_{}_{}.h5'.format(t1, package, cc, pr)
-        dat_f2 = dir + '/data/{}/{}_{}_{}.h5'.format(t2, package, cc, pr)
-
-        if os.path.isfile(dat_f1) and os.path.isfile(dat_f2):
-            h5f1 = h5py.File(dat_f1, 'r')
-            dat1 = h5f1[storage_dict[package]][:]
-            h5f1.close()
-            # pack_list.append(package)
-
-            h5f2 = h5py.File(dat_f2, 'r')
-            dat2 = h5f2[storage_dict[package]][:]
-            h5f2.close()
-
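-            # hdyn/qft sweeps start at N=6 but rqc starts at N=12 (step 2), so
-            # when the tasks differ, the longer array drops its first
-            # (12 - 6) / 2 = 3 entries to align the two system-size grids.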
-            if N_arr_t1[0] > N_arr_t2[0]:
-                dat2 = dat2[3:]
-                N_arr = N_arr_t1
-            elif N_arr_t1[0] < N_arr_t2[0]:
-                dat1 = dat1[3:]
-                N_arr = N_arr_t2
-            else:
-                N_arr = N_arr_t1
-            plot_comp_data_n_arr(N_arr, dat1, dat2, package+'_'+t1+'_'+cc+'_'+pr)
-
-
-    # save_flag = input("Do you want to save the plot?")
-    # if save_flag == "Y":
-    #     gen_settings(fig, ax, r"N (system size)", r"Time ($t_{package}$)", False, True, True, 10**-1, 10**5, "out", "perf_{}_{}_{}_{}_{}_{}_{}_{}.pdf".format(t1, p1, p1_cc, p1_pr, t2, p2, p2_cc, p2_pr))
-    # else:
-    if N_arr_t1[0] > N_arr_t2[0]:
-        N_arr = N_arr_t1
-        ng2 = ng2[3:]
-    elif N_arr_t1[0] < N_arr_t2[0]:
-        N_arr = N_arr_t2
-        ng1 = ng1[3:]
-    else:
-        N_arr = N_arr_t1
-
-    mr.Md(f" Number of gates of Task 1: {ng1}")
-    mr.Md(f" Number of gates of Task 2: {ng2}")
-
-    plot_comp_data_n_arr(N_arr, ng1, ng2, 'gate_count')
-    gen_settings(fig, ax, r"N (system size)", r"Task I/Task II ($t_{T1}/t_{T2}$)", False, False, True, N_arr[0]-2, N_arr[-1], True, -3, 25, "out", None)
-    # else:
-    #     print(" Re-select the options as the requested option data is not available.")
-
-# pkg_str = ['qiskit' , 'cirq', 'qsimcirq', 'pennylane', 'pennylane_l', 'qibo', 'qibojit', 'yao', 'quest', 'qulacs', 'intel_qs_cpp', 'projectq', 'svsim',  'hybridq', 'hiq', 'qcgpu', 'qrack_sch']
-
-# abs_time(pkg_str, task_1, p_com_cap, p_prec)
-# abs_time("Heisenberg dynamics", "Singlethread", "Single", 'qsimcirq')
-# abs_time_pack("Heisenberg dynamics", "Random Quantum Circuit", "Singlethread", "Single", 34)
-# abs_time_pack("Heisenberg dynamics", "Quantum Fourier Transform", "GPU", "Single", 38)
diff --git a/spaces/anzorq/sd-to-diffusers/README.md b/spaces/anzorq/sd-to-diffusers/README.md
deleted file mode 100644
index 33164e00cc1011593a8afc1b5e2d745066647677..0000000000000000000000000000000000000000
--- a/spaces/anzorq/sd-to-diffusers/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: SD To Diffusers
-emoji: 🎨➡️🧨
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
deleted file mode 100644
index 1c66194deb5dd370e797e57e2712f44303e568cc..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# DINO
-# Copyright (c) 2022 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# --------------------------------------------------------
-# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
-# --------------------------------------------------------
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-from groundingdino.util.misc import NestedTensor
-
-
-class Mlp(nn.Module):
-    """Multilayer perceptron."""
-
-    def __init__(
-        self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
-    ):
-        super().__init__()
-        out_features = out_features or in_features
-        hidden_features = hidden_features or in_features
-        self.fc1 = nn.Linear(in_features, hidden_features)
-        self.act = act_layer()
-        self.fc2 = nn.Linear(hidden_features, out_features)
-        self.drop = nn.Dropout(drop)
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.act(x)
-        x = self.drop(x)
-        x = self.fc2(x)
-        x = self.drop(x)
-        return x
-
-
-def window_partition(x, window_size):
-    """
-    Args:
-        x: (B, H, W, C)
-        window_size (int): window size
-    Returns:
-        windows: (num_windows*B, window_size, window_size, C)
-    """
-    B, H, W, C = x.shape
-    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
-    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
-    return windows
-
-
-def window_reverse(windows, window_size, H, W):
-    """
-    Args:
-        windows: (num_windows*B, window_size, window_size, C)
-        window_size (int): Window size
-        H (int): Height of image
-        W (int): Width of image
-    Returns:
-        x: (B, H, W, C)
-    """
-    B = int(windows.shape[0] / (H * W / window_size / window_size))
-    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
-    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
-    return x
-
-
-class WindowAttention(nn.Module):
-    """Window based multi-head self attention (W-MSA) module with relative position bias.
-    It supports both of shifted and non-shifted window.
-    Args:
-        dim (int): Number of input channels.
-        window_size (tuple[int]): The height and width of the window.
-        num_heads (int): Number of attention heads.
-        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
-        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
-        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
-        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
-    """
-
-    def __init__(
-        self,
-        dim,
-        window_size,
-        num_heads,
-        qkv_bias=True,
-        qk_scale=None,
-        attn_drop=0.0,
-        proj_drop=0.0,
-    ):
-
-        super().__init__()
-        self.dim = dim
-        self.window_size = window_size  # Wh, Ww
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        self.scale = qk_scale or head_dim**-0.5
-
-        # define a parameter table of relative position bias
-        self.relative_position_bias_table = nn.Parameter(
-            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
-        )  # 2*Wh-1 * 2*Ww-1, nH
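-        # e.g. a 7x7 window has 13 * 13 = 169 distinct relative offsets, so the
-        # table has shape (169, num_heads); relative_position_index (below)
-        # maps each of the 49 x 49 token pairs to one of these rows.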
-
-        # get pair-wise relative position index for each token inside the window
-        coords_h = torch.arange(self.window_size[0])
-        coords_w = torch.arange(self.window_size[1])
-        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
-        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
-        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
-        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
-        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
-        relative_coords[:, :, 1] += self.window_size[1] - 1
-        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
-        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
-        self.register_buffer("relative_position_index", relative_position_index)
-
-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-
-        trunc_normal_(self.relative_position_bias_table, std=0.02)
-        self.softmax = nn.Softmax(dim=-1)
-
-    def forward(self, x, mask=None):
-        """Forward function.
-        Args:
-            x: input features with shape of (num_windows*B, N, C)
-            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
-        """
-        B_, N, C = x.shape
-        qkv = (
-            self.qkv(x)
-            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
-            .permute(2, 0, 3, 1, 4)
-        )
-        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
-
-        q = q * self.scale
-        attn = q @ k.transpose(-2, -1)
-
-        relative_position_bias = self.relative_position_bias_table[
-            self.relative_position_index.view(-1)
-        ].view(
-            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
-        )  # Wh*Ww,Wh*Ww,nH
-        relative_position_bias = relative_position_bias.permute(
-            2, 0, 1
-        ).contiguous()  # nH, Wh*Ww, Wh*Ww
-        attn = attn + relative_position_bias.unsqueeze(0)
-
-        if mask is not None:
-            nW = mask.shape[0]
-            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
-            attn = attn.view(-1, self.num_heads, N, N)
-            attn = self.softmax(attn)
-        else:
-            attn = self.softmax(attn)
-
-        attn = self.attn_drop(attn)
-
-        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-        return x
-
-
-class SwinTransformerBlock(nn.Module):
-    """Swin Transformer Block.
-    Args:
-        dim (int): Number of input channels.
-        num_heads (int): Number of attention heads.
-        window_size (int): Window size.
-        shift_size (int): Shift size for SW-MSA.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
-        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
-        drop (float, optional): Dropout rate. Default: 0.0
-        attn_drop (float, optional): Attention dropout rate. Default: 0.0
-        drop_path (float, optional): Stochastic depth rate. Default: 0.0
-        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
-        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
-    """
-
-    def __init__(
-        self,
-        dim,
-        num_heads,
-        window_size=7,
-        shift_size=0,
-        mlp_ratio=4.0,
-        qkv_bias=True,
-        qk_scale=None,
-        drop=0.0,
-        attn_drop=0.0,
-        drop_path=0.0,
-        act_layer=nn.GELU,
-        norm_layer=nn.LayerNorm,
-    ):
-        super().__init__()
-        self.dim = dim
-        self.num_heads = num_heads
-        self.window_size = window_size
-        self.shift_size = shift_size
-        self.mlp_ratio = mlp_ratio
-        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
-
-        self.norm1 = norm_layer(dim)
-        self.attn = WindowAttention(
-            dim,
-            window_size=to_2tuple(self.window_size),
-            num_heads=num_heads,
-            qkv_bias=qkv_bias,
-            qk_scale=qk_scale,
-            attn_drop=attn_drop,
-            proj_drop=drop,
-        )
-
-        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
-        self.norm2 = norm_layer(dim)
-        mlp_hidden_dim = int(dim * mlp_ratio)
-        self.mlp = Mlp(
-            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
-        )
-
-        self.H = None
-        self.W = None
-
-    def forward(self, x, mask_matrix):
-        """Forward function.
-        Args:
-            x: Input feature, tensor size (B, H*W, C).
-            H, W: Spatial resolution of the input feature.
-            mask_matrix: Attention mask for cyclic shift.
-        """
-        B, L, C = x.shape
-        H, W = self.H, self.W
-        assert L == H * W, "input feature has wrong size"
-
-        shortcut = x
-        x = self.norm1(x)
-        x = x.view(B, H, W, C)
-
-        # pad feature maps to multiples of window size
-        pad_l = pad_t = 0
-        pad_r = (self.window_size - W % self.window_size) % self.window_size
-        pad_b = (self.window_size - H % self.window_size) % self.window_size
-        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
-        _, Hp, Wp, _ = x.shape
-
-        # cyclic shift
-        if self.shift_size > 0:
-            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
-            attn_mask = mask_matrix
-        else:
-            shifted_x = x
-            attn_mask = None
-
-        # partition windows
-        x_windows = window_partition(
-            shifted_x, self.window_size
-        )  # nW*B, window_size, window_size, C
-        x_windows = x_windows.view(
-            -1, self.window_size * self.window_size, C
-        )  # nW*B, window_size*window_size, C
-
-        # W-MSA/SW-MSA
-        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C
-
-        # merge windows
-        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
-        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C
-
-        # reverse cyclic shift
-        if self.shift_size > 0:
-            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
-        else:
-            x = shifted_x
-
-        if pad_r > 0 or pad_b > 0:
-            x = x[:, :H, :W, :].contiguous()
-
-        x = x.view(B, H * W, C)
-
-        # FFN
-        x = shortcut + self.drop_path(x)
-        x = x + self.drop_path(self.mlp(self.norm2(x)))
-
-        return x
-
-
-class PatchMerging(nn.Module):
-    """Patch Merging Layer
-    Args:
-        dim (int): Number of input channels.
-        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
-    """
-
-    def __init__(self, dim, norm_layer=nn.LayerNorm):
-        super().__init__()
-        self.dim = dim
-        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
-        self.norm = norm_layer(4 * dim)
-
-    def forward(self, x, H, W):
-        """Forward function.
-        Args:
-            x: Input feature, tensor size (B, H*W, C).
-            H, W: Spatial resolution of the input feature.
-        """
-        B, L, C = x.shape
-        assert L == H * W, "input feature has wrong size"
-
-        x = x.view(B, H, W, C)
-
-        # padding
-        pad_input = (H % 2 == 1) or (W % 2 == 1)
-        if pad_input:
-            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
-
-        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
-        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
-        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
-        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
-        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
-        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
-
-        x = self.norm(x)
-        x = self.reduction(x)
-
-        return x
-
-
-class BasicLayer(nn.Module):
-    """A basic Swin Transformer layer for one stage.
-    Args:
-        dim (int): Number of feature channels
-        depth (int): Number of blocks in this stage.
-        num_heads (int): Number of attention heads.
-        window_size (int): Local window size. Default: 7.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
-        drop (float, optional): Dropout rate. Default: 0.0
-        attn_drop (float, optional): Attention dropout rate. Default: 0.0
-        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
-        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
-        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-    """
-
-    def __init__(
-        self,
-        dim,
-        depth,
-        num_heads,
-        window_size=7,
-        mlp_ratio=4.0,
-        qkv_bias=True,
-        qk_scale=None,
-        drop=0.0,
-        attn_drop=0.0,
-        drop_path=0.0,
-        norm_layer=nn.LayerNorm,
-        downsample=None,
-        use_checkpoint=False,
-    ):
-        super().__init__()
-        self.window_size = window_size
-        self.shift_size = window_size // 2
-        self.depth = depth
-        self.use_checkpoint = use_checkpoint
-
-        # build blocks
-        self.blocks = nn.ModuleList(
-            [
-                SwinTransformerBlock(
-                    dim=dim,
-                    num_heads=num_heads,
-                    window_size=window_size,
-                    shift_size=0 if (i % 2 == 0) else window_size // 2,
-                    mlp_ratio=mlp_ratio,
-                    qkv_bias=qkv_bias,
-                    qk_scale=qk_scale,
-                    drop=drop,
-                    attn_drop=attn_drop,
-                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
-                    norm_layer=norm_layer,
-                )
-                for i in range(depth)
-            ]
-        )
-
-        # patch merging layer
-        if downsample is not None:
-            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
-        else:
-            self.downsample = None
-
-    def forward(self, x, H, W):
-        """Forward function.
-        Args:
-            x: Input feature, tensor size (B, H*W, C).
-            H, W: Spatial resolution of the input feature.
-        """
-
-        # calculate attention mask for SW-MSA
-        Hp = int(np.ceil(H / self.window_size)) * self.window_size
-        Wp = int(np.ceil(W / self.window_size)) * self.window_size
-        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
-        h_slices = (
-            slice(0, -self.window_size),
-            slice(-self.window_size, -self.shift_size),
-            slice(-self.shift_size, None),
-        )
-        w_slices = (
-            slice(0, -self.window_size),
-            slice(-self.window_size, -self.shift_size),
-            slice(-self.shift_size, None),
-        )
-        cnt = 0
-        for h in h_slices:
-            for w in w_slices:
-                img_mask[:, h, w, :] = cnt
-                cnt += 1
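-        # img_mask now labels up to 9 (3 x 3) regions created by the cyclic
-        # shift; pairs of positions from different regions receive a -100 bias
-        # below, which after softmax effectively blocks cross-region attention.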
-
-        mask_windows = window_partition(
-            img_mask, self.window_size
-        )  # nW, window_size, window_size, 1
-        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
-        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
-        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
-            attn_mask == 0, float(0.0)
-        )
-
-        for blk in self.blocks:
-            blk.H, blk.W = H, W
-            if self.use_checkpoint:
-                x = checkpoint.checkpoint(blk, x, attn_mask)
-            else:
-                x = blk(x, attn_mask)
-        if self.downsample is not None:
-            x_down = self.downsample(x, H, W)
-            Wh, Ww = (H + 1) // 2, (W + 1) // 2
-            return x, H, W, x_down, Wh, Ww
-        else:
-            return x, H, W, x, H, W
-
-
-class PatchEmbed(nn.Module):
-    """Image to Patch Embedding
-    Args:
-        patch_size (int): Patch token size. Default: 4.
-        in_chans (int): Number of input image channels. Default: 3.
-        embed_dim (int): Number of linear projection output channels. Default: 96.
-        norm_layer (nn.Module, optional): Normalization layer. Default: None
-    """
-
-    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
-        super().__init__()
-        patch_size = to_2tuple(patch_size)
-        self.patch_size = patch_size
-
-        self.in_chans = in_chans
-        self.embed_dim = embed_dim
-
-        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-        if norm_layer is not None:
-            self.norm = norm_layer(embed_dim)
-        else:
-            self.norm = None
-
-    def forward(self, x):
-        """Forward function."""
-        # padding
-        _, _, H, W = x.size()
-        if W % self.patch_size[1] != 0:
-            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
-        if H % self.patch_size[0] != 0:
-            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
-
-        x = self.proj(x)  # B C Wh Ww
-        if self.norm is not None:
-            Wh, Ww = x.size(2), x.size(3)
-            x = x.flatten(2).transpose(1, 2)
-            x = self.norm(x)
-            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
-
-        return x
-
-
-class SwinTransformer(nn.Module):
-    """Swin Transformer backbone.
-        A PyTorch implementation of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
-          https://arxiv.org/pdf/2103.14030
-    Args:
-        pretrain_img_size (int): Input image size for training the pretrained model,
-            used in absolute position embedding. Default: 224.
-        patch_size (int | tuple(int)): Patch size. Default: 4.
-        in_chans (int): Number of input image channels. Default: 3.
-        embed_dim (int): Number of linear projection output channels. Default: 96.
-        depths (tuple[int]): Depths of each Swin Transformer stage.
-        num_heads (tuple[int]): Number of attention head of each stage.
-        window_size (int): Window size. Default: 7.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
-        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
-        drop_rate (float): Dropout rate.
-        attn_drop_rate (float): Attention dropout rate. Default: 0.
-        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
-        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
-        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
-        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
-        out_indices (Sequence[int]): Output from which stages.
-        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-            -1 means not freezing any parameters.
-        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-        dilation (bool): if True, the final output is downsampled 16x instead of 32x.
-    """
-
-    def __init__(
-        self,
-        pretrain_img_size=224,
-        patch_size=4,
-        in_chans=3,
-        embed_dim=96,
-        depths=[2, 2, 6, 2],
-        num_heads=[3, 6, 12, 24],
-        window_size=7,
-        mlp_ratio=4.0,
-        qkv_bias=True,
-        qk_scale=None,
-        drop_rate=0.0,
-        attn_drop_rate=0.0,
-        drop_path_rate=0.2,
-        norm_layer=nn.LayerNorm,
-        ape=False,
-        patch_norm=True,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=-1,
-        dilation=False,
-        use_checkpoint=False,
-    ):
-        super().__init__()
-
-        self.pretrain_img_size = pretrain_img_size
-        self.num_layers = len(depths)
-        self.embed_dim = embed_dim
-        self.ape = ape
-        self.patch_norm = patch_norm
-        self.out_indices = out_indices
-        self.frozen_stages = frozen_stages
-        self.dilation = dilation
-
-        # if use_checkpoint:
-        #     print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!")
-
-        # split image into non-overlapping patches
-        self.patch_embed = PatchEmbed(
-            patch_size=patch_size,
-            in_chans=in_chans,
-            embed_dim=embed_dim,
-            norm_layer=norm_layer if self.patch_norm else None,
-        )
-
-        # absolute position embedding
-        if self.ape:
-            pretrain_img_size = to_2tuple(pretrain_img_size)
-            patch_size = to_2tuple(patch_size)
-            patches_resolution = [
-                pretrain_img_size[0] // patch_size[0],
-                pretrain_img_size[1] // patch_size[1],
-            ]
-
-            self.absolute_pos_embed = nn.Parameter(
-                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
-            )
-            trunc_normal_(self.absolute_pos_embed, std=0.02)
-
-        self.pos_drop = nn.Dropout(p=drop_rate)
-
-        # stochastic depth
-        dpr = [
-            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
-        ]  # stochastic depth decay rule
-
-        # build layers
-        self.layers = nn.ModuleList()
-        # prepare downsample list
-        downsamplelist = [PatchMerging for i in range(self.num_layers)]
-        downsamplelist[-1] = None
-        num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
-        if self.dilation:
-            downsamplelist[-2] = None
-            num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2
-        for i_layer in range(self.num_layers):
-            layer = BasicLayer(
-                # dim=int(embed_dim * 2 ** i_layer),
-                dim=num_features[i_layer],
-                depth=depths[i_layer],
-                num_heads=num_heads[i_layer],
-                window_size=window_size,
-                mlp_ratio=mlp_ratio,
-                qkv_bias=qkv_bias,
-                qk_scale=qk_scale,
-                drop=drop_rate,
-                attn_drop=attn_drop_rate,
-                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
-                norm_layer=norm_layer,
-                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
-                downsample=downsamplelist[i_layer],
-                use_checkpoint=use_checkpoint,
-            )
-            self.layers.append(layer)
-
-        # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
-        self.num_features = num_features
-
-        # add a norm layer for each output
-        for i_layer in out_indices:
-            layer = norm_layer(num_features[i_layer])
-            layer_name = f"norm{i_layer}"
-            self.add_module(layer_name, layer)
-
-        self._freeze_stages()
-
-    def _freeze_stages(self):
-        if self.frozen_stages >= 0:
-            self.patch_embed.eval()
-            for param in self.patch_embed.parameters():
-                param.requires_grad = False
-
-        if self.frozen_stages >= 1 and self.ape:
-            self.absolute_pos_embed.requires_grad = False
-
-        if self.frozen_stages >= 2:
-            self.pos_drop.eval()
-            for i in range(0, self.frozen_stages - 1):
-                m = self.layers[i]
-                m.eval()
-                for param in m.parameters():
-                    param.requires_grad = False
-
-    # def init_weights(self, pretrained=None):
-    #     """Initialize the weights in backbone.
-    #     Args:
-    #         pretrained (str, optional): Path to pre-trained weights.
-    #             Defaults to None.
-    #     """
-
-    #     def _init_weights(m):
-    #         if isinstance(m, nn.Linear):
-    #             trunc_normal_(m.weight, std=.02)
-    #             if isinstance(m, nn.Linear) and m.bias is not None:
-    #                 nn.init.constant_(m.bias, 0)
-    #         elif isinstance(m, nn.LayerNorm):
-    #             nn.init.constant_(m.bias, 0)
-    #             nn.init.constant_(m.weight, 1.0)
-
-    #     if isinstance(pretrained, str):
-    #         self.apply(_init_weights)
-    #         logger = get_root_logger()
-    #         load_checkpoint(self, pretrained, strict=False, logger=logger)
-    #     elif pretrained is None:
-    #         self.apply(_init_weights)
-    #     else:
-    #         raise TypeError('pretrained must be a str or None')
-
-    def forward_raw(self, x):
-        """Forward function."""
-        x = self.patch_embed(x)
-
-        Wh, Ww = x.size(2), x.size(3)
-        if self.ape:
-            # interpolate the position embedding to the corresponding size
-            absolute_pos_embed = F.interpolate(
-                self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
-            )
-            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
-        else:
-            x = x.flatten(2).transpose(1, 2)
-        x = self.pos_drop(x)
-
-        outs = []
-        for i in range(self.num_layers):
-            layer = self.layers[i]
-            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
-            # import ipdb; ipdb.set_trace()
-
-            if i in self.out_indices:
-                norm_layer = getattr(self, f"norm{i}")
-                x_out = norm_layer(x_out)
-
-                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
-                outs.append(out)
-        # in:
-        #   torch.Size([2, 3, 1024, 1024])
-        # outs:
-        #   [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
-        #       torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
-        return tuple(outs)
-
-    def forward(self, tensor_list: NestedTensor):
-        """Forward function."""
-        x = tensor_list.tensors
-
-        x = self.patch_embed(x)
-
-        Wh, Ww = x.size(2), x.size(3)
-        if self.ape:
-            # interpolate the position embedding to the corresponding size
-            absolute_pos_embed = F.interpolate(
-                self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
-            )
-            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
-        else:
-            x = x.flatten(2).transpose(1, 2)
-        x = self.pos_drop(x)
-
-        outs = []
-        for i in range(self.num_layers):
-            layer = self.layers[i]
-            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
-
-            if i in self.out_indices:
-                norm_layer = getattr(self, f"norm{i}")
-                x_out = norm_layer(x_out)
-
-                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
-                outs.append(out)
-        # in:
-        #   torch.Size([2, 3, 1024, 1024])
-        # out:
-        #   [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
-        #       torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
-
-        # collect the outputs into NestedTensors
-        outs_dict = {}
-        for idx, out_i in enumerate(outs):
-            m = tensor_list.mask
-            assert m is not None
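-            # downsample the padding mask to match this feature map's spatial size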
-            mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]
-            outs_dict[idx] = NestedTensor(out_i, mask)
-
-        return outs_dict
-
-    def train(self, mode=True):
-        """Convert the model into training mode while keep layers freezed."""
-        super(SwinTransformer, self).train(mode)
-        self._freeze_stages()
-
-
-def build_swin_transformer(modelname, pretrain_img_size, **kw):
-    assert modelname in [
-        "swin_T_224_1k",
-        "swin_B_224_22k",
-        "swin_B_384_22k",
-        "swin_L_224_22k",
-        "swin_L_384_22k",
-    ]
-
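-    # Naming convention: T/B/L = Swin-Tiny/Base/Large; 224/384 = pre-training
-    # image resolution; 1k/22k = ImageNet-1k / ImageNet-22k pre-training data.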
-    model_para_dict = {
-        "swin_T_224_1k": dict(
-            embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7
-        ),
-        "swin_B_224_22k": dict(
-            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7
-        ),
-        "swin_B_384_22k": dict(
-            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12
-        ),
-        "swin_L_224_22k": dict(
-            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7
-        ),
-        "swin_L_384_22k": dict(
-            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12
-        ),
-    }
-    kw_cfg = model_para_dict[modelname]
-    kw_cfg.update(kw)
-    model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cfg)
-    return model
-
-
-if __name__ == "__main__":
-    model = build_swin_transformer("swin_L_384_22k", 384, dilation=True)
-    x = torch.rand(2, 3, 1024, 1024)
-    y = model.forward_raw(x)
-    import ipdb
-
-    ipdb.set_trace()
-    x = torch.rand(2, 3, 384, 384)
-    y = model.forward_raw(x)
diff --git a/spaces/aqlanhadi/qr-art/README.md b/spaces/aqlanhadi/qr-art/README.md
deleted file mode 100644
index 92b7269e4defb6cb7d852a8c3b6d040d2a76e244..0000000000000000000000000000000000000000
--- a/spaces/aqlanhadi/qr-art/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: QR Code AI Art Generator
-emoji: 📱🔲
-colorFrom: MediumSeaGreen
-colorTo: CornflowerBlue
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
-suggested_hardware: t4-medium
-startup_duration_timeout: 1h
----
diff --git a/spaces/arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper/README.md b/spaces/arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper/README.md
deleted file mode 100644
index c0fc822771658781188165ca66c9fd721af92e2a..0000000000000000000000000000000000000000
--- a/spaces/arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper
-emoji: 🌍
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: true
-duplicated_from: Osmond141319/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper
----
diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_hifigan_train.py b/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_hifigan_train.py
deleted file mode 100644
index c506fb48dca4dd71eb439489e0af5275b565a8a1..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_hifigan_train.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import glob
-import os
-import shutil
-
-from tests import get_device_id, get_tests_output_path, run_cli
-from TTS.vocoder.configs import HifiganConfig
-
-config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
-output_path = os.path.join(get_tests_output_path(), "train_outputs")
-
-
-config = HifiganConfig(
-    batch_size=8,
-    eval_batch_size=8,
-    num_loader_workers=0,
-    num_eval_loader_workers=0,
-    run_eval=True,
-    test_delay_epochs=-1,
-    epochs=1,
-    seq_len=1024,
-    eval_split_size=1,
-    print_step=1,
-    print_eval=True,
-    data_path="tests/data/ljspeech",
-    output_path=output_path,
-)
-config.audio.do_trim_silence = True
-config.audio.trim_db = 60
-config.save_json(config_path)
-
-# train the model for one epoch
-command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
-run_cli(command_train)
-
-# Find latest folder
-continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
-
-# restore the model and continue training for one more epoch
-command_train = (
-    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
-)
-run_cli(command_train)
-shutil.rmtree(continue_path)
diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/xtts_tests/test_xtts_gpt_train.py b/spaces/artificialguybr/video-dubbing/TTS/tests/xtts_tests/test_xtts_gpt_train.py
deleted file mode 100644
index 12c547d684b2aebdf45933512fae637be702f4b1..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/tests/xtts_tests/test_xtts_gpt_train.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import os
-import shutil
-
-import torch
-from trainer import Trainer, TrainerArgs
-
-from tests import get_tests_output_path
-from TTS.config.shared_configs import BaseDatasetConfig
-from TTS.tts.datasets import load_tts_samples
-from TTS.tts.layers.xtts.dvae import DiscreteVAE
-from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
-
-config_dataset = BaseDatasetConfig(
-    formatter="ljspeech",
-    dataset_name="ljspeech",
-    path="tests/data/ljspeech/",
-    meta_file_train="metadata.csv",
-    meta_file_val="metadata.csv",
-    language="en",
-)
-
-DATASETS_CONFIG_LIST = [config_dataset]
-
-# Logging parameters
-RUN_NAME = "GPT_XTTS_LJSpeech_FT"
-PROJECT_NAME = "XTTS_trainer"
-DASHBOARD_LOGGER = "tensorboard"
-LOGGER_URI = None
-
-# Set the path where the checkpoints will be saved. Default: ./run/training/
-OUT_PATH = os.path.join(get_tests_output_path(), "train_outputs", "xtts_tests")
-os.makedirs(OUT_PATH, exist_ok=True)
-
-# Create the DVAE checkpoint and mel norms at test time
-# DVAE parameters: training needs the DVAE to extract DVAE tokens, so the paths for this model must be provided
-DVAE_CHECKPOINT = os.path.join(OUT_PATH, "dvae.pth")  # DVAE checkpoint
-MEL_NORM_FILE = os.path.join(
-    OUT_PATH, "mel_stats.pth"
-)  # Mel spectrogram norms, required for dvae mel spectrogram extraction
-dvae = DiscreteVAE(
-    channels=80,
-    normalization=None,
-    positional_dims=1,
-    num_tokens=8192,
-    codebook_dim=512,
-    hidden_dim=512,
-    num_resnet_blocks=3,
-    kernel_size=3,
-    num_layers=2,
-    use_transposed_convs=False,
-)
-torch.save(dvae.state_dict(), DVAE_CHECKPOINT)
-mel_stats = torch.ones(80)
-torch.save(mel_stats, MEL_NORM_FILE)
-
-
-# XTTS transfer-learning parameters: provide the path of the XTTS model checkpoint that you want to fine-tune.
-TOKENIZER_FILE = "tests/inputs/xtts_vocab.json"  # vocab.json file
-XTTS_CHECKPOINT = None  # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_style_emb_repetition_fix_gt/132500_gpt_ema_coqui_tts_with_enhanced_hifigan.pth"  # model.pth file
-
-
-# Training sentence generation
-SPEAKER_REFERENCE = ["tests/data/ljspeech/wavs/LJ001-0002.wav"]  # speaker reference to be used in training test sentences
-LANGUAGE = config_dataset.language
-
-
-# Training Parameters
-OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # for multi-GPU training, set this to False
-START_WITH_EVAL = False  # if True, training will start with evaluation
-BATCH_SIZE = 2  # set the batch size here
-GRAD_ACUMM_STEPS = 1  # set the gradient accumulation steps here
-# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE, but then set GRAD_ACUMM_STEPS accordingly.
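-# For example, BATCH_SIZE = 2 with GRAD_ACUMM_STEPS = 126 gives an effective batch size of 2 * 126 = 252.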
-
-
-# init args and config
-model_args = GPTArgs(
-    max_conditioning_length=132300,  # 6 secs
-    min_conditioning_length=66150,  # 3 secs
-    debug_loading_failures=False,
-    max_wav_length=255995,  # ~11.6 seconds
-    max_text_length=200,
-    mel_norm_file=MEL_NORM_FILE,
-    dvae_checkpoint=DVAE_CHECKPOINT,
-    xtts_checkpoint=XTTS_CHECKPOINT,  # checkpoint path of the model that you want to fine-tune
-    tokenizer_file=TOKENIZER_FILE,
-    gpt_num_audio_tokens=8194,
-    gpt_start_audio_token=8192,
-    gpt_stop_audio_token=8193,
-)
-audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)
-config = GPTTrainerConfig(
-    epochs=1,
-    output_path=OUT_PATH,
-    model_args=model_args,
-    run_name=RUN_NAME,
-    project_name=PROJECT_NAME,
-    run_description="""
-        GPT XTTS training
-        """,
-    dashboard_logger=DASHBOARD_LOGGER,
-    logger_uri=LOGGER_URI,
-    audio=audio_config,
-    batch_size=BATCH_SIZE,
-    batch_group_size=48,
-    eval_batch_size=BATCH_SIZE,
-    num_loader_workers=8,
-    eval_split_max_size=256,
-    print_step=50,
-    plot_step=100,
-    log_model_step=1000,
-    save_step=10000,
-    save_n_checkpoints=1,
-    save_checkpoints=True,
-    # target_loss="loss",
-    print_eval=False,
-    # Optimizer values follow Tortoise: a PyTorch implementation modified so weight decay is not applied to non-weight parameters.
-    optimizer="AdamW",
-    optimizer_wd_only_on_weights=OPTIMIZER_WD_ONLY_ON_WEIGHTS,
-    optimizer_params={"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2},
-    lr=5e-06,  # learning rate
-    lr_scheduler="MultiStepLR",
-    # adjusted accordingly for the new step scheme
-    lr_scheduler_params={"milestones": [50000 * 18, 150000 * 18, 300000 * 18], "gamma": 0.5, "last_epoch": -1},
-    test_sentences=[
-        {
-            "text": "This cake is great. It's so delicious and moist.",
-            "speaker_wav": SPEAKER_REFERENCE,
-            "language": LANGUAGE,
-        },
-    ],
-)
-
-# init the model from config
-model = GPTTrainer.init_from_config(config)
-
-# load training samples
-train_samples, eval_samples = load_tts_samples(
-    DATASETS_CONFIG_LIST,
-    eval_split=True,
-    eval_split_max_size=config.eval_split_max_size,
-    eval_split_size=config.eval_split_size,
-)
-
-# init the trainer and 🚀
-trainer = Trainer(
-    TrainerArgs(
-        restore_path=None,  # the XTTS checkpoint is restored via the xtts_checkpoint key, so there is no need to restore it via the Trainer's restore_path parameter
-        skip_train_epoch=False,
-        start_with_eval=True,
-        grad_accum_steps=GRAD_ACUMM_STEPS,
-    ),
-    config,
-    output_path=OUT_PATH,
-    model=model,
-    train_samples=train_samples,
-    eval_samples=eval_samples,
-)
-trainer.fit()
-
-# remove output path
-shutil.rmtree(OUT_PATH)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_ws.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_ws.py
deleted file mode 100644
index 0d32a218b52b87ec04f36a6f95bfb303984b2e43..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_ws.py
+++ /dev/null
@@ -1,487 +0,0 @@
-import asyncio
-import base64
-import binascii
-import hashlib
-import json
-from typing import Any, Iterable, Optional, Tuple, cast
-
-import async_timeout
-import attr
-from multidict import CIMultiDict
-
-from . import hdrs
-from .abc import AbstractStreamWriter
-from .helpers import call_later, set_result
-from .http import (
-    WS_CLOSED_MESSAGE,
-    WS_CLOSING_MESSAGE,
-    WS_KEY,
-    WebSocketError,
-    WebSocketReader,
-    WebSocketWriter,
-    WSCloseCode,
-    WSMessage,
-    WSMsgType as WSMsgType,
-    ws_ext_gen,
-    ws_ext_parse,
-)
-from .log import ws_logger
-from .streams import EofStream, FlowControlDataQueue
-from .typedefs import Final, JSONDecoder, JSONEncoder
-from .web_exceptions import HTTPBadRequest, HTTPException
-from .web_request import BaseRequest
-from .web_response import StreamResponse
-
-__all__ = (
-    "WebSocketResponse",
-    "WebSocketReady",
-    "WSMsgType",
-)
-
-THRESHOLD_CONNLOST_ACCESS: Final[int] = 5
-
-
-@attr.s(auto_attribs=True, frozen=True, slots=True)
-class WebSocketReady:
-    ok: bool
-    protocol: Optional[str]
-
-    def __bool__(self) -> bool:
-        return self.ok
-
-
-class WebSocketResponse(StreamResponse):
-
-    _length_check = False
-
-    def __init__(
-        self,
-        *,
-        timeout: float = 10.0,
-        receive_timeout: Optional[float] = None,
-        autoclose: bool = True,
-        autoping: bool = True,
-        heartbeat: Optional[float] = None,
-        protocols: Iterable[str] = (),
-        compress: bool = True,
-        max_msg_size: int = 4 * 1024 * 1024,
-    ) -> None:
-        super().__init__(status=101)
-        self._protocols = protocols
-        self._ws_protocol: Optional[str] = None
-        self._writer: Optional[WebSocketWriter] = None
-        self._reader: Optional[FlowControlDataQueue[WSMessage]] = None
-        self._closed = False
-        self._closing = False
-        self._conn_lost = 0
-        self._close_code: Optional[int] = None
-        self._loop: Optional[asyncio.AbstractEventLoop] = None
-        self._waiting: Optional[asyncio.Future[bool]] = None
-        self._exception: Optional[BaseException] = None
-        self._timeout = timeout
-        self._receive_timeout = receive_timeout
-        self._autoclose = autoclose
-        self._autoping = autoping
-        self._heartbeat = heartbeat
-        self._heartbeat_cb: Optional[asyncio.TimerHandle] = None
-        if heartbeat is not None:
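-            # expect the peer's pong within half the heartbeat interval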
-            self._pong_heartbeat = heartbeat / 2.0
-        self._pong_response_cb: Optional[asyncio.TimerHandle] = None
-        self._compress = compress
-        self._max_msg_size = max_msg_size
-
-    def _cancel_heartbeat(self) -> None:
-        if self._pong_response_cb is not None:
-            self._pong_response_cb.cancel()
-            self._pong_response_cb = None
-
-        if self._heartbeat_cb is not None:
-            self._heartbeat_cb.cancel()
-            self._heartbeat_cb = None
-
-    def _reset_heartbeat(self) -> None:
-        self._cancel_heartbeat()
-
-        if self._heartbeat is not None:
-            assert self._loop is not None
-            self._heartbeat_cb = call_later(
-                self._send_heartbeat, self._heartbeat, self._loop
-            )
-
-    def _send_heartbeat(self) -> None:
-        if self._heartbeat is not None and not self._closed:
-            assert self._loop is not None
-            # Firing and forgetting a task is not perfect, but is probably
-            # fine for sending a ping; otherwise we would need a long-lived
-            # heartbeat task in the class.
-            self._loop.create_task(self._writer.ping())  # type: ignore[union-attr]
-
-            if self._pong_response_cb is not None:
-                self._pong_response_cb.cancel()
-            self._pong_response_cb = call_later(
-                self._pong_not_received, self._pong_heartbeat, self._loop
-            )
-
-    def _pong_not_received(self) -> None:
-        if self._req is not None and self._req.transport is not None:
-            self._closed = True
-            self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-            self._exception = asyncio.TimeoutError()
-            self._req.transport.close()
-
-    async def prepare(self, request: BaseRequest) -> AbstractStreamWriter:
-        # pre-check so that the error is not hidden by do_handshake() exceptions
-        if self._payload_writer is not None:
-            return self._payload_writer
-
-        protocol, writer = self._pre_start(request)
-        payload_writer = await super().prepare(request)
-        assert payload_writer is not None
-        self._post_start(request, protocol, writer)
-        await payload_writer.drain()
-        return payload_writer
-
-    def _handshake(
-        self, request: BaseRequest
-    ) -> Tuple["CIMultiDict[str]", str, bool, bool]:
-        headers = request.headers
-        if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip():
-            raise HTTPBadRequest(
-                text=(
-                    "No WebSocket UPGRADE hdr: {}\n Can "
-                    '"Upgrade" only to "WebSocket".'
-                ).format(headers.get(hdrs.UPGRADE))
-            )
-
-        if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower():
-            raise HTTPBadRequest(
-                text="No CONNECTION upgrade hdr: {}".format(
-                    headers.get(hdrs.CONNECTION)
-                )
-            )
-
-        # find common sub-protocol between client and server
-        protocol = None
-        if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
-            req_protocols = [
-                str(proto.strip())
-                for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
-            ]
-
-            for proto in req_protocols:
-                if proto in self._protocols:
-                    protocol = proto
-                    break
-            else:
-                # No overlap found: Return no protocol as per spec
-                ws_logger.warning(
-                    "Client protocols %r don’t overlap server-known ones %r",
-                    req_protocols,
-                    self._protocols,
-                )
-
-        # check supported version
-        version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "")
-        if version not in ("13", "8", "7"):
-            raise HTTPBadRequest(text=f"Unsupported version: {version}")
-
-        # check client handshake for validity
-        key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
-        try:
-            if not key or len(base64.b64decode(key)) != 16:
-                raise HTTPBadRequest(text=f"Handshake error: {key!r}")
-        except binascii.Error:
-            raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None
-
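-        # RFC 6455: the Sec-WebSocket-Accept value is the base64-encoded SHA-1
-        # of the client's Sec-WebSocket-Key concatenated with the fixed GUID (WS_KEY).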
-        accept_val = base64.b64encode(
-            hashlib.sha1(key.encode() + WS_KEY).digest()
-        ).decode()
-        response_headers = CIMultiDict(
-            {
-                hdrs.UPGRADE: "websocket",
-                hdrs.CONNECTION: "upgrade",
-                hdrs.SEC_WEBSOCKET_ACCEPT: accept_val,
-            }
-        )
-
-        notakeover = False
-        compress = 0
-        if self._compress:
-            extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
-            # The server side always returns without raising an exception;
-            # if anything goes wrong, the compress extension is simply dropped.
-            compress, notakeover = ws_ext_parse(extensions, isserver=True)
-            if compress:
-                enabledext = ws_ext_gen(
-                    compress=compress, isserver=True, server_notakeover=notakeover
-                )
-                response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext
-
-        if protocol:
-            response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol
-        return (
-            response_headers,
-            protocol,
-            compress,
-            notakeover,
-        )  # type: ignore[return-value]
-
-    def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]:
-        self._loop = request._loop
-
-        headers, protocol, compress, notakeover = self._handshake(request)
-
-        self.set_status(101)
-        self.headers.update(headers)
-        self.force_close()
-        self._compress = compress
-        transport = request._protocol.transport
-        assert transport is not None
-        writer = WebSocketWriter(
-            request._protocol, transport, compress=compress, notakeover=notakeover
-        )
-
-        return protocol, writer
-
-    def _post_start(
-        self, request: BaseRequest, protocol: str, writer: WebSocketWriter
-    ) -> None:
-        self._ws_protocol = protocol
-        self._writer = writer
-
-        self._reset_heartbeat()
-
-        loop = self._loop
-        assert loop is not None
-        self._reader = FlowControlDataQueue(request._protocol, 2**16, loop=loop)
-        request.protocol.set_parser(
-            WebSocketReader(self._reader, self._max_msg_size, compress=self._compress)
-        )
-        # disable HTTP keepalive for WebSocket
-        request.protocol.keep_alive(False)
-
-    def can_prepare(self, request: BaseRequest) -> WebSocketReady:
-        if self._writer is not None:
-            raise RuntimeError("Already started")
-        try:
-            _, protocol, _, _ = self._handshake(request)
-        except HTTPException:
-            return WebSocketReady(False, None)
-        else:
-            return WebSocketReady(True, protocol)
-
-    @property
-    def closed(self) -> bool:
-        return self._closed
-
-    @property
-    def close_code(self) -> Optional[int]:
-        return self._close_code
-
-    @property
-    def ws_protocol(self) -> Optional[str]:
-        return self._ws_protocol
-
-    @property
-    def compress(self) -> bool:
-        return self._compress
-
-    def exception(self) -> Optional[BaseException]:
-        return self._exception
-
-    async def ping(self, message: bytes = b"") -> None:
-        if self._writer is None:
-            raise RuntimeError("Call .prepare() first")
-        await self._writer.ping(message)
-
-    async def pong(self, message: bytes = b"") -> None:
-        # unsolicited pong
-        if self._writer is None:
-            raise RuntimeError("Call .prepare() first")
-        await self._writer.pong(message)
-
-    async def send_str(self, data: str, compress: Optional[bool] = None) -> None:
-        if self._writer is None:
-            raise RuntimeError("Call .prepare() first")
-        if not isinstance(data, str):
-            raise TypeError("data argument must be str (%r)" % type(data))
-        await self._writer.send(data, binary=False, compress=compress)
-
-    async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None:
-        if self._writer is None:
-            raise RuntimeError("Call .prepare() first")
-        if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError("data argument must be byte-ish (%r)" % type(data))
-        await self._writer.send(data, binary=True, compress=compress)
-
-    async def send_json(
-        self,
-        data: Any,
-        compress: Optional[bool] = None,
-        *,
-        dumps: JSONEncoder = json.dumps,
-    ) -> None:
-        await self.send_str(dumps(data), compress=compress)
-
-    async def write_eof(self) -> None:  # type: ignore[override]
-        if self._eof_sent:
-            return
-        if self._payload_writer is None:
-            raise RuntimeError("Response has not been started")
-
-        await self.close()
-        self._eof_sent = True
-
-    async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool:
-        if self._writer is None:
-            raise RuntimeError("Call .prepare() first")
-
-        self._cancel_heartbeat()
-        reader = self._reader
-        assert reader is not None
-
-        # we need to break `receive()` cycle first,
-        # `close()` may be called from different task
-        if self._waiting is not None and not self._closed:
-            reader.feed_data(WS_CLOSING_MESSAGE, 0)
-            await self._waiting
-
-        if not self._closed:
-            self._closed = True
-            try:
-                await self._writer.close(code, message)
-                writer = self._payload_writer
-                assert writer is not None
-                await writer.drain()
-            except (asyncio.CancelledError, asyncio.TimeoutError):
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                raise
-            except Exception as exc:
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                self._exception = exc
-                return True
-
-            if self._closing:
-                return True
-
-            reader = self._reader
-            assert reader is not None
-            try:
-                async with async_timeout.timeout(self._timeout):
-                    msg = await reader.read()
-            except asyncio.CancelledError:
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                raise
-            except Exception as exc:
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                self._exception = exc
-                return True
-
-            if msg.type == WSMsgType.CLOSE:
-                self._close_code = msg.data
-                return True
-
-            self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-            self._exception = asyncio.TimeoutError()
-            return True
-        else:
-            return False
-
-    async def receive(self, timeout: Optional[float] = None) -> WSMessage:
-        if self._reader is None:
-            raise RuntimeError("Call .prepare() first")
-
-        loop = self._loop
-        assert loop is not None
-        while True:
-            if self._waiting is not None:
-                raise RuntimeError("Concurrent call to receive() is not allowed")
-
-            if self._closed:
-                self._conn_lost += 1
-                if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
-                    raise RuntimeError("WebSocket connection is closed.")
-                return WS_CLOSED_MESSAGE
-            elif self._closing:
-                return WS_CLOSING_MESSAGE
-
-            try:
-                self._waiting = loop.create_future()
-                try:
-                    async with async_timeout.timeout(timeout or self._receive_timeout):
-                        msg = await self._reader.read()
-                    self._reset_heartbeat()
-                finally:
-                    waiter = self._waiting
-                    set_result(waiter, True)
-                    self._waiting = None
-            except (asyncio.CancelledError, asyncio.TimeoutError):
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                raise
-            except EofStream:
-                self._close_code = WSCloseCode.OK
-                await self.close()
-                return WSMessage(WSMsgType.CLOSED, None, None)
-            except WebSocketError as exc:
-                self._close_code = exc.code
-                await self.close(code=exc.code)
-                return WSMessage(WSMsgType.ERROR, exc, None)
-            except Exception as exc:
-                self._exception = exc
-                self._closing = True
-                self._close_code = WSCloseCode.ABNORMAL_CLOSURE
-                await self.close()
-                return WSMessage(WSMsgType.ERROR, exc, None)
-
-            if msg.type == WSMsgType.CLOSE:
-                self._closing = True
-                self._close_code = msg.data
-                if not self._closed and self._autoclose:
-                    await self.close()
-            elif msg.type == WSMsgType.CLOSING:
-                self._closing = True
-            elif msg.type == WSMsgType.PING and self._autoping:
-                await self.pong(msg.data)
-                continue
-            elif msg.type == WSMsgType.PONG and self._autoping:
-                continue
-
-            return msg
-
-    async def receive_str(self, *, timeout: Optional[float] = None) -> str:
-        msg = await self.receive(timeout)
-        if msg.type != WSMsgType.TEXT:
-            raise TypeError(
-                "Received message {}:{!r} is not WSMsgType.TEXT".format(
-                    msg.type, msg.data
-                )
-            )
-        return cast(str, msg.data)
-
-    async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
-        msg = await self.receive(timeout)
-        if msg.type != WSMsgType.BINARY:
-            raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
-        return cast(bytes, msg.data)
-
-    async def receive_json(
-        self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None
-    ) -> Any:
-        data = await self.receive_str(timeout=timeout)
-        return loads(data)
-
-    async def write(self, data: bytes) -> None:
-        raise RuntimeError("Cannot call .write() for websocket")
-
-    def __aiter__(self) -> "WebSocketResponse":
-        return self
-
-    async def __anext__(self) -> WSMessage:
-        msg = await self.receive()
-        if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
-            raise StopAsyncIteration
-        return msg
-
-    def _cancel(self, exc: BaseException) -> None:
-        if self._reader is not None:
-            self._reader.set_exception(exc)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/us_employment.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/us_employment.py
deleted file mode 100644
index 719eecf4e72ecd98cbb9135bb669b704a776ef10..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/us_employment.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-The U.S. employment crash during the Great Recession
-----------------------------------------------------
-This example is a fully developed bar chart with negative values using the sample dataset of U.S. employment changes during the Great Recession.
-"""
-# category: case studies
-import altair as alt
-import pandas as pd
-from vega_datasets import data
-
-source = data.us_employment()
-presidents = pd.DataFrame([
-    {
-        "start": "2006-01-01",
-        "end": "2009-01-19",
-        "president": "Bush"
-    },
-    {
-        "start": "2009-01-20",
-        "end": "2015-12-31",
-        "president": "Obama"
-    }
-])
-
-bars = alt.Chart(
-    source,
-    title="The U.S. employment crash during the Great Recession"
-).mark_bar().encode(
-    x=alt.X("month:T", title=""),
-    y=alt.Y("nonfarm_change:Q", title="Change in non-farm employment (in thousands)"),
-    color=alt.condition(
-        alt.datum.nonfarm_change > 0,
-        alt.value("steelblue"),
-        alt.value("orange")
-    )
-)
-
-rule = alt.Chart(presidents).mark_rule(
-    color="black",
-    strokeWidth=2
-).encode(
-    x='end:T'
-).transform_filter(alt.datum.president == "Bush")
-
-text = alt.Chart(presidents).mark_text(
-    align='left',
-    baseline='middle',
-    dx=7,
-    dy=-135,
-    size=11
-).encode(
-    x='start:T',
-    x2='end:T',
-    text='president',
-    color=alt.value('#000000')
-)
-
-(bars + rule + text).properties(width=600)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/utils/tests/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/utils/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/bucket_pad_length_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/bucket_pad_length_dataset.py
deleted file mode 100644
index 0f9410014845873bb0344fca6478c231c88e9dea..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/bucket_pad_length_dataset.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch.nn.functional as F
-from fairseq.data import BaseWrapperDataset
-from fairseq.data.data_utils import get_buckets, get_bucketed_sizes
-
-
-class BucketPadLengthDataset(BaseWrapperDataset):
-    """
-    Bucket and pad item lengths to the nearest bucket size. This can be used to
-    reduce the number of unique batch shapes, which is important on TPUs since
-    each new batch shape requires a recompilation.
-
-    Args:
-        dataset (FairseqDataset): dataset to bucket
-        sizes (List[int]): all item sizes
-        num_buckets (int): number of buckets to create
-        pad_idx (int): padding symbol
-        left_pad (bool): if True, pad on the left; otherwise right pad
-    """
-
-    def __init__(
-        self,
-        dataset,
-        sizes,
-        num_buckets,
-        pad_idx,
-        left_pad,
-        tensor_key=None,
-    ):
-        super().__init__(dataset)
-        self.pad_idx = pad_idx
-        self.left_pad = left_pad
-
-        assert num_buckets > 0
-        self.buckets = get_buckets(sizes, num_buckets)
-        self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets)
-        self._tensor_key = tensor_key
-
-    def _set_tensor(self, item, val):
-        if self._tensor_key is None:
-            return val
-        item[self._tensor_key] = val
-        return item
-
-    def _get_tensor(self, item):
-        if self._tensor_key is None:
-            return item
-        return item[self._tensor_key]
-
-    def _pad(self, tensor, bucket_size, dim=-1):
-        num_pad = bucket_size - tensor.size(dim)
-        return F.pad(
-            tensor,
-            (num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad),
-            value=self.pad_idx,
-        )
-
-    def __getitem__(self, index):
-        item = self.dataset[index]
-        bucket_size = self._bucketed_sizes[index]
-        tensor = self._get_tensor(item)
-        padded = self._pad(tensor, bucket_size)
-        return self._set_tensor(item, padded)
-
-    @property
-    def sizes(self):
-        return self._bucketed_sizes
-
-    def num_tokens(self, index):
-        return self._bucketed_sizes[index]
-
-    def size(self, index):
-        return self._bucketed_sizes[index]
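-
-# A minimal usage sketch (illustrative only; the dataset, sizes, and pad index
-# below are hypothetical, not part of this file): bucketing variable-length
-# items into 8 buckets so batches share only a handful of padded shapes.
-#
-#   sizes = [len(dataset[i]) for i in range(len(dataset))]
-#   bucketed = BucketPadLengthDataset(
-#       dataset, sizes=sizes, num_buckets=8, pad_idx=1, left_pad=False
-#   )
-#   item = bucketed[0]  # item length is padded up to its bucket's size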
diff --git a/spaces/ashpepel/ashpepel/README.md b/spaces/ashpepel/ashpepel/README.md
deleted file mode 100644
index 475acd0951f3b26fdf6550adaf3dd992319ae9e6..0000000000000000000000000000000000000000
--- a/spaces/ashpepel/ashpepel/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Ashpepel
-emoji: 🚀
-colorFrom: blue
-colorTo: red
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/augmented-surveys/retrodict/app.py b/spaces/augmented-surveys/retrodict/app.py
deleted file mode 100644
index a477c01ca6460bee944dd56492bc7291a64db2e5..0000000000000000000000000000000000000000
--- a/spaces/augmented-surveys/retrodict/app.py
+++ /dev/null
@@ -1,293 +0,0 @@
-import re
-import streamlit as st
-import numpy as np
-import pandas as pd
-import matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import MultipleLocator
-from pygam import GAM
-from rapidfuzz import fuzz, utils
-
-
-@st.cache_data
-def convert_df(df):
-    '''dataframe to csv'''
-    return df.to_csv(index=False).encode('utf-8')
-
-def highlight(s, query):
-    '''find the query and highlight it'''
-    highlighted_query = '<span style="background-color: #FFFF00">{}</span>'
-    # escape the query so regex metacharacters in user input cannot break the pattern
-    result = re.sub(f'(?i){re.escape(query)}', lambda m: highlighted_query.format(m.group(0)), s)
-    return result
-
-def score_similarity(df, column, query):
-    '''similarity between query and each value in the column'''
-    df = df[df[column].str.lower().str.contains(query, regex=False)]  # plain substring match; the query may contain regex metacharacters
-    df['wratio'] = df[column].apply(lambda x: fuzz.WRatio(query, x))
-    df = df.sort_values('wratio', ascending=False)
-    df[column] = df[column].apply(lambda x: highlight(str(x), query))
-    return df
-
-def search_data(query, search_criteria, search_filter):
-    '''Search for the query in the data and return the relevant rows.
-       search_criteria: which column to search in; search_filter: subject tags to filter by.'''
-    if len(search_filter) == 0:
-        data_sub = data
-    else:
-        selected_subjects = count_meta.loc[count_meta.option.isin(search_filter), 'subject']
-        selected_vars = subject.loc[subject.subject.isin(selected_subjects), 'var_name']
-        data_sub = data.loc[data.var_name.isin(selected_vars)]
-    if len(query) == 0:
-        return data_sub
-    query = query.lower()
-    r = [score_similarity(data_sub, col, query) for col in ['var_name', 'var_description', 'question', 'subject']]
-    results = {
-        'All': pd.concat(r, axis=0).drop_duplicates('original_var_name').reset_index(drop=True),
-        'Variable Names': r[0],
-        'Variable Descriptions': r[1],
-        'Survey Questions': r[2],
-        'GSS Tags': r[3]
-    }
-    return results.get(search_criteria)
-
-def get_table_for_figure(var):
-    '''Create the table that will be used to draw the chart'''
-    tab_pred = dt_summary1.loc[(dt_summary1.variable == var) & (dt_summary1.pred_type == 'rescale')]
-    tab_obs = dt_summary1.loc[(dt_summary1.variable == var) & (dt_summary1.pred_type == 'obsbin')]
-    tab_obs = tab_obs.merge(tab_pred[['year']], how='right').rename({'mean': 'obs_mean', 'lci': 'obs_lci', 'uci': 'obs_uci'}, axis=1)
-    tab_pred = pd.merge(tab_pred, tab_obs[['year','obs_mean','obs_lci','obs_uci']], on = 'year')
-    tab_pred['lci'] = tab_pred['mean'] - 0.03
-    tab_pred['uci'] = tab_pred['mean'] + 0.03
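-    # overlap codes: 0 = observed mean inside the +/-3% prediction band (correct prediction),
-    # 1 = observed mean outside the band (incorrect prediction), 2 = no observation (novel prediction)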
-    tab_pred.loc[(tab_pred.lci <= tab_pred.obs_mean) & (tab_pred.obs_mean <= tab_pred.uci) , 'overlap'] = 0
-    tab_pred.loc[pd.notnull(tab_pred.obs_mean) & ~((tab_pred.lci <= tab_pred.obs_mean) & (tab_pred.obs_mean <= tab_pred.uci)) , 'overlap'] = 1
-    tab_pred.loc[pd.isnull(tab_pred.obs_mean), 'overlap'] = 2
-    tab_pred[['mean', 'lci', 'uci', 'obs_mean', 'obs_lci', 'obs_uci']] *= 100
-    if pd.notnull(tab_pred['obs_mean']).sum() == 1:
-        tab_pred.loc[pd.notnull(tab_pred.obs_mean), ['mean', 'lci', 'uci']] = None
-    return tab_pred
-
-def get_figure(var, var_desc):
-    df = get_table_for_figure(var)
-    range_y = (min(df['lci'].min(), df.obs_lci.min()) - 15, 
-               max(df['uci'].max(), df.obs_uci.max()) + 15)
-    df['mean_gam'] = df['mean']
-    df.loc[pd.isnull(df['mean']), 'mean_gam'] = df.loc[pd.isnull(df['mean']), 'obs_mean']
-    gam = GAM().gridsearch(np.array(df[['year']]), np.array(df[['mean_gam']])) # GAM to draw the trend line
-    XX = gam.generate_X_grid(term=0)
-    
-    # create a single figure; calling plt.figure() separately would leave an unused figure and drop the figsize
-    fig, ax = plt.subplots(figsize=(10, 6))
-    
-    marker_dict = {0: 'o', 1: 'o', 2: (8, 2, 0)}
-    color_dict = {0: 'black', 1: 'black', 2: 'b'}
-    fill_color_dict = {0: 'black', 1: 'none', 2: 'b'}
-    label_dict = {0: 'correct prediction', 1: 'incorrect prediction', 2: 'novel prediction'}
-    size_dict = {0: 5.5, 1: 5.8, 2: 8}
-
-    plt.plot(XX[:, 0].flatten(), gam.predict(XX), 'black') # GAM plot
-    plt.fill_between(XX[:, 0].flatten(), gam.confidence_intervals(XX, width=.95)[:, 0], gam.confidence_intervals(XX, width=.95)[:, 1], color='lightgray')
-    
-    for i in range(3): # Plot predictions
-        plt.plot(df.loc[df.overlap == i, 'year'], df.loc[df.overlap == i, 'mean'], label=label_dict[i], marker=marker_dict[i], 
-                 c=color_dict[i], markerfacecolor=fill_color_dict[i], ms=size_dict[i], mew=1, linestyle = 'None')
-        plt.errorbar(df.loc[df.overlap == i, 'year'], df.loc[df.overlap == i, 'mean'], 
-                     yerr=[df.loc[df.overlap == i, 'mean']-df.loc[df.overlap == i, 'lci'], df.loc[df.overlap == i, 'uci']-df.loc[df.overlap == i, 'mean']], 
-                    fmt="none", color=color_dict[i], ecolor=color_dict[i], elinewidth=1, capsize=0)
-        
-    plt.plot(df['year'], df['obs_mean'], marker="+", c='r', ms=7, mew=1.1, linestyle = 'None', label='observed') # Plot observations
-    plt.errorbar(df['year'], df['obs_mean'], yerr=[df['obs_mean']-df['obs_lci'], df['obs_uci']-df['obs_mean']], 
-                fmt='none', color='r', ecolor='r', elinewidth=1, capsize=0)
-
-    # Plot styles
-    plt.xlabel('Year')
-    plt.ylabel('Positive Response (%)')
-    plt.legend(ncol=4, loc="upper center", bbox_to_anchor=(0.5, 1.09), 
-               frameon=False, shadow=False, prop = { "size": 10 }, columnspacing=0.1, handletextpad=0.01)
-    plt.title(r"$\bf{" + f'({var})' + "}$" + f' {var_desc}', loc='left', fontdict={'fontsize': 10}, wrap=True, y=1.06)
-    plt.grid(which='major', color='lightgray', linestyle='-', alpha=0.2)
-    plt.grid(which='minor', color='lightgray', linestyle='-', alpha=0.4)
-    plt.minorticks_on()
-    # plt.yticks(np.arange(20, 108, 20))
-    ax.yaxis.set_minor_locator(MultipleLocator(10))
-    ax.xaxis.set_minor_locator(MultipleLocator(5))
-    plt.ylim(range_y)
-    plt.xlim((1970, 2023))
-    fn = f'{var}.png'
-    
-    plt.savefig(fn)
-    return fig, df, fn
-
-def assign_current_var(x):
-    st.session_state['current_var'] = x
-
-def view_help():
-    st.session_state['current_var'] = None
-    st.session_state['search_query'] = None
-    st.session_state['current_page'] = 1
-
-def main():
-    '''Main function to run the Streamlit app'''
-    st.set_page_config(page_title="AI-Augmented Surveys: Retrodiction Demo", layout="wide")
-    # CSS to inject contained in a string
-    hide_table_row_index = """
-                <style>
-                thead tr th:first-child {display:none}
-                tbody th {display:none}
-                </style>
-                """
-    # Inject CSS with Markdown to remove index from dataframes
-    st.markdown(hide_table_row_index, unsafe_allow_html=True)
-
-    with st.sidebar:
-        st.title("AI-Augmented Surveys: Leveraging Large Language Models for Opinion Prediction in Nationally Representative Surveys")
-        st.markdown("*Retrodiction Demo*<br />Junsol Kim, Byungkyu Lee<br />Paper: https://arxiv.org/abs/2305.09620", unsafe_allow_html=True)
-        help_click = st.button("Help", on_click=view_help)
-
-    n_var_per_page = 25 # parameters for search results
-    n_cards_per_row = 4
-
-    if st.session_state['current_var'] is None: # When there is no variable that the user clicked in the search results
-        col1, col2, col3 = st.columns([3, 1, 1])
-        with col1:
-            search_query = st.text_input('Search GSS variables')
-        with col2:
-            search_criteria = st.selectbox("Criteria", ['All', 'Variable Names', 'Variable Descriptions', 'Survey Questions', 'GSS Tags'])  # must match the result keys in search_data
-        with col3:
-            search_filter = st.multiselect("Filter by GSS tags", count_meta['option'])
-        search_button = st.button("Search")
-        
-        if search_query or search_button: # Perform search when the search button is clicked
-            st.session_state['search_query'] = search_query
-            st.session_state['current_page'] = 1
-
-        if help_click or st.session_state['search_query'] is None:
-            help1, help2, help3, help4 = st.columns([1, 1, 1, 1])
-            with help1:
-                st.write("---")
-                st.subheader("📈")
-                st.markdown("This demo site presents public opinion trends as predicted by our General Social Survey (GSS)-based AI language models, specifically Alpaca-7b fine-tuned for retrodiction tasks. Users can search for variables of interest using keywords (e.g., “gay”) or by filtering using GSS tags. After selecting a variable, click on “View this variable” to explore further.")
-            with help2:
-                st.write("---")
-                st.subheader("🗃")
-                st.markdown("Take advantage of the different tabs available! Here, you’ll find information about the predictive accuracy of our models, and how we binarize response options into 1 or 0. You can download data in CSV or image format as well.")
-            with help3:
-                st.write("---")
-                st.subheader("🧐")
-                st.markdown("Please note that not all GSS variables are included in our system. We only feature those with binarized options, excluding those with numerous categories and many others (see our paper for more details). If you can’t find the variables that interest you, don’t hesitate to send us an email at junsol@uchicago.edu. We’d be glad to investigate why those variables aren’t visible.")
-            with help4:
-                st.write("---")
-                st.subheader("⚙️")
-                st.markdown("Currently, we’re refining our model to incorporate the 2022 GSS survey data and additional GSS variables. Stay tuned for updates!")
-        else:
-            results = search_data(st.session_state['search_query'], search_criteria, search_filter).reset_index(drop=True)
-            if results.empty:
-                st.warning('No variables found.')
-            else:
-                page = st.session_state['current_page'] # print search results
-                st.success(f'Found {len(results)} variable(s).')
-                col21, col22, col23, col24 = st.columns([4, 2, 2, 2])
-                with col23:
-                    n_var_per_page = st.selectbox('Variables per page', [25, 50, 100, 200])
-                maxpage = int(np.ceil(len(results) / n_var_per_page))
-                with col24:
-                    st.session_state['current_page'] = st.selectbox('Move to another page', list(range(1, maxpage+1)))
-                    page = st.session_state['current_page']
-                with col22:
-                    sortby = st.selectbox('Sort by', ['Relevance', 'AUC'])
-                with col21:
-                    st.markdown(f'Page {page} of {maxpage}')
-                button_list = []
-                if sortby == 'AUC':
-                    results = results.loc[pd.notnull(results['var_name'])]
-                    results = results.merge(performance_partial[['var_name', 'auc']], on='var_name', how='left').sort_values('auc', ascending=False).reset_index(drop=True)
-                else:
-                    results = results.loc[pd.notnull(results['var_name'])]
-                for n_row, row in results.loc[(page-1)*n_var_per_page:page*n_var_per_page-1].reset_index().iterrows():
-                    i = n_row % n_cards_per_row
-                    if i == 0:
-                        cols = st.columns(n_cards_per_row, gap="large")
-                    # draw the card
-                    with cols[n_row % n_cards_per_row]:
-                        st.write("---")
-                        st.markdown(f"**{row['var_name'].strip()}**", unsafe_allow_html=True)
-                        st.markdown(f"**Variable description:** *{row['var_description'].strip()}*", unsafe_allow_html=True)
-                        st.markdown(f"**Survey question:** {row['question']}", unsafe_allow_html=True)
-                        st.markdown(f"**GSS tags:** {row['subject']}", unsafe_allow_html=True)
-                        button_list.append(st.button('View this variable', key=row['var_name'], 
-                                                     on_click=assign_current_var, 
-                                                     args=(row['original_var_name'], )))
-                for i in range(len(button_list)):
-                    if button_list[i]:
-                        current_variable = str(results.loc[i, 'var_name'])
-
-
-    else:  # When there is a variable that the user clicked in the search results
-        st.button('Back', on_click=assign_current_var, args=(None, ))  # if the user clicks Back, return to the search results
-        col31, col32 = st.columns([2, 3])
-        with col31:
-            st.subheader(f"**{st.session_state['current_var']}**")
-            var_desc = data.loc[data['var_name'] == st.session_state['current_var'], 'var_description'].tolist()[0]
-            st.markdown(f"**Variable description:** *{var_desc}*", unsafe_allow_html=True)
-            st.markdown(f"**Survey question:** {data.loc[data['var_name'] == st.session_state['current_var'], 'question'].tolist()[0]}", unsafe_allow_html=True)
-            st.markdown(f"**GSS tags:** {data.loc[data['var_name'] == st.session_state['current_var'], 'subject'].tolist()[0]}", unsafe_allow_html=True)
-        with col32:
-            tab1, tab2, tab3, tab4 = st.tabs(["📈 Retrodiction Chart", "🏆 Accuracy", "✏️ Binarization", "🗃 Retrodiction Data"])
-            var = st.session_state['current_var']
-            fig, df, fn = get_figure(var, var_desc)
-            tab1.pyplot(fig)
-            tab1.markdown("*Note:* The generalized additive model has been used to estimate the trend. We define the correct prediction when the prediction interval within 3% margin of error includes the observed estimate. ")
-            with open(fn, "rb") as img:
-                tab1.download_button(
-                    label="Download image",
-                    data=img,
-                    file_name=fn,
-                    mime="image/png"
-                )
-            tab4.table(df[['year', 'mean', 'obs_mean']])
-            tab4.download_button(
-                label="Download data as CSV",
-                data=convert_df(df[['year', 'mean', 'obs_mean']]),
-                file_name=f'{var}.csv',
-                mime='text/csv',
-            )
-            current_var_perform = performance_partial.loc[performance_partial.var_name == var, ['auc', 'accuracy', 'f1']].reset_index(drop=True)
-            tab2.table(current_var_perform)
-            if len(current_var_perform) == 0:
-                tab2.markdown('*Note:* If the variable has only been measured for a single year, there are no accuracy metrics available for retrodiction tasks. For information regarding the simulated AUC of a variable measured once, please refer to the paper.')
-            tab3.table(binary.loc[binary.variable == var, ['binarized', 'response']].sort_values(['binarized', 'response']).reset_index(drop=True))
-
-
-if __name__ == '__main__':
-    matplotlib.rcParams.update({'font.size': 14})
-    
-    if 'current_var' not in st.session_state:
-        st.session_state['current_var'] = None
-    if 'search_query' not in st.session_state:
-        st.session_state['search_query'] = None
-    if 'current_page' not in st.session_state:
-        st.session_state['current_page'] = 1
-    if 'show_help' not in st.session_state:
-        st.session_state['show_help'] = False
-
-    # load data
-    data = pd.read_parquet('var_meta.parquet')
-    subject = pd.read_parquet('subject_meta.parquet')
-    count_meta = pd.read_parquet('count_meta.parquet')
-    dt_summary1 = pd.read_parquet('dt_summary1_alpaca.parquet')
-    performance_partial = pd.read_parquet('performance_partial.parquet')
-    binary = pd.read_parquet('binary.parquet')
-    binary['binarized'] = binary['binarized'].astype(int)
-    df_prop = pd.read_parquet('df_prop.parquet')
-
-    # exclude variables if retrodiction cases are not available
-    to_be_excluded = dt_summary1.loc[(dt_summary1.pred_type == 'obsbin')].groupby('variable')['pred_type'].count().reset_index()
-    to_be_excluded = to_be_excluded.loc[to_be_excluded.pred_type == 1, 'variable']
-    performance_partial = performance_partial.loc[~performance_partial.var_name.isin(to_be_excluded)]
-    to_be_excluded = df_prop.loc[(df_prop.prop >= 0.5) & (df_prop.sb == False), 'variable'].tolist()
-    to_be_excluded += ['partyid1', 'partyid2', 'partyid3']
-    data = data.loc[~data.var_name.isin(to_be_excluded)]
-    dt_count = subject.loc[subject.var_name.isin(data.var_name)].subject.value_counts().reset_index()
-    dt_count['option'] = dt_count.subject + ' (' + dt_count['count'].astype(str) + ')'
-    count_meta = dt_count.sort_values('subject')
-
-    main()
diff --git a/spaces/auto-academic/auto-draft/README-en.md b/spaces/auto-academic/auto-draft/README-en.md
deleted file mode 100644
index a80c6063c0896b0f9430524caedfa36cb60c99ba..0000000000000000000000000000000000000000
--- a/spaces/auto-academic/auto-draft/README-en.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Auto-Draft: Literatures Summarization Assistant
-
-This project aims to automatically summarize a field of research. 
-
-***Features***: 
-* Automatically search related papers. 
-* Every claim has an appropriate citation.
-* Output LaTeX template.   
-
-This script requires GPT-4 access. One round of generation will take around 10 minutes and 15,000 tokens (0.5 to 0.8 US dollars).  
-
-# Demo
-
-The following link provides a free demo of basic functions. 
-If you need more customized features, please refer to *Usage* for local deployment and modification. 
-
-https://huggingface.co/spaces/auto-academic/auto-draft
-
-# Usage
-1. Clone this repo:
-```bash
-git clone https://github.com/CCCBora/auto-draft
-```
-2. Install dependencies:
-```bash
-pip install -r requirements.txt
-```
-3. Set the `OPENAI_API_KEY` in the environment variables.
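-For example, on Linux/macOS (the key value below is a placeholder):
-```bash
-export OPENAI_API_KEY="your-api-key-here"
-```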
-4. Edit `auto_backgrounds.py` to customize the topic you want to explore, and then run
-```bash
-python auto_backgrounds.py
-```
-
-# Example Outputs
-The `outputs` folder contains some original outputs for given inputs. 
-They can be directly compiled using Overleaf. 
-
-Page 1            |  Page 2
-:-------------------------:|:-------------------------:
-![](assets/page1.png "Page-1") |  ![](assets/page2.png "Page-2") 
-
-
-
diff --git a/spaces/avivdm1/AutoGPT/autogpt/agent/__init__.py b/spaces/avivdm1/AutoGPT/autogpt/agent/__init__.py
deleted file mode 100644
index e928af2205b1c52d19dc89ec4246e8c1d2c20e3f..0000000000000000000000000000000000000000
--- a/spaces/avivdm1/AutoGPT/autogpt/agent/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from autogpt.agent.agent import Agent
-from autogpt.agent.agent_manager import AgentManager
-
-__all__ = ["Agent", "AgentManager"]
diff --git a/spaces/awacke1/AnimationUsingLottie/README.md b/spaces/awacke1/AnimationUsingLottie/README.md
deleted file mode 100644
index eb6dd041685d8a3626102b2e103ea33a36ff5f0c..0000000000000000000000000000000000000000
--- a/spaces/awacke1/AnimationUsingLottie/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AnimationUsingLottie
-emoji: 😻
-colorFrom: pink
-colorTo: red
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/File-Memory-Human-Feedback-Streamlit/app.py b/spaces/awacke1/File-Memory-Human-Feedback-Streamlit/app.py
deleted file mode 100644
index d02c7c0964e5ff598a8738a3fb2e62ffdf0c6c92..0000000000000000000000000000000000000000
--- a/spaces/awacke1/File-Memory-Human-Feedback-Streamlit/app.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import os
-import streamlit as st
-
-def list_files():
-    icon_csv = "📄 "
-    icon_txt = "📑 "
-
-    current_directory = os.getcwd()
-    file_list = []
-    for filename in os.listdir(current_directory):
-        if filename.endswith(".csv"):
-            file_list.append(icon_csv + filename)
-        elif filename.endswith(".txt"):
-            file_list.append(icon_txt + filename)
-    return file_list
-
-def read_file(file_path):
-    try:
-        with open(file_path, "r") as file:
-            contents = file.read()
-            return f"{contents}"
-    except FileNotFoundError:
-        return "File not found."
-
-def delete_file(file_path):
-    try:
-        os.remove(file_path)
-        return f"{file_path} has been deleted."
-    except FileNotFoundError:
-        return "File not found."
-
-def write_file(file_path, content):
-    try:
-        with open(file_path, "w") as file:
-            file.write(content)
-        return f"Successfully written to {file_path}."
-    except Exception:
-        return "Error occurred while writing to file."
-
-def append_file(file_path, content):
-    try:
-        with open(file_path, "a") as file:
-            file.write(content)
-        return f"Successfully appended to {file_path}."
-    except Exception:
-        return "Error occurred while appending to file."
-
-st.set_page_config(layout='wide')
-st.title("AI Feedback Memory System for Smart Communities")
-
-# Sidebar
-with st.sidebar:
-    st.subheader("Download Files")
-    file_list = list_files()
-    if file_list:
-        for file in file_list:
-            if st.button(file):
-                file_path = os.path.join(os.getcwd(), file[2:])
-                st.markdown(f"### {file}")
-                st.markdown(f"[Download]({file_path})")
-    else:
-        st.markdown("No .csv or .txt files found in the current directory.")
-
-# Main content
-fileName = st.text_input("Filename")
-fileContent = st.text_area("File Content")
-completedMessage_placeholder = st.empty()
-
-col1, col2, col3, col4, col5 = st.columns(5)
-listFiles = col1.button("📄 List File(s)")
-readFile = col2.button("📖 Read File")
-saveFile = col3.button("💾 Save File")
-deleteFile = col4.button("🗑️ Delete File")
-appendFile = col5.button("➕ Append File")
-
-if listFiles:
-    fileContent = "\n".join(list_files())
-    st.text_area("File Content", fileContent)
-elif readFile:
-    fileContent = read_file(fileName)
-    st.text_area("File Content", fileContent)
-elif saveFile:
-    completedMessage = write_file(fileName, fileContent)
-    completedMessage_placeholder.text(completedMessage)
-elif deleteFile:
-    completedMessage = delete_file(fileName)
-    completedMessage_placeholder.text(completedMessage)
-elif appendFile:
-    completedMessage = append_file(fileName, fileContent)
-    completedMessage_placeholder.text(completedMessage)
-
-st.markdown("""    
-🏡🧠🚀
-🤖💭📈
-📝🤣🌞
-💯👨‍💼💬
-👋😄🌇
-📱💻🔜
-The new 🤖 AI Feedback Memory System for Smart Communities 🏡🧠🚀 
-""")
diff --git a/spaces/awacke1/GPU-Memory-Detector/README.md b/spaces/awacke1/GPU-Memory-Detector/README.md
deleted file mode 100644
index 1cfdb7e63f206b9f804ce6aab164ffd008f3ff75..0000000000000000000000000000000000000000
--- a/spaces/awacke1/GPU-Memory-Detector/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GPU Memory Detector
-emoji: 🏃
-colorFrom: gray
-colorTo: red
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/Gradio-Blocks-Demo/app.py b/spaces/awacke1/Gradio-Blocks-Demo/app.py
deleted file mode 100644
index 1ed2f316fa4b8d79e41c70387895d8cdbf064463..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Gradio-Blocks-Demo/app.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-title = "📗❤️-Story Generator❤️📗- 🦄Myths and Legends🦸"
-examples = [
-    ["Cernunnos the Gaelic god of beasts and wild places"],
-    ["Often called the Horned One, Cernunnos was a mediator of man and nature"],
-    ["able to tame predator and prey so they might lie down together"],
-    ["He remains a mysterious deity, as his original mythos has been lost to history"],
-    ["It was believed that ringing a bell on Samhain kept away evil spirits"],
-    ["Burying animal bones in front of your house on the night of Samhain will"],
-    ["keep evil away, according to some legends of eastern Europe"],
-    ["Samhain is a good time of year to work on communicating with the spirit world"],
-    ["In some Pacific Northwest tribes, elk are also considered to be"],
-    ["particular protectors of women, and in some legends elk lead women who had been "],
-    ["captured by enemy warriors back to their homes"],
-    ["In Plains Indian tribes, elk were associated with masculinity, endurance, and bravery, and elks eyeteeth were highly valued both as objects of adornment and as the symbol of a mans hunting prowess."],
-    ["In some Plains tribes, men saved the eyeteeth from their first elk kill to make into engagement jewelry for their sweetheart. In others, the number of elk teeth sewn onto a womans dress showed off the wealth and skill of her husband or father."],
-    ["Ah Puch is one of the names associated with a god of death in the ancient Mayan religion. He was known as a god of death, darkness, and disaster. But he was also a god of childbirth and beginnings. The Quiche Maya believed that he ruled over Metnal, the underworld and the Yucatec Maya believed that he was just one of the lords of Xibaba, that translates to place of fear in the underworld."],
-    ["Nuwa was the one who patched the holes in Heaven with five colored stones, and she used the legs of a tortoise to mend the pillars. There are many instances of her in literature across China which detail her in creation stories, and today remains a figure important to Chinese culture."]
-]
-
-generator2 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B")
-generator3 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
-generator1 = gr.Interface.load("huggingface/gpt2-large")
-
-#gr.Parallel(generator1, generator2, generator3, inputs=gr.inputs.Textbox(lines=6, label="Enter a sentence to get another sentence."),title=title, examples=examples).launch()
-
-def complete_with_gpt(text):
-    # Use the last 50 characters of the text as context
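-    # The hosted endpoint presumably echoes the prompt in its output, so
-    # prepending text[:-50] reconstructs the full input plus the continuation.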
-    return text[:-50] + generator1(text[-50:])
-
-with gr.Blocks() as demo:
-    textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
-    btn = gr.Button("Generate")
-
-    btn.click(complete_with_gpt, textbox, textbox)
-
-demo.launch()
diff --git a/spaces/awacke1/MadLibs/README.md b/spaces/awacke1/MadLibs/README.md
deleted file mode 100644
index 68ea6e32900418c65efe8cdf5d0c8ee82d3dd5a4..0000000000000000000000000000000000000000
--- a/spaces/awacke1/MadLibs/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 🏢 MadLibs Streamlit
-emoji: 🏢
-colorFrom: pink
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/MultiPDF-QA-ChatGPT-Langchain/htmlTemplates.py b/spaces/awacke1/MultiPDF-QA-ChatGPT-Langchain/htmlTemplates.py
deleted file mode 100644
index 3d55ded9c07afe6b571a99f57d50883c63cc37b3..0000000000000000000000000000000000000000
--- a/spaces/awacke1/MultiPDF-QA-ChatGPT-Langchain/htmlTemplates.py
+++ /dev/null
@@ -1,44 +0,0 @@
-css = '''
-<style>
-.chat-message {
-    padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
-}
-.chat-message.user {
-    background-color: #2b313e
-}
-.chat-message.bot {
-    background-color: #475063
-}
-.chat-message .avatar {
-  width: 20%;
-}
-.chat-message .avatar img {
-  max-width: 78px;
-  max-height: 78px;
-  border-radius: 50%;
-  object-fit: cover;
-}
-.chat-message .message {
-  width: 80%;
-  padding: 0 1.5rem;
-  color: #fff;
-}
-'''
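-
-# The {{MSG}} token in the templates below is a plain placeholder; the app
-# importing this module presumably substitutes it, e.g.
-#   html = bot_template.replace("{{MSG}}", answer_text)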
-
-bot_template = '''
-<div class="chat-message bot">
-    <div class="avatar">
-        <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
-    </div>
-    <div class="message">{{MSG}}</div>
-</div>
-'''
-
-user_template = '''
-<div class="chat-message user">
-    <div class="avatar">
-        <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
-    </div>    
-    <div class="message">{{MSG}}</div>
-</div>
-'''
\ No newline at end of file
diff --git a/spaces/awacke1/Wikipedia.Chat.Multiplayer/app.py b/spaces/awacke1/Wikipedia.Chat.Multiplayer/app.py
deleted file mode 100644
index 7cf61d9575bf30b009a28d965558d86c62790245..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Wikipedia.Chat.Multiplayer/app.py
+++ /dev/null
@@ -1,271 +0,0 @@
-import streamlit as st
-import spacy
-import wikipediaapi
-import wikipedia
-from wikipedia.exceptions import DisambiguationError
-from transformers import TFAutoModel, AutoTokenizer
-import numpy as np
-import pandas as pd
-import faiss
-import datetime
-import time
-
-
-st.markdown("""
-
-Scene 1: The Enchanted Castle
-
-You arrive at the enchanted castle, surrounded by a forest of thorns. You have heard stories of a beautiful princess asleep within, waiting for someone to awaken her.
-
-Option 1: Try to make your way through the thorns.
-Option 2: Look for another way in.
-
-Sentiment: Feels like harsher trials after passive sleep.
-
----
-
-Scene 2: The Castle's Secrets
-
-If you made it past the thorns, you discover that the castle is full of hidden chambers, each containing a different trial. 
-
-These trials are designed to test your limits, reveal your innermost desires, and help you understand the suffering of humankind.
-
-Option 1: Enter the first chamber.
-Option 2: Continue exploring the castle.
-
-Sentiment: Comedy ending in marriage.
-
----
-
-Scene 3: The Princess's Awakening
-
-After navigating the castle's trials, you finally reach the chamber where the princess lies sleeping. 
-
-You are faced with the decision of how to awaken her, knowing that your actions will determine the nature of your relationship with her.
-
-Option 1: Awaken her with a gentle kiss.
-Option 2: Awaken her through a more assertive act like lifting her up.
-
-Sentiment: Heart forged awakening with different implications depending on context.
-
-""")
-
-try:
-    nlp = spacy.load("en_core_web_sm")
-except OSError:
-    spacy.cli.download("en_core_web_sm")
-    nlp = spacy.load("en_core_web_sm")
-
-wh_words = ['what', 'who', 'how', 'when', 'which']
-
-def get_concepts(text):
-    text = text.lower()
-    doc = nlp(text)
-    concepts = []
-    for chunk in doc.noun_chunks:
-        if chunk.text not in wh_words:
-            concepts.append(chunk.text)
-    return concepts
-
-def get_passages(text, k=100):
-    doc = nlp(text)
-    passages = []
-    passage_len = 0
-    passage = ""
-    sents = list(doc.sents)
-    for i in range(len(sents)):
-        sen = sents[i]
-        passage_len += len(sen)
-        if passage_len >= k:
-            passages.append(passage)
-            passage = sen.text
-            passage_len = len(sen)
-            continue
-        elif i == (len(sents) - 1):
-            passage += " " + sen.text
-            passages.append(passage)
-            passage = ""
-            passage_len = 0
-            continue
-        passage += " " + sen.text
-    return passages
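-
-# Note: len(sen) on a spaCy Span is its token count, so k above is a
-# per-passage token budget rather than a character count.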
-
-def get_dicts_for_dpr(concepts, n_results=20, k=100):
-    dicts = []
-    for concept in concepts:
-        wikis = wikipedia.search(concept, results=n_results)
-        st.write(f"{concept} No of Wikis: {len(wikis)}")
-        for wiki in wikis:
-            try:
-                html_page = wikipedia.page(title=wiki, auto_suggest=False)
-            except DisambiguationError:
-                continue
-            htmlResults = html_page.content
-            passages = get_passages(htmlResults, k=k)
-            for passage in passages:
-                i_dicts = {}
-                i_dicts['text'] = passage
-                i_dicts['title'] = wiki
-                dicts.append(i_dicts)
-    return dicts
-
-passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
-query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
-p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
-q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
-
-def get_title_text_combined(passage_dicts):
-    res = []
-    for p in passage_dicts:
-        res.append(tuple((p['title'], p['text'])))
-    return res
-
-def extracted_passage_embeddings(processed_passages, max_length=156):
-    passage_inputs = p_tokenizer.batch_encode_plus(
-                    processed_passages,
-                    add_special_tokens=True,
-                    truncation=True,
-                    padding="max_length",
-                    max_length=max_length,
-                    return_token_type_ids=True
-                )
-    passage_embeddings = passage_encoder.predict([np.array(passage_inputs['input_ids']), np.array(passage_inputs['attention_mask']), 
-                                            np.array(passage_inputs['token_type_ids'])], 
-                                            batch_size=64, 
-                                            verbose=1)
-    return passage_embeddings
-
-def extracted_query_embeddings(queries, max_length=64):
-    query_inputs = q_tokenizer.batch_encode_plus(
-        queries,
-        add_special_tokens=True,
-        truncation=True,
-        padding="max_length",
-        max_length=max_length,
-        return_token_type_ids=True
-    )
-    
-    query_embeddings = query_encoder.predict([np.array(query_inputs['input_ids']),
-        np.array(query_inputs['attention_mask']),
-        np.array(query_inputs['token_type_ids'])],
-        batch_size=1,
-        verbose=1)
-    return query_embeddings
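-
-# Retrieval sketch (illustrative; not called in this app): embed passages and
-# a query with the helpers above, then search with FAISS, e.g.
-#   index = faiss.IndexFlatL2(128)                    # H=128 for these encoders
-#   index.add(passage_embeddings.pooler_output)
-#   scores, ids = index.search(query_embeddings.pooler_output, 5)
-# The exact output field name depends on the transformers version.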
-
-def get_pagetext(page):
-    s = str(page).replace("\t", "")
-    return s
-
-def get_wiki_summary(search):
-    wiki_wiki = wikipediaapi.Wikipedia('en')
-    page = wiki_wiki.page(search)
-    return page.summary
-
-
-def get_wiki_summaryDF(search):
-    wiki_wiki = wikipediaapi.Wikipedia('en')
-    page = wiki_wiki.page(search)
-
-    isExist = page.exists()
-    if not isExist:
-        return isExist, "Not found", "Not found", "Not found", "Not found"
-
-    pageurl = page.fullurl
-    pagetitle = page.title
-    pagesummary = page.summary[0:60]
-    pagetext = get_pagetext(page.text)
-
-    backlinks = page.backlinks
-    linklist = ""
-    for link in backlinks.items():
-        linklist += link[0] + " ,  "
-
-    categories = page.categories
-    categorylist = ""
-    for category in categories.items():
-        categorylist += category[0] + " ,  "
-
-    links = page.links
-    linklist2 = ""
-    for link in links.items():
-        linklist2 += link[0] + " ,  "
-      
-    sections = page.sections
-    
-    ex_dic = {
-        'Entity': ["URL", "Title", "Summary", "Text", "Backlinks", "Links", "Categories"],
-        'Value': [pageurl, pagetitle, pagesummary, pagetext, linklist, linklist2, categorylist]
-    }
-
-    df = pd.DataFrame(ex_dic)
-    
-    return df
-
-
-def save_message(name, message):
-    now = datetime.datetime.now()
-    timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
-    with open("chat.txt", "a") as f:
-        f.write(f"{timestamp} - {name}: {message}\n")
-
-def press_release():
-    st.markdown("""🎉🎊 Breaking News! 📢📣
-Introducing StreamlitWikipediaChat - the ultimate way to chat with Wikipedia and the whole world at the same time! 🌎📚👋
-Are you tired of reading boring articles on Wikipedia? Do you want to have some fun while learning new things? Then StreamlitWikipediaChat is just the thing for you! 😃💻
-With StreamlitWikipediaChat, you can ask Wikipedia anything you want and get instant responses! Whether you want to know the capital of Madagascar or how to make a delicious chocolate cake, Wikipedia has got you covered. 🍰🌍
-But that's not all! You can also chat with other people from around the world who are using StreamlitWikipediaChat at the same time. It's like a virtual classroom where you can learn from and teach others. 🌐👨‍🏫👩‍🏫
-And the best part? StreamlitWikipediaChat is super easy to use! All you have to do is type in your question and hit send. That's it! 🤯🙌
-So, what are you waiting for? Join the fun and start chatting with Wikipedia and the world today! 😎🎉
-StreamlitWikipediaChat - where learning meets fun! 🤓🎈""")
-
-
-def main():
-    st.title("Streamlit Chat")
-
-    name = st.text_input("Enter your name")
-    message = st.text_input("Enter a topic to share from Wikipedia")
-    if st.button("Submit"):
-        
-        # wiki
-        df = get_wiki_summaryDF(message)
-        
-        save_message(name, message)
-        save_message(name, df)
-        
-        st.text("Message sent!")
-
-    
-    st.text("Chat history:")
-    with open("chat.txt", "a+") as f:
-        f.seek(0)
-        chat_history = f.read()
-    #st.text(chat_history)
-    st.markdown(chat_history)
-
-    countdown = st.empty()
-    t = 60
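-    # Intentionally loops forever: count down 60 seconds, re-render the chat
-    # history, then reset the timer (a crude auto-refresh for the shared chat).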
-    while t:
-        mins, secs = divmod(t, 60)
-        countdown.text(f"Time remaining: {mins:02d}:{secs:02d}")
-        time.sleep(1)
-        t -= 1
-        if t == 0:
-            countdown.text("Time's up!")
-            with open("chat.txt", "a+") as f:
-                f.seek(0)
-                chat_history = f.read()
-            #st.text(chat_history)
-            st.markdown(chat_history)
-
-            press_release()
-            
-            t = 60
-
-if __name__ == "__main__":
-    main()
-
diff --git a/spaces/ayaanzaveri/mnist/README.md b/spaces/ayaanzaveri/mnist/README.md
deleted file mode 100644
index d09ffcda891db9a71681506564820e26192001e2..0000000000000000000000000000000000000000
--- a/spaces/ayaanzaveri/mnist/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Mnist
-emoji: 👁
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version`: _string_
-Only applicable for `streamlit` SDK.  
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`models`: _List[string]_
-HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`datasets`: _List[string]_
-HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
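-
-For example, the two list-valued fields above take YAML sequences (the IDs
-below are the illustrative ones from the descriptions, not values this Space
-uses):
-
-```yaml
-models:
-  - gpt2
-  - deepset/roberta-base-squad2
-datasets:
-  - common_voice
-  - oscar-corpus/OSCAR-2109
-```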
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/onnx_export.py b/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/onnx_export.py
deleted file mode 100644
index 5deda785cf22b341f7d2e6399ef5fcdad6fe129e..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/onnx_export.py
+++ /dev/null
@@ -1,226 +0,0 @@
-from diffusion_onnx import GaussianDiffusion
-import os
-import yaml
-import torch
-import torch.nn as nn
-import numpy as np
-import torch.nn.functional as F
-
-class DotDict(dict):
-    def __getattr__(*args):         
-        val = dict.get(*args)         
-        return DotDict(val) if type(val) is dict else val   
-
-    __setattr__ = dict.__setitem__    
-    __delattr__ = dict.__delitem__
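-
-# Example: DotDict({"model": {"n_layers": 20}}).model.n_layers -> 20
-# (nested dicts are wrapped on attribute access, so YAML config keys read as
-# chained attributes)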
-
-    
-def load_model_vocoder(
-        model_path,
-        device='cpu'):
-    config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
-    with open(config_file, "r") as config:
-        args = yaml.safe_load(config)
-    args = DotDict(args)
-    
-    # load model
-    model = Unit2Mel(
-                args.data.encoder_out_channels, 
-                args.model.n_spk,
-                args.model.use_pitch_aug,
-                128,
-                args.model.n_layers,
-                args.model.n_chans,
-                args.model.n_hidden)
-    
-    print(' [Loading] ' + model_path)
-    ckpt = torch.load(model_path, map_location=torch.device(device))
-    model.to(device)
-    model.load_state_dict(ckpt['model'])
-    model.eval()
-    return model, args
-
-
-class Unit2Mel(nn.Module):
-    def __init__(
-            self,
-            input_channel,
-            n_spk,
-            use_pitch_aug=False,
-            out_dims=128,
-            n_layers=20, 
-            n_chans=384, 
-            n_hidden=256):
-        super().__init__()
-        self.unit_embed = nn.Linear(input_channel, n_hidden)
-        self.f0_embed = nn.Linear(1, n_hidden)
-        self.volume_embed = nn.Linear(1, n_hidden)
-        if use_pitch_aug:
-            self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
-        else:
-            self.aug_shift_embed = None
-        self.n_spk = n_spk
-        if n_spk is not None and n_spk > 1:
-            self.spk_embed = nn.Embedding(n_spk, n_hidden)
-            
-        # diffusion
-        self.decoder = GaussianDiffusion(out_dims, n_layers, n_chans, n_hidden)
-        self.hidden_size = n_hidden
-        self.speaker_map = torch.zeros((self.n_spk,1,1,n_hidden))
-    
-        
-
-    def forward(self, units, mel2ph, f0, volume, g = None):
-        
-        '''
-        input:
-            B x n_frames x n_unit
-        return:
-            B x n_hidden x n_frames condition tensor (speaker-mixed when n_spk > 1)
-        '''
-
-        decoder_inp = F.pad(units, [0, 0, 1, 0])
-        mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, units.shape[-1]])
-        units = torch.gather(decoder_inp, 1, mel2ph_)  # [B, T, H]
-
-        x = self.unit_embed(units) + self.f0_embed((1 + f0.unsqueeze(-1) / 700).log()) + self.volume_embed(volume.unsqueeze(-1))
-
-        if self.n_spk is not None and self.n_spk > 1:   # [N, S]  *  [S, B, 1, H]
-            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
-            g = g * self.speaker_map  # [N, S, B, 1, H]
-            g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
-            g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
-            x = x.transpose(1, 2) + g
-            return x
-        else:
-            return x.transpose(1, 2)
-        
-
-    def init_spkembed(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
-                gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
-        
-        '''
-        input:
-            B x n_frames x n_unit
-        return:
-            B x n_hidden x n_frames condition tensor (also initializes self.speaker_map)
-        '''
-        x = self.unit_embed(units) + self.f0_embed((1 + f0 / 700).log()) + self.volume_embed(volume)
-        if self.n_spk is not None and self.n_spk > 1:
-            if spk_mix_dict is not None:
-                spk_embed_mix = torch.zeros((1,1,self.hidden_size))
-                for k, v in spk_mix_dict.items():
-                    spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
-                    spk_emb = self.spk_embed(spk_id_torch)
-                    self.speaker_map[k] = spk_emb
-                    spk_embed_mix = spk_embed_mix + v * spk_emb
-                x = x + spk_embed_mix
-            else:
-                x = x + self.spk_embed(spk_id - 1)
-        self.speaker_map = self.speaker_map.unsqueeze(0)
-        self.speaker_map = self.speaker_map.detach()
-        return x.transpose(1, 2)
-
-    def OnnxExport(self, project_name=None, init_noise=None, export_encoder=True, export_denoise=True, export_pred=True, export_after=True):
-        hubert_hidden_size = 768
-        n_frames = 100
-        hubert = torch.randn((1, n_frames, hubert_hidden_size))
-        mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
-        f0 = torch.randn((1, n_frames))
-        volume = torch.randn((1, n_frames))
-        spk_mix = []
-        spks = {}
-        if self.n_spk is not None and self.n_spk > 1:
-            for i in range(self.n_spk):
-                spk_mix.append(1.0/float(self.n_spk))
-                spks.update({i:1.0/float(self.n_spk)})
-        spk_mix = torch.tensor(spk_mix)
-        spk_mix = spk_mix.repeat(n_frames, 1)
-        orgouttt = self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
-        outtt = self.forward(hubert, mel2ph, f0, volume, spk_mix)
-        if export_encoder:
-            torch.onnx.export(
-                self,
-                (hubert, mel2ph, f0, volume, spk_mix),
-                f"{project_name}_encoder.onnx",
-                input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
-                output_names=["mel_pred"],
-                dynamic_axes={
-                    "hubert": [1],
-                    "f0": [1],
-                    "volume": [1],
-                    "mel2ph": [1],
-                    "spk_mix": [0],
-                },
-                opset_version=16
-            )
-        
-        self.decoder.OnnxExport(project_name, init_noise=init_noise, export_denoise=export_denoise, export_pred=export_pred, export_after=export_after)
-
-    def ExportOnnx(self, project_name=None):
-        hubert_hidden_size = 768
-        n_frames = 100
-        hubert = torch.randn((1, n_frames, hubert_hidden_size))
-        mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
-        f0 = torch.randn((1, n_frames))
-        volume = torch.randn((1, n_frames))
-        spk_mix = []
-        spks = {}
-        if self.n_spk is not None and self.n_spk > 1:
-            for i in range(self.n_spk):
-                spk_mix.append(1.0/float(self.n_spk))
-                spks.update({i:1.0/float(self.n_spk)})
-        spk_mix = torch.tensor(spk_mix)
-        orgouttt = self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
-        outtt = self.forward(hubert, mel2ph, f0, volume, spk_mix)
-
-        torch.onnx.export(
-                self,
-                (hubert, mel2ph, f0, volume, spk_mix),
-                f"{project_name}_encoder.onnx",
-                input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
-                output_names=["mel_pred"],
-                dynamic_axes={
-                    "hubert": [1],
-                    "f0": [1],
-                    "volume": [1],
-                    "mel2ph": [1]
-                },
-                opset_version=16
-            )
-
-        condition = torch.randn(1,self.decoder.n_hidden,n_frames)
-        noise = torch.randn((1, 1, self.decoder.mel_bins, condition.shape[2]), dtype=torch.float32)
-        pndm_speedup = torch.LongTensor([100])
-        K_steps = torch.LongTensor([1000])
-        self.decoder = torch.jit.script(self.decoder)
-        self.decoder(condition, noise, pndm_speedup, K_steps)
-
-        torch.onnx.export(
-                self.decoder,
-                (condition, noise, pndm_speedup, K_steps),
-                f"{project_name}_diffusion.onnx",
-                input_names=["condition", "noise", "pndm_speedup", "K_steps"],
-                output_names=["mel"],
-                dynamic_axes={
-                    "condition": [2],
-                    "noise": [3],
-                },
-                opset_version=16
-            )
-
-
-if __name__ == "__main__":
-    project_name = "dddsp"
-    model_path = f'{project_name}/model_500000.pt'
-
-    model, _ = load_model_vocoder(model_path)
-
-    # Export with the diffusion model split out (requires MoeSS/MoeVoiceStudio, or writing your own PNDM/DPM sampler)
-    model.OnnxExport(project_name, export_encoder=True, export_denoise=True, export_pred=True, export_after=True)
-
-    # Merged diffusion export (encoder and diffusion stay separate graphs; simply feed the encoder output and the initial noise into the diffusion model)
-    # model.ExportOnnx(project_name)
-
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/modules/mel_processing.py b/spaces/azusarang/so-vits-svc-models-ba_P/modules/mel_processing.py
deleted file mode 100644
index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/modules/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import torch
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
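-# Caches keyed by "<fmax>_<dtype>_<device>" and "<win_size>_<dtype>_<device>",
-# so each mel filterbank and Hann window is built once per configuration.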
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/ToneMapShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/ToneMapShader.js
deleted file mode 100644
index c6db65a3c2d7dd2292866985ab164cf79404dbe4..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/ToneMapShader.js
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * @author miibond
- *
- * Full-screen tone-mapping shader based on http://www.cis.rit.edu/people/faculty/ferwerda/publications/sig02_paper.pdf
- */
-
-THREE.ToneMapShader = {
-
-	uniforms: {
-
-		"tDiffuse": { value: null },
-		"averageLuminance":  { value: 1.0 },
-		"luminanceMap":  { value: null },
-		"maxLuminance":  { value: 16.0 },
-		"minLuminance":  { value: 0.01 },
-		"middleGrey":  { value: 0.6 }
-	},
-
-	vertexShader: [
-
-		"varying vec2 vUv;",
-
-		"void main() {",
-
-			"vUv = uv;",
-			"gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
-
-		"}"
-
-	].join( "\n" ),
-
-	fragmentShader: [
-
-		"#include <common>",
-
-		"uniform sampler2D tDiffuse;",
-
-		"varying vec2 vUv;",
-
-		"uniform float middleGrey;",
-		"uniform float minLuminance;",
-		"uniform float maxLuminance;",
-		"#ifdef ADAPTED_LUMINANCE",
-			"uniform sampler2D luminanceMap;",
-		"#else",
-			"uniform float averageLuminance;",
-		"#endif",
-
-		"vec3 ToneMap( vec3 vColor ) {",
-			"#ifdef ADAPTED_LUMINANCE",
-				// Get the calculated average luminance
-				"float fLumAvg = texture2D(luminanceMap, vec2(0.5, 0.5)).r;",
-			"#else",
-				"float fLumAvg = averageLuminance;",
-			"#endif",
-
-			// Calculate the luminance of the current pixel
-			"float fLumPixel = linearToRelativeLuminance( vColor );",
-
-			// Apply the modified operator (Eq. 4)
-			"float fLumScaled = (fLumPixel * middleGrey) / max( minLuminance, fLumAvg );",
-
-			"float fLumCompressed = (fLumScaled * (1.0 + (fLumScaled / (maxLuminance * maxLuminance)))) / (1.0 + fLumScaled);",
-			"return fLumCompressed * vColor;",
-		"}",
-
-		"void main() {",
-
-			"vec4 texel = texture2D( tDiffuse, vUv );",
-
-			"gl_FragColor = vec4( ToneMap( texel.xyz ), texel.w );",
-
-		"}"
-
-	].join( "\n" )
-
-};
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.js b/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.js
deleted file mode 100644
index cd91a3c916129f8ffe28eca9f83820acb5e61b78..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.js
+++ /dev/null
@@ -1,1963 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- */
-
-import { Audio } from './audio/Audio.js';
-import { AudioAnalyser } from './audio/AudioAnalyser.js';
-import { PerspectiveCamera } from './cameras/PerspectiveCamera.js';
-import { FlatShading } from './constants.js';
-import {
-	Float64BufferAttribute,
-	Float32BufferAttribute,
-	Uint32BufferAttribute,
-	Int32BufferAttribute,
-	Uint16BufferAttribute,
-	Int16BufferAttribute,
-	Uint8ClampedBufferAttribute,
-	Uint8BufferAttribute,
-	Int8BufferAttribute,
-	BufferAttribute
-} from './core/BufferAttribute.js';
-import { BufferGeometry } from './core/BufferGeometry.js';
-import { Face3 } from './core/Face3.js';
-import { Geometry } from './core/Geometry.js';
-import { Object3D } from './core/Object3D.js';
-import { Uniform } from './core/Uniform.js';
-import { Curve } from './extras/core/Curve.js';
-import { CurvePath } from './extras/core/CurvePath.js';
-import { Path } from './extras/core/Path.js';
-import { CatmullRomCurve3 } from './extras/curves/CatmullRomCurve3.js';
-import { AxesHelper } from './helpers/AxesHelper.js';
-import { BoxHelper } from './helpers/BoxHelper.js';
-import { GridHelper } from './helpers/GridHelper.js';
-import { SkeletonHelper } from './helpers/SkeletonHelper.js';
-import { BoxGeometry } from './geometries/BoxGeometry.js';
-import { EdgesGeometry } from './geometries/EdgesGeometry.js';
-import { ExtrudeGeometry } from './geometries/ExtrudeGeometry.js';
-import { ExtrudeBufferGeometry } from './geometries/ExtrudeGeometry.js';
-import { ShapeGeometry } from './geometries/ShapeGeometry.js';
-import { WireframeGeometry } from './geometries/WireframeGeometry.js';
-import { Light } from './lights/Light.js';
-import { Loader } from './loaders/Loader.js';
-import { LoaderUtils } from './loaders/LoaderUtils.js';
-import { FileLoader } from './loaders/FileLoader.js';
-import { AudioLoader } from './loaders/AudioLoader.js';
-import { CubeTextureLoader } from './loaders/CubeTextureLoader.js';
-import { DataTextureLoader } from './loaders/DataTextureLoader.js';
-import { ObjectLoader } from './loaders/ObjectLoader.js';
-import { TextureLoader } from './loaders/TextureLoader.js';
-import { Material } from './materials/Material.js';
-import { LineBasicMaterial } from './materials/LineBasicMaterial.js';
-import { MeshPhongMaterial } from './materials/MeshPhongMaterial.js';
-import { PointsMaterial } from './materials/PointsMaterial.js';
-import { ShaderMaterial } from './materials/ShaderMaterial.js';
-import { Box2 } from './math/Box2.js';
-import { Box3 } from './math/Box3.js';
-import { Color } from './math/Color.js';
-import { Line3 } from './math/Line3.js';
-import { _Math } from './math/Math.js';
-import { Matrix3 } from './math/Matrix3.js';
-import { Matrix4 } from './math/Matrix4.js';
-import { Plane } from './math/Plane.js';
-import { Quaternion } from './math/Quaternion.js';
-import { Ray } from './math/Ray.js';
-import { Triangle } from './math/Triangle.js';
-import { Vector2 } from './math/Vector2.js';
-import { Vector3 } from './math/Vector3.js';
-import { Vector4 } from './math/Vector4.js';
-import { LineSegments } from './objects/LineSegments.js';
-import { LOD } from './objects/LOD.js';
-import { Points } from './objects/Points.js';
-import { Sprite } from './objects/Sprite.js';
-import { Skeleton } from './objects/Skeleton.js';
-import { SkinnedMesh } from './objects/SkinnedMesh.js';
-import { WebGLRenderer } from './renderers/WebGLRenderer.js';
-import { WebGLRenderTarget } from './renderers/WebGLRenderTarget.js';
-import { WebGLRenderTargetCube } from './renderers/WebGLRenderTargetCube.js';
-import { WebGLShadowMap } from './renderers/webgl/WebGLShadowMap.js';
-import { WebVRManager } from './renderers/webvr/WebVRManager.js';
-import { ImageUtils } from './extras/ImageUtils.js';
-import { Shape } from './extras/core/Shape.js';
-import { CubeCamera } from './cameras/CubeCamera.js';
-
-export { BoxGeometry as CubeGeometry };
-
-export function Face4( a, b, c, d, normal, color, materialIndex ) {
-
-	console.warn( 'THREE.Face4 has been removed. A THREE.Face3 will be created instead.' );
-	return new Face3( a, b, c, normal, color, materialIndex );
-
-}
-
-export var LineStrip = 0;
-
-export var LinePieces = 1;
-
-export function MeshFaceMaterial( materials ) {
-
-	console.warn( 'THREE.MeshFaceMaterial has been removed. Use an Array instead.' );
-	return materials;
-
-}
-
-export function MultiMaterial( materials ) {
-
-	if ( materials === undefined ) materials = [];
-
-	console.warn( 'THREE.MultiMaterial has been removed. Use an Array instead.' );
-	materials.isMultiMaterial = true;
-	materials.materials = materials;
-	materials.clone = function () {
-
-		return materials.slice();
-
-	};
-	return materials;
-
-}
-
-export function PointCloud( geometry, material ) {
-
-	console.warn( 'THREE.PointCloud has been renamed to THREE.Points.' );
-	return new Points( geometry, material );
-
-}
-
-export function Particle( material ) {
-
-	console.warn( 'THREE.Particle has been renamed to THREE.Sprite.' );
-	return new Sprite( material );
-
-}
-
-export function ParticleSystem( geometry, material ) {
-
-	console.warn( 'THREE.ParticleSystem has been renamed to THREE.Points.' );
-	return new Points( geometry, material );
-
-}
-
-export function PointCloudMaterial( parameters ) {
-
-	console.warn( 'THREE.PointCloudMaterial has been renamed to THREE.PointsMaterial.' );
-	return new PointsMaterial( parameters );
-
-}
-
-export function ParticleBasicMaterial( parameters ) {
-
-	console.warn( 'THREE.ParticleBasicMaterial has been renamed to THREE.PointsMaterial.' );
-	return new PointsMaterial( parameters );
-
-}
-
-export function ParticleSystemMaterial( parameters ) {
-
-	console.warn( 'THREE.ParticleSystemMaterial has been renamed to THREE.PointsMaterial.' );
-	return new PointsMaterial( parameters );
-
-}
-
-export function Vertex( x, y, z ) {
-
-	console.warn( 'THREE.Vertex has been removed. Use THREE.Vector3 instead.' );
-	return new Vector3( x, y, z );
-
-}
-
-//
-
-export function DynamicBufferAttribute( array, itemSize ) {
-
-	console.warn( 'THREE.DynamicBufferAttribute has been removed. Use new THREE.BufferAttribute().setDynamic( true ) instead.' );
-	return new BufferAttribute( array, itemSize ).setDynamic( true );
-
-}
-
-export function Int8Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Int8Attribute has been removed. Use new THREE.Int8BufferAttribute() instead.' );
-	return new Int8BufferAttribute( array, itemSize );
-
-}
-
-export function Uint8Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Uint8Attribute has been removed. Use new THREE.Uint8BufferAttribute() instead.' );
-	return new Uint8BufferAttribute( array, itemSize );
-
-}
-
-export function Uint8ClampedAttribute( array, itemSize ) {
-
-	console.warn( 'THREE.Uint8ClampedAttribute has been removed. Use new THREE.Uint8ClampedBufferAttribute() instead.' );
-	return new Uint8ClampedBufferAttribute( array, itemSize );
-
-}
-
-export function Int16Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Int16Attribute has been removed. Use new THREE.Int16BufferAttribute() instead.' );
-	return new Int16BufferAttribute( array, itemSize );
-
-}
-
-export function Uint16Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Uint16Attribute has been removed. Use new THREE.Uint16BufferAttribute() instead.' );
-	return new Uint16BufferAttribute( array, itemSize );
-
-}
-
-export function Int32Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Int32Attribute has been removed. Use new THREE.Int32BufferAttribute() instead.' );
-	return new Int32BufferAttribute( array, itemSize );
-
-}
-
-export function Uint32Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Uint32Attribute has been removed. Use new THREE.Uint32BufferAttribute() instead.' );
-	return new Uint32BufferAttribute( array, itemSize );
-
-}
-
-export function Float32Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Float32Attribute has been removed. Use new THREE.Float32BufferAttribute() instead.' );
-	return new Float32BufferAttribute( array, itemSize );
-
-}
-
-export function Float64Attribute( array, itemSize ) {
-
-	console.warn( 'THREE.Float64Attribute has been removed. Use new THREE.Float64BufferAttribute() instead.' );
-	return new Float64BufferAttribute( array, itemSize );
-
-}
-
-//
-
-Curve.create = function ( construct, getPoint ) {
-
-	console.log( 'THREE.Curve.create() has been deprecated' );
-
-	construct.prototype = Object.create( Curve.prototype );
-	construct.prototype.constructor = construct;
-	construct.prototype.getPoint = getPoint;
-
-	return construct;
-
-};
-
-//
-
-Object.assign( CurvePath.prototype, {
-
-	createPointsGeometry: function ( divisions ) {
-
-		console.warn( 'THREE.CurvePath: .createPointsGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.' );
-
-		// generate geometry from path points (for Line or Points objects)
-
-		var pts = this.getPoints( divisions );
-		return this.createGeometry( pts );
-
-	},
-
-	createSpacedPointsGeometry: function ( divisions ) {
-
-		console.warn( 'THREE.CurvePath: .createSpacedPointsGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.' );
-
-		// generate geometry from equidistant sampling along the path
-
-		var pts = this.getSpacedPoints( divisions );
-		return this.createGeometry( pts );
-
-	},
-
-	createGeometry: function ( points ) {
-
-		console.warn( 'THREE.CurvePath: .createGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.' );
-
-		var geometry = new Geometry();
-
-		for ( var i = 0, l = points.length; i < l; i ++ ) {
-
-			var point = points[ i ];
-			geometry.vertices.push( new Vector3( point.x, point.y, point.z || 0 ) );
-
-		}
-
-		return geometry;
-
-	}
-
-} );
-
-//
-
-Object.assign( Path.prototype, {
-
-	fromPoints: function ( points ) {
-
-		console.warn( 'THREE.Path: .fromPoints() has been renamed to .setFromPoints().' );
-		this.setFromPoints( points );
-
-	}
-
-} );
-
-//
-
-export function ClosedSplineCurve3( points ) {
-
-	console.warn( 'THREE.ClosedSplineCurve3 has been deprecated. Use THREE.CatmullRomCurve3 instead.' );
-
-	CatmullRomCurve3.call( this, points );
-	this.type = 'catmullrom';
-	this.closed = true;
-
-}
-
-ClosedSplineCurve3.prototype = Object.create( CatmullRomCurve3.prototype );
-
-//
-
-export function SplineCurve3( points ) {
-
-	console.warn( 'THREE.SplineCurve3 has been deprecated. Use THREE.CatmullRomCurve3 instead.' );
-
-	CatmullRomCurve3.call( this, points );
-	this.type = 'catmullrom';
-
-}
-
-SplineCurve3.prototype = Object.create( CatmullRomCurve3.prototype );
-
-//
-
-export function Spline( points ) {
-
-	console.warn( 'THREE.Spline has been removed. Use THREE.CatmullRomCurve3 instead.' );
-
-	CatmullRomCurve3.call( this, points );
-	this.type = 'catmullrom';
-
-}
-
-Spline.prototype = Object.create( CatmullRomCurve3.prototype );
-
-Object.assign( Spline.prototype, {
-
-	initFromArray: function ( /* a */ ) {
-
-		console.error( 'THREE.Spline: .initFromArray() has been removed.' );
-
-	},
-	getControlPointsArray: function ( /* optionalTarget */ ) {
-
-		console.error( 'THREE.Spline: .getControlPointsArray() has been removed.' );
-
-	},
-	reparametrizeByArcLength: function ( /* samplingCoef */ ) {
-
-		console.error( 'THREE.Spline: .reparametrizeByArcLength() has been removed.' );
-
-	}
-
-} );
-
-//
-
-export function AxisHelper( size ) {
-
-	console.warn( 'THREE.AxisHelper has been renamed to THREE.AxesHelper.' );
-	return new AxesHelper( size );
-
-}
-
-export function BoundingBoxHelper( object, color ) {
-
-	console.warn( 'THREE.BoundingBoxHelper has been deprecated. Creating a THREE.BoxHelper instead.' );
-	return new BoxHelper( object, color );
-
-}
-
-export function EdgesHelper( object, hex ) {
-
-	console.warn( 'THREE.EdgesHelper has been removed. Use THREE.EdgesGeometry instead.' );
-	return new LineSegments( new EdgesGeometry( object.geometry ), new LineBasicMaterial( { color: hex !== undefined ? hex : 0xffffff } ) );
-
-}
-
-GridHelper.prototype.setColors = function () {
-
-	console.error( 'THREE.GridHelper: setColors() has been deprecated, pass them in the constructor instead.' );
-
-};
-
-SkeletonHelper.prototype.update = function () {
-
-	console.error( 'THREE.SkeletonHelper: update() no longer needs to be called.' );
-
-};
-
-export function WireframeHelper( object, hex ) {
-
-	console.warn( 'THREE.WireframeHelper has been removed. Use THREE.WireframeGeometry instead.' );
-	return new LineSegments( new WireframeGeometry( object.geometry ), new LineBasicMaterial( { color: hex !== undefined ? hex : 0xffffff } ) );
-
-}
-
-//
-
-Object.assign( Loader.prototype, {
-
-	extractUrlBase: function ( url ) {
-
-		console.warn( 'THREE.Loader: .extractUrlBase() has been deprecated. Use THREE.LoaderUtils.extractUrlBase() instead.' );
-		return LoaderUtils.extractUrlBase( url );
-
-	}
-
-} );
-
-export function XHRLoader( manager ) {
-
-	console.warn( 'THREE.XHRLoader has been renamed to THREE.FileLoader.' );
-	return new FileLoader( manager );
-
-}
-
-export function BinaryTextureLoader( manager ) {
-
-	console.warn( 'THREE.BinaryTextureLoader has been renamed to THREE.DataTextureLoader.' );
-	return new DataTextureLoader( manager );
-
-}
-
-Object.assign( ObjectLoader.prototype, {
-
-	setTexturePath: function ( value ) {
-
-		console.warn( 'THREE.ObjectLoader: .setTexturePath() has been renamed to .setResourcePath().' );
-		return this.setResourcePath( value );
-
-	}
-
-} );
-
-//
-
-Object.assign( Box2.prototype, {
-
-	center: function ( optionalTarget ) {
-
-		console.warn( 'THREE.Box2: .center() has been renamed to .getCenter().' );
-		return this.getCenter( optionalTarget );
-
-	},
-	empty: function () {
-
-		console.warn( 'THREE.Box2: .empty() has been renamed to .isEmpty().' );
-		return this.isEmpty();
-
-	},
-	isIntersectionBox: function ( box ) {
-
-		console.warn( 'THREE.Box2: .isIntersectionBox() has been renamed to .intersectsBox().' );
-		return this.intersectsBox( box );
-
-	},
-	size: function ( optionalTarget ) {
-
-		console.warn( 'THREE.Box2: .size() has been renamed to .getSize().' );
-		return this.getSize( optionalTarget );
-
-	}
-} );
-
-Object.assign( Box3.prototype, {
-
-	center: function ( optionalTarget ) {
-
-		console.warn( 'THREE.Box3: .center() has been renamed to .getCenter().' );
-		return this.getCenter( optionalTarget );
-
-	},
-	empty: function () {
-
-		console.warn( 'THREE.Box3: .empty() has been renamed to .isEmpty().' );
-		return this.isEmpty();
-
-	},
-	isIntersectionBox: function ( box ) {
-
-		console.warn( 'THREE.Box3: .isIntersectionBox() has been renamed to .intersectsBox().' );
-		return this.intersectsBox( box );
-
-	},
-	isIntersectionSphere: function ( sphere ) {
-
-		console.warn( 'THREE.Box3: .isIntersectionSphere() has been renamed to .intersectsSphere().' );
-		return this.intersectsSphere( sphere );
-
-	},
-	size: function ( optionalTarget ) {
-
-		console.warn( 'THREE.Box3: .size() has been renamed to .getSize().' );
-		return this.getSize( optionalTarget );
-
-	}
-} );
-
-Line3.prototype.center = function ( optionalTarget ) {
-
-	console.warn( 'THREE.Line3: .center() has been renamed to .getCenter().' );
-	return this.getCenter( optionalTarget );
-
-};
-
-Object.assign( _Math, {
-
-	random16: function () {
-
-		console.warn( 'THREE.Math: .random16() has been deprecated. Use Math.random() instead.' );
-		return Math.random();
-
-	},
-
-	nearestPowerOfTwo: function ( value ) {
-
-		console.warn( 'THREE.Math: .nearestPowerOfTwo() has been renamed to .floorPowerOfTwo().' );
-		return _Math.floorPowerOfTwo( value );
-
-	},
-
-	nextPowerOfTwo: function ( value ) {
-
-		console.warn( 'THREE.Math: .nextPowerOfTwo() has been renamed to .ceilPowerOfTwo().' );
-		return _Math.ceilPowerOfTwo( value );
-
-	}
-
-} );
-
-Object.assign( Matrix3.prototype, {
-
-	flattenToArrayOffset: function ( array, offset ) {
-
-		console.warn( "THREE.Matrix3: .flattenToArrayOffset() has been deprecated. Use .toArray() instead." );
-		return this.toArray( array, offset );
-
-	},
-	multiplyVector3: function ( vector ) {
-
-		console.warn( 'THREE.Matrix3: .multiplyVector3() has been removed. Use vector.applyMatrix3( matrix ) instead.' );
-		return vector.applyMatrix3( this );
-
-	},
-	multiplyVector3Array: function ( /* a */ ) {
-
-		console.error( 'THREE.Matrix3: .multiplyVector3Array() has been removed.' );
-
-	},
-	applyToBuffer: function ( buffer /*, offset, length */ ) {
-
-		console.warn( 'THREE.Matrix3: .applyToBuffer() has been removed. Use matrix.applyToBufferAttribute( attribute ) instead.' );
-		return this.applyToBufferAttribute( buffer );
-
-	},
-	applyToVector3Array: function ( /* array, offset, length */ ) {
-
-		console.error( 'THREE.Matrix3: .applyToVector3Array() has been removed.' );
-
-	}
-
-} );
-
-Object.assign( Matrix4.prototype, {
-
-	extractPosition: function ( m ) {
-
-		console.warn( 'THREE.Matrix4: .extractPosition() has been renamed to .copyPosition().' );
-		return this.copyPosition( m );
-
-	},
-	flattenToArrayOffset: function ( array, offset ) {
-
-		console.warn( "THREE.Matrix4: .flattenToArrayOffset() has been deprecated. Use .toArray() instead." );
-		return this.toArray( array, offset );
-
-	},
-	getPosition: function () {
-
-		var v1;
-
-		return function getPosition() {
-
-			if ( v1 === undefined ) v1 = new Vector3();
-			console.warn( 'THREE.Matrix4: .getPosition() has been removed. Use Vector3.setFromMatrixPosition( matrix ) instead.' );
-			return v1.setFromMatrixColumn( this, 3 );
-
-		};
-
-	}(),
-	setRotationFromQuaternion: function ( q ) {
-
-		console.warn( 'THREE.Matrix4: .setRotationFromQuaternion() has been renamed to .makeRotationFromQuaternion().' );
-		return this.makeRotationFromQuaternion( q );
-
-	},
-	multiplyToArray: function () {
-
-		console.warn( 'THREE.Matrix4: .multiplyToArray() has been removed.' );
-
-	},
-	multiplyVector3: function ( vector ) {
-
-		console.warn( 'THREE.Matrix4: .multiplyVector3() has been removed. Use vector.applyMatrix4( matrix ) instead.' );
-		return vector.applyMatrix4( this );
-
-	},
-	multiplyVector4: function ( vector ) {
-
-		console.warn( 'THREE.Matrix4: .multiplyVector4() has been removed. Use vector.applyMatrix4( matrix ) instead.' );
-		return vector.applyMatrix4( this );
-
-	},
-	multiplyVector3Array: function ( /* a */ ) {
-
-		console.error( 'THREE.Matrix4: .multiplyVector3Array() has been removed.' );
-
-	},
-	rotateAxis: function ( v ) {
-
-		console.warn( 'THREE.Matrix4: .rotateAxis() has been removed. Use Vector3.transformDirection( matrix ) instead.' );
-		v.transformDirection( this );
-
-	},
-	crossVector: function ( vector ) {
-
-		console.warn( 'THREE.Matrix4: .crossVector() has been removed. Use vector.applyMatrix4( matrix ) instead.' );
-		return vector.applyMatrix4( this );
-
-	},
-	translate: function () {
-
-		console.error( 'THREE.Matrix4: .translate() has been removed.' );
-
-	},
-	rotateX: function () {
-
-		console.error( 'THREE.Matrix4: .rotateX() has been removed.' );
-
-	},
-	rotateY: function () {
-
-		console.error( 'THREE.Matrix4: .rotateY() has been removed.' );
-
-	},
-	rotateZ: function () {
-
-		console.error( 'THREE.Matrix4: .rotateZ() has been removed.' );
-
-	},
-	rotateByAxis: function () {
-
-		console.error( 'THREE.Matrix4: .rotateByAxis() has been removed.' );
-
-	},
-	applyToBuffer: function ( buffer /*, offset, length */ ) {
-
-		console.warn( 'THREE.Matrix4: .applyToBuffer() has been removed. Use matrix.applyToBufferAttribute( attribute ) instead.' );
-		return this.applyToBufferAttribute( buffer );
-
-	},
-	applyToVector3Array: function ( /* array, offset, length */ ) {
-
-		console.error( 'THREE.Matrix4: .applyToVector3Array() has been removed.' );
-
-	},
-	makeFrustum: function ( left, right, bottom, top, near, far ) {
-
-		console.warn( 'THREE.Matrix4: .makeFrustum() has been removed. Use .makePerspective( left, right, top, bottom, near, far ) instead.' );
-		return this.makePerspective( left, right, top, bottom, near, far );
-
-	}
-
-} );
-
-Plane.prototype.isIntersectionLine = function ( line ) {
-
-	console.warn( 'THREE.Plane: .isIntersectionLine() has been renamed to .intersectsLine().' );
-	return this.intersectsLine( line );
-
-};
-
-Quaternion.prototype.multiplyVector3 = function ( vector ) {
-
-	console.warn( 'THREE.Quaternion: .multiplyVector3() has been removed. Use vector.applyQuaternion( quaternion ) instead.' );
-	return vector.applyQuaternion( this );
-
-};
-
-Object.assign( Ray.prototype, {
-
-	isIntersectionBox: function ( box ) {
-
-		console.warn( 'THREE.Ray: .isIntersectionBox() has been renamed to .intersectsBox().' );
-		return this.intersectsBox( box );
-
-	},
-	isIntersectionPlane: function ( plane ) {
-
-		console.warn( 'THREE.Ray: .isIntersectionPlane() has been renamed to .intersectsPlane().' );
-		return this.intersectsPlane( plane );
-
-	},
-	isIntersectionSphere: function ( sphere ) {
-
-		console.warn( 'THREE.Ray: .isIntersectionSphere() has been renamed to .intersectsSphere().' );
-		return this.intersectsSphere( sphere );
-
-	}
-
-} );
-
-Object.assign( Triangle.prototype, {
-
-	area: function () {
-
-		console.warn( 'THREE.Triangle: .area() has been renamed to .getArea().' );
-		return this.getArea();
-
-	},
-	barycoordFromPoint: function ( point, target ) {
-
-		console.warn( 'THREE.Triangle: .barycoordFromPoint() has been renamed to .getBarycoord().' );
-		return this.getBarycoord( point, target );
-
-	},
-	midpoint: function ( target ) {
-
-		console.warn( 'THREE.Triangle: .midpoint() has been renamed to .getMidpoint().' );
-		return this.getMidpoint( target );
-
-	},
-	normal: function ( target ) {
-
-		console.warn( 'THREE.Triangle: .normal() has been renamed to .getNormal().' );
-		return this.getNormal( target );
-
-	},
-	plane: function ( target ) {
-
-		console.warn( 'THREE.Triangle: .plane() has been renamed to .getPlane().' );
-		return this.getPlane( target );
-
-	}
-
-} );
-
-Object.assign( Triangle, {
-
-	barycoordFromPoint: function ( point, a, b, c, target ) {
-
-		console.warn( 'THREE.Triangle: .barycoordFromPoint() has been renamed to .getBarycoord().' );
-		return Triangle.getBarycoord( point, a, b, c, target );
-
-	},
-	normal: function ( a, b, c, target ) {
-
-		console.warn( 'THREE.Triangle: .normal() has been renamed to .getNormal().' );
-		return Triangle.getNormal( a, b, c, target );
-
-	}
-
-} );
-
-Object.assign( Shape.prototype, {
-
-	extractAllPoints: function ( divisions ) {
-
-		console.warn( 'THREE.Shape: .extractAllPoints() has been removed. Use .extractPoints() instead.' );
-		return this.extractPoints( divisions );
-
-	},
-	extrude: function ( options ) {
-
-		console.warn( 'THREE.Shape: .extrude() has been removed. Use ExtrudeGeometry() instead.' );
-		return new ExtrudeGeometry( this, options );
-
-	},
-	makeGeometry: function ( options ) {
-
-		console.warn( 'THREE.Shape: .makeGeometry() has been removed. Use ShapeGeometry() instead.' );
-		return new ShapeGeometry( this, options );
-
-	}
-
-} );
-
-Object.assign( Vector2.prototype, {
-
-	fromAttribute: function ( attribute, index, offset ) {
-
-		console.warn( 'THREE.Vector2: .fromAttribute() has been renamed to .fromBufferAttribute().' );
-		return this.fromBufferAttribute( attribute, index, offset );
-
-	},
-	distanceToManhattan: function ( v ) {
-
-		console.warn( 'THREE.Vector2: .distanceToManhattan() has been renamed to .manhattanDistanceTo().' );
-		return this.manhattanDistanceTo( v );
-
-	},
-	lengthManhattan: function () {
-
-		console.warn( 'THREE.Vector2: .lengthManhattan() has been renamed to .manhattanLength().' );
-		return this.manhattanLength();
-
-	}
-
-} );
-
-Object.assign( Vector3.prototype, {
-
-	setEulerFromRotationMatrix: function () {
-
-		console.error( 'THREE.Vector3: .setEulerFromRotationMatrix() has been removed. Use Euler.setFromRotationMatrix() instead.' );
-
-	},
-	setEulerFromQuaternion: function () {
-
-		console.error( 'THREE.Vector3: .setEulerFromQuaternion() has been removed. Use Euler.setFromQuaternion() instead.' );
-
-	},
-	getPositionFromMatrix: function ( m ) {
-
-		console.warn( 'THREE.Vector3: .getPositionFromMatrix() has been renamed to .setFromMatrixPosition().' );
-		return this.setFromMatrixPosition( m );
-
-	},
-	getScaleFromMatrix: function ( m ) {
-
-		console.warn( 'THREE.Vector3: .getScaleFromMatrix() has been renamed to .setFromMatrixScale().' );
-		return this.setFromMatrixScale( m );
-
-	},
-	getColumnFromMatrix: function ( index, matrix ) {
-
-		console.warn( 'THREE.Vector3: .getColumnFromMatrix() has been renamed to .setFromMatrixColumn().' );
-		return this.setFromMatrixColumn( matrix, index );
-
-	},
-	applyProjection: function ( m ) {
-
-		console.warn( 'THREE.Vector3: .applyProjection() has been removed. Use .applyMatrix4( m ) instead.' );
-		return this.applyMatrix4( m );
-
-	},
-	fromAttribute: function ( attribute, index, offset ) {
-
-		console.warn( 'THREE.Vector3: .fromAttribute() has been renamed to .fromBufferAttribute().' );
-		return this.fromBufferAttribute( attribute, index, offset );
-
-	},
-	distanceToManhattan: function ( v ) {
-
-		console.warn( 'THREE.Vector3: .distanceToManhattan() has been renamed to .manhattanDistanceTo().' );
-		return this.manhattanDistanceTo( v );
-
-	},
-	lengthManhattan: function () {
-
-		console.warn( 'THREE.Vector3: .lengthManhattan() has been renamed to .manhattanLength().' );
-		return this.manhattanLength();
-
-	}
-
-} );
-
-Object.assign( Vector4.prototype, {
-
-	fromAttribute: function ( attribute, index, offset ) {
-
-		console.warn( 'THREE.Vector4: .fromAttribute() has been renamed to .fromBufferAttribute().' );
-		return this.fromBufferAttribute( attribute, index, offset );
-
-	},
-	lengthManhattan: function () {
-
-		console.warn( 'THREE.Vector4: .lengthManhattan() has been renamed to .manhattanLength().' );
-		return this.manhattanLength();
-
-	}
-
-} );
-
-//
-
-Object.assign( Geometry.prototype, {
-
-	computeTangents: function () {
-
-		console.error( 'THREE.Geometry: .computeTangents() has been removed.' );
-
-	},
-	computeLineDistances: function () {
-
-		console.error( 'THREE.Geometry: .computeLineDistances() has been removed. Use THREE.Line.computeLineDistances() instead.' );
-
-	}
-
-} );
-
-Object.assign( Object3D.prototype, {
-
-	getChildByName: function ( name ) {
-
-		console.warn( 'THREE.Object3D: .getChildByName() has been renamed to .getObjectByName().' );
-		return this.getObjectByName( name );
-
-	},
-	renderDepth: function () {
-
-		console.warn( 'THREE.Object3D: .renderDepth has been removed. Use .renderOrder instead.' );
-
-	},
-	translate: function ( distance, axis ) {
-
-		console.warn( 'THREE.Object3D: .translate() has been removed. Use .translateOnAxis( axis, distance ) instead.' );
-		return this.translateOnAxis( axis, distance );
-
-	},
-	getWorldRotation: function () {
-
-		console.error( 'THREE.Object3D: .getWorldRotation() has been removed. Use THREE.Object3D.getWorldQuaternion( target ) instead.' );
-
-	}
-
-} );
-
-Object.defineProperties( Object3D.prototype, {
-
-	eulerOrder: {
-		get: function () {
-
-			console.warn( 'THREE.Object3D: .eulerOrder is now .rotation.order.' );
-			return this.rotation.order;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.Object3D: .eulerOrder is now .rotation.order.' );
-			this.rotation.order = value;
-
-		}
-	},
-	useQuaternion: {
-		get: function () {
-
-			console.warn( 'THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.' );
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.' );
-
-		}
-	}
-
-} );
-
-Object.defineProperties( LOD.prototype, {
-
-	objects: {
-		get: function () {
-
-			console.warn( 'THREE.LOD: .objects has been renamed to .levels.' );
-			return this.levels;
-
-		}
-	}
-
-} );
-
-Object.defineProperty( Skeleton.prototype, 'useVertexTexture', {
-
-	get: function () {
-
-		console.warn( 'THREE.Skeleton: useVertexTexture has been removed.' );
-
-	},
-	set: function () {
-
-		console.warn( 'THREE.Skeleton: useVertexTexture has been removed.' );
-
-	}
-
-} );
-
-SkinnedMesh.prototype.initBones = function () {
-
-	console.error( 'THREE.SkinnedMesh: initBones() has been removed.' );
-
-};
-
-Object.defineProperty( Curve.prototype, '__arcLengthDivisions', {
-
-	get: function () {
-
-		console.warn( 'THREE.Curve: .__arcLengthDivisions is now .arcLengthDivisions.' );
-		return this.arcLengthDivisions;
-
-	},
-	set: function ( value ) {
-
-		console.warn( 'THREE.Curve: .__arcLengthDivisions is now .arcLengthDivisions.' );
-		this.arcLengthDivisions = value;
-
-	}
-
-} );
-
-//
-
-PerspectiveCamera.prototype.setLens = function ( focalLength, filmGauge ) {
-
-	console.warn( "THREE.PerspectiveCamera.setLens is deprecated. " +
-			"Use .setFocalLength and .filmGauge for a photographic setup." );
-
-	if ( filmGauge !== undefined ) this.filmGauge = filmGauge;
-	this.setFocalLength( focalLength );
-
-};
-
-//
-
-Object.defineProperties( Light.prototype, {
-	onlyShadow: {
-		set: function () {
-
-			console.warn( 'THREE.Light: .onlyShadow has been removed.' );
-
-		}
-	},
-	shadowCameraFov: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraFov is now .shadow.camera.fov.' );
-			this.shadow.camera.fov = value;
-
-		}
-	},
-	shadowCameraLeft: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraLeft is now .shadow.camera.left.' );
-			this.shadow.camera.left = value;
-
-		}
-	},
-	shadowCameraRight: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraRight is now .shadow.camera.right.' );
-			this.shadow.camera.right = value;
-
-		}
-	},
-	shadowCameraTop: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraTop is now .shadow.camera.top.' );
-			this.shadow.camera.top = value;
-
-		}
-	},
-	shadowCameraBottom: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraBottom is now .shadow.camera.bottom.' );
-			this.shadow.camera.bottom = value;
-
-		}
-	},
-	shadowCameraNear: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraNear is now .shadow.camera.near.' );
-			this.shadow.camera.near = value;
-
-		}
-	},
-	shadowCameraFar: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowCameraFar is now .shadow.camera.far.' );
-			this.shadow.camera.far = value;
-
-		}
-	},
-	shadowCameraVisible: {
-		set: function () {
-
-			console.warn( 'THREE.Light: .shadowCameraVisible has been removed. Use new THREE.CameraHelper( light.shadow.camera ) instead.' );
-
-		}
-	},
-	shadowBias: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowBias is now .shadow.bias.' );
-			this.shadow.bias = value;
-
-		}
-	},
-	shadowDarkness: {
-		set: function () {
-
-			console.warn( 'THREE.Light: .shadowDarkness has been removed.' );
-
-		}
-	},
-	shadowMapWidth: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowMapWidth is now .shadow.mapSize.width.' );
-			this.shadow.mapSize.width = value;
-
-		}
-	},
-	shadowMapHeight: {
-		set: function ( value ) {
-
-			console.warn( 'THREE.Light: .shadowMapHeight is now .shadow.mapSize.height.' );
-			this.shadow.mapSize.height = value;
-
-		}
-	}
-} );
-
-//
-
-Object.defineProperties( BufferAttribute.prototype, {
-
-	length: {
-		get: function () {
-
-			console.warn( 'THREE.BufferAttribute: .length has been deprecated. Use .count instead.' );
-			return this.array.length;
-
-		}
-	},
-	copyIndicesArray: {
-		value: function ( /* indices */ ) {
-
-			console.error( 'THREE.BufferAttribute: .copyIndicesArray() has been removed.' );
-
-		}
-	}
-
-} );
-
-Object.assign( BufferGeometry.prototype, {
-
-	addIndex: function ( index ) {
-
-		console.warn( 'THREE.BufferGeometry: .addIndex() has been renamed to .setIndex().' );
-		this.setIndex( index );
-
-	},
-	addDrawCall: function ( start, count, indexOffset ) {
-
-		if ( indexOffset !== undefined ) {
-
-			console.warn( 'THREE.BufferGeometry: .addDrawCall() no longer supports indexOffset.' );
-
-		}
-		console.warn( 'THREE.BufferGeometry: .addDrawCall() is now .addGroup().' );
-		this.addGroup( start, count );
-
-	},
-	clearDrawCalls: function () {
-
-		console.warn( 'THREE.BufferGeometry: .clearDrawCalls() is now .clearGroups().' );
-		this.clearGroups();
-
-	},
-	computeTangents: function () {
-
-		console.warn( 'THREE.BufferGeometry: .computeTangents() has been removed.' );
-
-	},
-	computeOffsets: function () {
-
-		console.warn( 'THREE.BufferGeometry: .computeOffsets() has been removed.' );
-
-	}
-
-} );
-
-Object.defineProperties( BufferGeometry.prototype, {
-
-	drawcalls: {
-		get: function () {
-
-			console.error( 'THREE.BufferGeometry: .drawcalls has been renamed to .groups.' );
-			return this.groups;
-
-		}
-	},
-	offsets: {
-		get: function () {
-
-			console.warn( 'THREE.BufferGeometry: .offsets has been renamed to .groups.' );
-			return this.groups;
-
-		}
-	}
-
-} );
-
-//
-
-Object.assign( ExtrudeBufferGeometry.prototype, {
-
-	getArrays: function () {
-
-		console.error( 'THREE.ExtrudeBufferGeometry: .getArrays() has been removed.' );
-
-	},
-
-	addShapeList: function () {
-
-		console.error( 'THREE.ExtrudeBufferGeometry: .addShapeList() has been removed.' );
-
-	},
-
-	addShape: function () {
-
-		console.error( 'THREE.ExtrudeBufferGeometry: .addShape() has been removed.' );
-
-	}
-
-} );
-
-//
-
-Object.defineProperties( Uniform.prototype, {
-
-	dynamic: {
-		set: function () {
-
-			console.warn( 'THREE.Uniform: .dynamic has been removed. Use object.onBeforeRender() instead.' );
-
-		}
-	},
-	onUpdate: {
-		value: function () {
-
-			console.warn( 'THREE.Uniform: .onUpdate() has been removed. Use object.onBeforeRender() instead.' );
-			return this;
-
-		}
-	}
-
-} );
-
-//
-
-Object.defineProperties( Material.prototype, {
-
-	wrapAround: {
-		get: function () {
-
-			console.warn( 'THREE.Material: .wrapAround has been removed.' );
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.Material: .wrapAround has been removed.' );
-
-		}
-	},
-
-	overdraw: {
-		get: function () {
-
-			console.warn( 'THREE.Material: .overdraw has been removed.' );
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.Material: .overdraw has been removed.' );
-
-		}
-	},
-
-	wrapRGB: {
-		get: function () {
-
-			console.warn( 'THREE.Material: .wrapRGB has been removed.' );
-			return new Color();
-
-		}
-	},
-
-	shading: {
-		get: function () {
-
-			console.error( 'THREE.' + this.type + ': .shading has been removed. Use the boolean .flatShading instead.' );
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.' + this.type + ': .shading has been removed. Use the boolean .flatShading instead.' );
-			this.flatShading = ( value === FlatShading );
-
-		}
-	}
-
-} );
-
-Object.defineProperties( MeshPhongMaterial.prototype, {
-
-	metal: {
-		get: function () {
-
-			console.warn( 'THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead.' );
-			return false;
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead.' );
-
-		}
-	}
-
-} );
-
-Object.defineProperties( ShaderMaterial.prototype, {
-
-	derivatives: {
-		get: function () {
-
-			console.warn( 'THREE.ShaderMaterial: .derivatives has been moved to .extensions.derivatives.' );
-			return this.extensions.derivatives;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.ShaderMaterial: .derivatives has been moved to .extensions.derivatives.' );
-			this.extensions.derivatives = value;
-
-		}
-	}
-
-} );
-
-//
-
-Object.assign( WebGLRenderer.prototype, {
-
-	clearTarget: function ( renderTarget, color, depth, stencil ) {
-
-		console.warn( 'THREE.WebGLRenderer: .clearTarget() has been deprecated. Use .setRenderTarget() and .clear() instead.' );
-		this.setRenderTarget( renderTarget );
-		this.clear( color, depth, stencil );
-
-	},
-	animate: function ( callback ) {
-
-		console.warn( 'THREE.WebGLRenderer: .animate() is now .setAnimationLoop().' );
-		this.setAnimationLoop( callback );
-
-	},
-	getCurrentRenderTarget: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .getCurrentRenderTarget() is now .getRenderTarget().' );
-		return this.getRenderTarget();
-
-	},
-	getMaxAnisotropy: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .getMaxAnisotropy() is now .capabilities.getMaxAnisotropy().' );
-		return this.capabilities.getMaxAnisotropy();
-
-	},
-	getPrecision: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .getPrecision() is now .capabilities.precision.' );
-		return this.capabilities.precision;
-
-	},
-	resetGLState: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .resetGLState() is now .state.reset().' );
-		return this.state.reset();
-
-	},
-	supportsFloatTextures: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsFloatTextures() is now .extensions.get( \'OES_texture_float\' ).' );
-		return this.extensions.get( 'OES_texture_float' );
-
-	},
-	supportsHalfFloatTextures: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsHalfFloatTextures() is now .extensions.get( \'OES_texture_half_float\' ).' );
-		return this.extensions.get( 'OES_texture_half_float' );
-
-	},
-	supportsStandardDerivatives: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsStandardDerivatives() is now .extensions.get( \'OES_standard_derivatives\' ).' );
-		return this.extensions.get( 'OES_standard_derivatives' );
-
-	},
-	supportsCompressedTextureS3TC: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsCompressedTextureS3TC() is now .extensions.get( \'WEBGL_compressed_texture_s3tc\' ).' );
-		return this.extensions.get( 'WEBGL_compressed_texture_s3tc' );
-
-	},
-	supportsCompressedTexturePVRTC: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsCompressedTexturePVRTC() is now .extensions.get( \'WEBGL_compressed_texture_pvrtc\' ).' );
-		return this.extensions.get( 'WEBGL_compressed_texture_pvrtc' );
-
-	},
-	supportsBlendMinMax: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsBlendMinMax() is now .extensions.get( \'EXT_blend_minmax\' ).' );
-		return this.extensions.get( 'EXT_blend_minmax' );
-
-	},
-	supportsVertexTextures: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsVertexTextures() is now .capabilities.vertexTextures.' );
-		return this.capabilities.vertexTextures;
-
-	},
-	supportsInstancedArrays: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .supportsInstancedArrays() is now .extensions.get( \'ANGLE_instanced_arrays\' ).' );
-		return this.extensions.get( 'ANGLE_instanced_arrays' );
-
-	},
-	enableScissorTest: function ( boolean ) {
-
-		console.warn( 'THREE.WebGLRenderer: .enableScissorTest() is now .setScissorTest().' );
-		this.setScissorTest( boolean );
-
-	},
-	initMaterial: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .initMaterial() has been removed.' );
-
-	},
-	addPrePlugin: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .addPrePlugin() has been removed.' );
-
-	},
-	addPostPlugin: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .addPostPlugin() has been removed.' );
-
-	},
-	updateShadowMap: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .updateShadowMap() has been removed.' );
-
-	},
-	setFaceCulling: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .setFaceCulling() has been removed.' );
-
-	},
-	allocTextureUnit: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .allocTextureUnit() has been removed.' );
-
-	},
-	setTexture: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .setTexture() has been removed.' );
-
-	},
-	setTexture2D: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .setTexture2D() has been removed.' );
-
-	},
-	setTextureCube: function () {
-
-		console.warn( 'THREE.WebGLRenderer: .setTextureCube() has been removed.' );
-
-	}
-
-} );
-
-Object.defineProperties( WebGLRenderer.prototype, {
-
-	shadowMapEnabled: {
-		get: function () {
-
-			return this.shadowMap.enabled;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMapEnabled is now .shadowMap.enabled.' );
-			this.shadowMap.enabled = value;
-
-		}
-	},
-	shadowMapType: {
-		get: function () {
-
-			return this.shadowMap.type;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMapType is now .shadowMap.type.' );
-			this.shadowMap.type = value;
-
-		}
-	},
-	shadowMapCullFace: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMapCullFace has been removed. Set Material.shadowSide instead.' );
-			return undefined;
-
-		},
-		set: function ( /* value */ ) {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMapCullFace has been removed. Set Material.shadowSide instead.' );
-
-		}
-	}
-} );
-
-Object.defineProperties( WebGLShadowMap.prototype, {
-
-	cullFace: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.cullFace has been removed. Set Material.shadowSide instead.' );
-			return undefined;
-
-		},
-		set: function ( /* cullFace */ ) {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.cullFace has been removed. Set Material.shadowSide instead.' );
-
-		}
-	},
-	renderReverseSided: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.renderReverseSided has been removed. Set Material.shadowSide instead.' );
-			return undefined;
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.renderReverseSided has been removed. Set Material.shadowSide instead.' );
-
-		}
-	},
-	renderSingleSided: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.renderSingleSided has been removed. Set Material.shadowSide instead.' );
-			return undefined;
-
-		},
-		set: function () {
-
-			console.warn( 'THREE.WebGLRenderer: .shadowMap.renderSingleSided has been removed. Set Material.shadowSide instead.' );
-
-		}
-	}
-
-} );
-
-//
-
-Object.defineProperties( WebGLRenderTargetCube.prototype, {
-
-	activeCubeFace: {
-		set: function ( /* value */ ) {
-
-			console.warn( 'THREE.WebGLRenderTargetCube: .activeCubeFace has been removed. It is now the second parameter of WebGLRenderer.setRenderTarget().' );
-
-		}
-	},
-	activeMipMapLevel: {
-		set: function ( /* value */ ) {
-
-			console.warn( 'THREE.WebGLRenderTargetCube: .activeMipMapLevel has been removed. It is now the third parameter of WebGLRenderer.setRenderTarget().' );
-
-		}
-	}
-
-} );
-
-//
-
-Object.defineProperties( WebGLRenderTarget.prototype, {
-
-	wrapS: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.' );
-			return this.texture.wrapS;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.' );
-			this.texture.wrapS = value;
-
-		}
-	},
-	wrapT: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.' );
-			return this.texture.wrapT;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.' );
-			this.texture.wrapT = value;
-
-		}
-	},
-	magFilter: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter.' );
-			return this.texture.magFilter;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter.' );
-			this.texture.magFilter = value;
-
-		}
-	},
-	minFilter: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.' );
-			return this.texture.minFilter;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.' );
-			this.texture.minFilter = value;
-
-		}
-	},
-	anisotropy: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy.' );
-			return this.texture.anisotropy;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy.' );
-			this.texture.anisotropy = value;
-
-		}
-	},
-	offset: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .offset is now .texture.offset.' );
-			return this.texture.offset;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .offset is now .texture.offset.' );
-			this.texture.offset = value;
-
-		}
-	},
-	repeat: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .repeat is now .texture.repeat.' );
-			return this.texture.repeat;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .repeat is now .texture.repeat.' );
-			this.texture.repeat = value;
-
-		}
-	},
-	format: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .format is now .texture.format.' );
-			return this.texture.format;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .format is now .texture.format.' );
-			this.texture.format = value;
-
-		}
-	},
-	type: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .type is now .texture.type.' );
-			return this.texture.type;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .type is now .texture.type.' );
-			this.texture.type = value;
-
-		}
-	},
-	generateMipmaps: {
-		get: function () {
-
-			console.warn( 'THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.' );
-			return this.texture.generateMipmaps;
-
-		},
-		set: function ( value ) {
-
-			console.warn( 'THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.' );
-			this.texture.generateMipmaps = value;
-
-		}
-	}
-
-} );
-
-//
-
-Object.defineProperties( WebVRManager.prototype, {
-
-	standing: {
-		set: function ( /* value */ ) {
-
-			console.warn( 'THREE.WebVRManager: .standing has been removed.' );
-
-		}
-	},
-	userHeight: {
-		set: function ( /* value */ ) {
-
-			console.warn( 'THREE.WebVRManager: .userHeight has been removed.' );
-
-		}
-	}
-
-} );
-
-//
-
-Audio.prototype.load = function ( file ) {
-
-	console.warn( 'THREE.Audio: .load has been deprecated. Use THREE.AudioLoader instead.' );
-	var scope = this;
-	var audioLoader = new AudioLoader();
-	audioLoader.load( file, function ( buffer ) {
-
-		scope.setBuffer( buffer );
-
-	} );
-	return this;
-
-};
-
-AudioAnalyser.prototype.getData = function () {
-
-	console.warn( 'THREE.AudioAnalyser: .getData() is now .getFrequencyData().' );
-	return this.getFrequencyData();
-
-};
-
-//
-
-CubeCamera.prototype.updateCubeMap = function ( renderer, scene ) {
-
-	console.warn( 'THREE.CubeCamera: .updateCubeMap() is now .update().' );
-	return this.update( renderer, scene );
-
-};
-
-//
-
-export var GeometryUtils = {
-
-	merge: function ( geometry1, geometry2, materialIndexOffset ) {
-
-		console.warn( 'THREE.GeometryUtils: .merge() has been moved to Geometry. Use geometry.merge( geometry2, matrix, materialIndexOffset ) instead.' );
-		var matrix;
-
-		if ( geometry2.isMesh ) {
-
-			geometry2.matrixAutoUpdate && geometry2.updateMatrix();
-
-			matrix = geometry2.matrix;
-			geometry2 = geometry2.geometry;
-
-		}
-
-		geometry1.merge( geometry2, matrix, materialIndexOffset );
-
-	},
-
-	center: function ( geometry ) {
-
-		console.warn( 'THREE.GeometryUtils: .center() has been moved to Geometry. Use geometry.center() instead.' );
-		return geometry.center();
-
-	}
-
-};
-
-ImageUtils.crossOrigin = undefined;
-
-ImageUtils.loadTexture = function ( url, mapping, onLoad, onError ) {
-
-	console.warn( 'THREE.ImageUtils.loadTexture has been deprecated. Use THREE.TextureLoader() instead.' );
-
-	var loader = new TextureLoader();
-	loader.setCrossOrigin( this.crossOrigin );
-
-	var texture = loader.load( url, onLoad, undefined, onError );
-
-	if ( mapping ) texture.mapping = mapping;
-
-	return texture;
-
-};
-
-ImageUtils.loadTextureCube = function ( urls, mapping, onLoad, onError ) {
-
-	console.warn( 'THREE.ImageUtils.loadTextureCube has been deprecated. Use THREE.CubeTextureLoader() instead.' );
-
-	var loader = new CubeTextureLoader();
-	loader.setCrossOrigin( this.crossOrigin );
-
-	var texture = loader.load( urls, onLoad, undefined, onError );
-
-	if ( mapping ) texture.mapping = mapping;
-
-	return texture;
-
-};
-
-ImageUtils.loadCompressedTexture = function () {
-
-	console.error( 'THREE.ImageUtils.loadCompressedTexture has been removed. Use THREE.DDSLoader instead.' );
-
-};
-
-ImageUtils.loadCompressedTextureCube = function () {
-
-	console.error( 'THREE.ImageUtils.loadCompressedTextureCube has been removed. Use THREE.DDSLoader instead.' );
-
-};
-
-//
-
-export function Projector() {
-
-	console.error( 'THREE.Projector has been moved to /examples/js/renderers/Projector.js.' );
-
-	this.projectVector = function ( vector, camera ) {
-
-		console.warn( 'THREE.Projector: .projectVector() is now vector.project().' );
-		vector.project( camera );
-
-	};
-
-	this.unprojectVector = function ( vector, camera ) {
-
-		console.warn( 'THREE.Projector: .unprojectVector() is now vector.unproject().' );
-		vector.unproject( camera );
-
-	};
-
-	this.pickingRay = function () {
-
-		console.error( 'THREE.Projector: .pickingRay() is now raycaster.setFromCamera().' );
-
-	};
-
-}
-
-//
-
-export function CanvasRenderer() {
-
-	console.error( 'THREE.CanvasRenderer has been removed.' );
-
-}
-
-//
-
-export function JSONLoader() {
-
-	console.error( 'THREE.JSONLoader has been removed.' );
-
-}
-
-//
-
-export var SceneUtils = {
-
-	createMultiMaterialObject: function ( /* geometry, materials */ ) {
-
-		console.error( 'THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js' );
-
-	},
-
-	detach: function ( /* child, parent, scene */ ) {
-
-		console.error( 'THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js' );
-
-	},
-
-	attach: function ( /* child, scene, parent */ ) {
-
-		console.error( 'THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js' );
-
-	}
-
-};
-
-//
-
-export function LensFlare() {
-
-	console.error( 'THREE.LensFlare has been moved to /examples/js/objects/Lensflare.js' );
-
-}
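
Every shim in the file above follows the same pattern: keep the removed or renamed member callable, log a `console.warn`/`console.error`, and delegate to the replacement API where one exists. A minimal sketch of what that buys downstream code, assuming a browser context and a build that still ships these shims:

```js
import { Object3D, Vector3, WebGLRenderer } from 'three';

const renderer = new WebGLRenderer();
const node = new Object3D();
const axis = new Vector3( 0, 1, 0 );

// Removed method: warns, then forwards to the new signature.
node.translate( 2, axis ); // -> node.translateOnAxis( axis, 2 )

// Renamed properties: the defineProperties shims map old names to new locations.
renderer.shadowMapEnabled = true; // -> renderer.shadowMap.enabled = true
node.eulerOrder = 'YXZ';          // -> node.rotation.order = 'YXZ'
```

Old call sites keep working for a release or two while the warnings point authors at the replacements; shims for members with no equivalent (e.g. `CanvasRenderer`, `JSONLoader`) only log the error.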
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/core/BufferAttribute.js b/spaces/banana-projects/web3d/node_modules/three/src/core/BufferAttribute.js
deleted file mode 100644
index ec5b7544095e690a12d96afded39045d2d0c96ae..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/core/BufferAttribute.js
+++ /dev/null
@@ -1,430 +0,0 @@
-import { Vector4 } from '../math/Vector4.js';
-import { Vector3 } from '../math/Vector3.js';
-import { Vector2 } from '../math/Vector2.js';
-import { Color } from '../math/Color.js';
-
-/**
- * @author mrdoob / http://mrdoob.com/
- */
-
-function BufferAttribute( array, itemSize, normalized ) {
-
-	if ( Array.isArray( array ) ) {
-
-		throw new TypeError( 'THREE.BufferAttribute: array should be a Typed Array.' );
-
-	}
-
-	this.name = '';
-
-	this.array = array;
-	this.itemSize = itemSize;
-	this.count = array !== undefined ? array.length / itemSize : 0;
-	this.normalized = normalized === true;
-
-	this.dynamic = false;
-	this.updateRange = { offset: 0, count: - 1 };
-
-	this.version = 0;
-
-}
-
-Object.defineProperty( BufferAttribute.prototype, 'needsUpdate', {
-
-	set: function ( value ) {
-
-		if ( value === true ) this.version ++;
-
-	}
-
-} );
-
-Object.assign( BufferAttribute.prototype, {
-
-	isBufferAttribute: true,
-
-	onUploadCallback: function () {},
-
-	setArray: function ( array ) {
-
-		if ( Array.isArray( array ) ) {
-
-			throw new TypeError( 'THREE.BufferAttribute: array should be a Typed Array.' );
-
-		}
-
-		this.count = array !== undefined ? array.length / this.itemSize : 0;
-		this.array = array;
-
-		return this;
-
-	},
-
-	setDynamic: function ( value ) {
-
-		this.dynamic = value;
-
-		return this;
-
-	},
-
-	copy: function ( source ) {
-
-		this.name = source.name;
-		this.array = new source.array.constructor( source.array );
-		this.itemSize = source.itemSize;
-		this.count = source.count;
-		this.normalized = source.normalized;
-
-		this.dynamic = source.dynamic;
-
-		return this;
-
-	},
-
-	copyAt: function ( index1, attribute, index2 ) {
-
-		index1 *= this.itemSize;
-		index2 *= attribute.itemSize;
-
-		for ( var i = 0, l = this.itemSize; i < l; i ++ ) {
-
-			this.array[ index1 + i ] = attribute.array[ index2 + i ];
-
-		}
-
-		return this;
-
-	},
-
-	copyArray: function ( array ) {
-
-		this.array.set( array );
-
-		return this;
-
-	},
-
-	copyColorsArray: function ( colors ) {
-
-		var array = this.array, offset = 0;
-
-		for ( var i = 0, l = colors.length; i < l; i ++ ) {
-
-			var color = colors[ i ];
-
-			if ( color === undefined ) {
-
-				console.warn( 'THREE.BufferAttribute.copyColorsArray(): color is undefined', i );
-				color = new Color();
-
-			}
-
-			array[ offset ++ ] = color.r;
-			array[ offset ++ ] = color.g;
-			array[ offset ++ ] = color.b;
-
-		}
-
-		return this;
-
-	},
-
-	copyVector2sArray: function ( vectors ) {
-
-		var array = this.array, offset = 0;
-
-		for ( var i = 0, l = vectors.length; i < l; i ++ ) {
-
-			var vector = vectors[ i ];
-
-			if ( vector === undefined ) {
-
-				console.warn( 'THREE.BufferAttribute.copyVector2sArray(): vector is undefined', i );
-				vector = new Vector2();
-
-			}
-
-			array[ offset ++ ] = vector.x;
-			array[ offset ++ ] = vector.y;
-
-		}
-
-		return this;
-
-	},
-
-	copyVector3sArray: function ( vectors ) {
-
-		var array = this.array, offset = 0;
-
-		for ( var i = 0, l = vectors.length; i < l; i ++ ) {
-
-			var vector = vectors[ i ];
-
-			if ( vector === undefined ) {
-
-				console.warn( 'THREE.BufferAttribute.copyVector3sArray(): vector is undefined', i );
-				vector = new Vector3();
-
-			}
-
-			array[ offset ++ ] = vector.x;
-			array[ offset ++ ] = vector.y;
-			array[ offset ++ ] = vector.z;
-
-		}
-
-		return this;
-
-	},
-
-	copyVector4sArray: function ( vectors ) {
-
-		var array = this.array, offset = 0;
-
-		for ( var i = 0, l = vectors.length; i < l; i ++ ) {
-
-			var vector = vectors[ i ];
-
-			if ( vector === undefined ) {
-
-				console.warn( 'THREE.BufferAttribute.copyVector4sArray(): vector is undefined', i );
-				vector = new Vector4();
-
-			}
-
-			array[ offset ++ ] = vector.x;
-			array[ offset ++ ] = vector.y;
-			array[ offset ++ ] = vector.z;
-			array[ offset ++ ] = vector.w;
-
-		}
-
-		return this;
-
-	},
-
-	set: function ( value, offset ) {
-
-		if ( offset === undefined ) offset = 0;
-
-		this.array.set( value, offset );
-
-		return this;
-
-	},
-
-	getX: function ( index ) {
-
-		return this.array[ index * this.itemSize ];
-
-	},
-
-	setX: function ( index, x ) {
-
-		this.array[ index * this.itemSize ] = x;
-
-		return this;
-
-	},
-
-	getY: function ( index ) {
-
-		return this.array[ index * this.itemSize + 1 ];
-
-	},
-
-	setY: function ( index, y ) {
-
-		this.array[ index * this.itemSize + 1 ] = y;
-
-		return this;
-
-	},
-
-	getZ: function ( index ) {
-
-		return this.array[ index * this.itemSize + 2 ];
-
-	},
-
-	setZ: function ( index, z ) {
-
-		this.array[ index * this.itemSize + 2 ] = z;
-
-		return this;
-
-	},
-
-	getW: function ( index ) {
-
-		return this.array[ index * this.itemSize + 3 ];
-
-	},
-
-	setW: function ( index, w ) {
-
-		this.array[ index * this.itemSize + 3 ] = w;
-
-		return this;
-
-	},
-
-	setXY: function ( index, x, y ) {
-
-		index *= this.itemSize;
-
-		this.array[ index + 0 ] = x;
-		this.array[ index + 1 ] = y;
-
-		return this;
-
-	},
-
-	setXYZ: function ( index, x, y, z ) {
-
-		index *= this.itemSize;
-
-		this.array[ index + 0 ] = x;
-		this.array[ index + 1 ] = y;
-		this.array[ index + 2 ] = z;
-
-		return this;
-
-	},
-
-	setXYZW: function ( index, x, y, z, w ) {
-
-		index *= this.itemSize;
-
-		this.array[ index + 0 ] = x;
-		this.array[ index + 1 ] = y;
-		this.array[ index + 2 ] = z;
-		this.array[ index + 3 ] = w;
-
-		return this;
-
-	},
-
-	onUpload: function ( callback ) {
-
-		this.onUploadCallback = callback;
-
-		return this;
-
-	},
-
-	clone: function () {
-
-		return new this.constructor( this.array, this.itemSize ).copy( this );
-
-	}
-
-} );
-
-//
-
-function Int8BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Int8Array( array ), itemSize, normalized );
-
-}
-
-Int8BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Int8BufferAttribute.prototype.constructor = Int8BufferAttribute;
-
-
-function Uint8BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Uint8Array( array ), itemSize, normalized );
-
-}
-
-Uint8BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Uint8BufferAttribute.prototype.constructor = Uint8BufferAttribute;
-
-
-function Uint8ClampedBufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Uint8ClampedArray( array ), itemSize, normalized );
-
-}
-
-Uint8ClampedBufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Uint8ClampedBufferAttribute.prototype.constructor = Uint8ClampedBufferAttribute;
-
-
-function Int16BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Int16Array( array ), itemSize, normalized );
-
-}
-
-Int16BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Int16BufferAttribute.prototype.constructor = Int16BufferAttribute;
-
-
-function Uint16BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Uint16Array( array ), itemSize, normalized );
-
-}
-
-Uint16BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Uint16BufferAttribute.prototype.constructor = Uint16BufferAttribute;
-
-
-function Int32BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Int32Array( array ), itemSize, normalized );
-
-}
-
-Int32BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Int32BufferAttribute.prototype.constructor = Int32BufferAttribute;
-
-
-function Uint32BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Uint32Array( array ), itemSize, normalized );
-
-}
-
-Uint32BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Uint32BufferAttribute.prototype.constructor = Uint32BufferAttribute;
-
-
-function Float32BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Float32Array( array ), itemSize, normalized );
-
-}
-
-Float32BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Float32BufferAttribute.prototype.constructor = Float32BufferAttribute;
-
-
-function Float64BufferAttribute( array, itemSize, normalized ) {
-
-	BufferAttribute.call( this, new Float64Array( array ), itemSize, normalized );
-
-}
-
-Float64BufferAttribute.prototype = Object.create( BufferAttribute.prototype );
-Float64BufferAttribute.prototype.constructor = Float64BufferAttribute;
-
-//
-
-export {
-	Float64BufferAttribute,
-	Float32BufferAttribute,
-	Uint32BufferAttribute,
-	Int32BufferAttribute,
-	Uint16BufferAttribute,
-	Int16BufferAttribute,
-	Uint8ClampedBufferAttribute,
-	Uint8BufferAttribute,
-	Int8BufferAttribute,
-	BufferAttribute
-};
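
For reference, a short usage sketch of the API defined above; the import path assumes the same `src/` layout as this file:

```js
import { Float32BufferAttribute } from './BufferAttribute.js';

// Three vertices, three floats each (itemSize = 3), so count === 3.
const position = new Float32BufferAttribute( [
	0, 0, 0,
	1, 0, 0,
	0, 1, 0
], 3 );

console.log( position.count );     // 3
console.log( position.getX( 1 ) ); // 1

// Mutate one vertex, then flag the attribute so the renderer re-uploads it.
position.setXYZ( 2, 0, 2, 0 );
position.needsUpdate = true; // bumps .version via the setter defined above
```

Note that the typed-array subclasses (`Float32BufferAttribute` and friends) accept a plain array and copy it into a typed array, while the base `BufferAttribute` rejects plain arrays outright.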
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/helpers/PolarGridHelper.js b/spaces/banana-projects/web3d/node_modules/three/src/helpers/PolarGridHelper.js
deleted file mode 100644
index bfbb0f0f197e9a034d56b5c859870b1cfce2eec1..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/helpers/PolarGridHelper.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- * @author Mugen87 / http://github.com/Mugen87
- * @author Hectate / http://www.github.com/Hectate
- */
-
-import { LineSegments } from '../objects/LineSegments.js';
-import { VertexColors } from '../constants.js';
-import { LineBasicMaterial } from '../materials/LineBasicMaterial.js';
-import { Float32BufferAttribute } from '../core/BufferAttribute.js';
-import { BufferGeometry } from '../core/BufferGeometry.js';
-import { Color } from '../math/Color.js';
-
-function PolarGridHelper( radius, radials, circles, divisions, color1, color2 ) {
-
-	radius = radius || 10;
-	radials = radials || 16;
-	circles = circles || 8;
-	divisions = divisions || 64;
-	color1 = new Color( color1 !== undefined ? color1 : 0x444444 );
-	color2 = new Color( color2 !== undefined ? color2 : 0x888888 );
-
-	var vertices = [];
-	var colors = [];
-
-	var x, z;
-	var v, i, j, r, color;
-
-	// create the radials
-
-	for ( i = 0; i <= radials; i ++ ) {
-
-		v = ( i / radials ) * ( Math.PI * 2 );
-
-		x = Math.sin( v ) * radius;
-		z = Math.cos( v ) * radius;
-
-		vertices.push( 0, 0, 0 );
-		vertices.push( x, 0, z );
-
-		color = ( i & 1 ) ? color1 : color2;
-
-		colors.push( color.r, color.g, color.b );
-		colors.push( color.r, color.g, color.b );
-
-	}
-
-	// create the circles
-
-	for ( i = 0; i <= circles; i ++ ) {
-
-		color = ( i & 1 ) ? color1 : color2;
-
-		r = radius - ( radius / circles * i );
-
-		for ( j = 0; j < divisions; j ++ ) {
-
-			// first vertex
-
-			v = ( j / divisions ) * ( Math.PI * 2 );
-
-			x = Math.sin( v ) * r;
-			z = Math.cos( v ) * r;
-
-			vertices.push( x, 0, z );
-			colors.push( color.r, color.g, color.b );
-
-			// second vertex
-
-			v = ( ( j + 1 ) / divisions ) * ( Math.PI * 2 );
-
-			x = Math.sin( v ) * r;
-			z = Math.cos( v ) * r;
-
-			vertices.push( x, 0, z );
-			colors.push( color.r, color.g, color.b );
-
-		}
-
-	}
-
-	var geometry = new BufferGeometry();
-	geometry.addAttribute( 'position', new Float32BufferAttribute( vertices, 3 ) );
-	geometry.addAttribute( 'color', new Float32BufferAttribute( colors, 3 ) );
-
-	var material = new LineBasicMaterial( { vertexColors: VertexColors } );
-
-	LineSegments.call( this, geometry, material );
-
-}
-
-PolarGridHelper.prototype = Object.create( LineSegments.prototype );
-PolarGridHelper.prototype.constructor = PolarGridHelper;
-
-export { PolarGridHelper };
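
Typical usage, sketched; `scene` is assumed to be an existing `THREE.Scene`:

```js
import { PolarGridHelper } from './PolarGridHelper.js';

// radius 10, 16 radials, 8 circles, 64 segments per circle; the two
// colors alternate between successive radials and rings.
const helper = new PolarGridHelper( 10, 16, 8, 64, 0x0000ff, 0x808080 );
scene.add( helper );
```

All six arguments are optional and fall back to the defaults set at the top of the constructor.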
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDepthMaterial.js b/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDepthMaterial.js
deleted file mode 100644
index 2a143dfde5bcda3775a43e642ed0d7fee63df8e9..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshDepthMaterial.js
+++ /dev/null
@@ -1,86 +0,0 @@
-import { Material } from './Material.js';
-import { BasicDepthPacking } from '../constants.js';
-
-/**
- * @author mrdoob / http://mrdoob.com/
- * @author alteredq / http://alteredqualia.com/
- * @author bhouston / https://clara.io
- * @author WestLangley / http://github.com/WestLangley
- *
- * parameters = {
- *
- *  opacity: <float>,
- *
- *  map: new THREE.Texture( <Image> ),
- *
- *  alphaMap: new THREE.Texture( <Image> ),
- *
- *  displacementMap: new THREE.Texture( <Image> ),
- *  displacementScale: <float>,
- *  displacementBias: <float>,
- *
- *  wireframe: <boolean>,
- *  wireframeLinewidth: <float>
- * }
- */
-
-function MeshDepthMaterial( parameters ) {
-
-	Material.call( this );
-
-	this.type = 'MeshDepthMaterial';
-
-	this.depthPacking = BasicDepthPacking;
-
-	this.skinning = false;
-	this.morphTargets = false;
-
-	this.map = null;
-
-	this.alphaMap = null;
-
-	this.displacementMap = null;
-	this.displacementScale = 1;
-	this.displacementBias = 0;
-
-	this.wireframe = false;
-	this.wireframeLinewidth = 1;
-
-	this.fog = false;
-	this.lights = false;
-
-	this.setValues( parameters );
-
-}
-
-MeshDepthMaterial.prototype = Object.create( Material.prototype );
-MeshDepthMaterial.prototype.constructor = MeshDepthMaterial;
-
-MeshDepthMaterial.prototype.isMeshDepthMaterial = true;
-
-MeshDepthMaterial.prototype.copy = function ( source ) {
-
-	Material.prototype.copy.call( this, source );
-
-	this.depthPacking = source.depthPacking;
-
-	this.skinning = source.skinning;
-	this.morphTargets = source.morphTargets;
-
-	this.map = source.map;
-
-	this.alphaMap = source.alphaMap;
-
-	this.displacementMap = source.displacementMap;
-	this.displacementScale = source.displacementScale;
-	this.displacementBias = source.displacementBias;
-
-	this.wireframe = source.wireframe;
-	this.wireframeLinewidth = source.wireframeLinewidth;
-
-	return this;
-
-};
-
-
-export { MeshDepthMaterial };
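
A common use of this material is as a scene-wide override to render encoded depth, for example as input to an SSAO or depth-of-field pass; `scene`, `camera`, and `renderer` are assumed to exist:

```js
import { MeshDepthMaterial } from './MeshDepthMaterial.js';
import { RGBADepthPacking } from '../constants.js';

// Pack depth into RGBA so it survives an 8-bit render target
// (the default is BasicDepthPacking, as set in the constructor above).
const depthMaterial = new MeshDepthMaterial( { depthPacking: RGBADepthPacking } );

scene.overrideMaterial = depthMaterial;
renderer.render( scene, camera ); // every mesh renders with the depth material
scene.overrideMaterial = null;
```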
diff --git a/spaces/bigscience-data/bigscience-tokenizer/README.md b/spaces/bigscience-data/bigscience-tokenizer/README.md
deleted file mode 100644
index c0ab13c48f9c6c158d4647bc52d1441168bb4252..0000000000000000000000000000000000000000
--- a/spaces/bigscience-data/bigscience-tokenizer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Bigscience Tokenizer
-emoji: 🔣
-colorFrom: pink
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.2.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/bingbing520/ChatGPT/modules/overwrites.py b/spaces/bingbing520/ChatGPT/modules/overwrites.py
deleted file mode 100644
index 035a4a52722d66ee28af1c05231ad1cea3339ef5..0000000000000000000000000000000000000000
--- a/spaces/bingbing520/ChatGPT/modules/overwrites.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-import logging
-
-from llama_index import Prompt
-from typing import Dict, List, Tuple
-import mdtex2html
-from gradio_client import utils as client_utils
-
-from modules.presets import *
-from modules.llama_func import *
-
-
-def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
-    logging.debug("Compacting text chunks...🚀🚀🚀")
-    combined_str = [c.strip() for c in text_chunks if c.strip()]
-    combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
-    combined_str = "\n\n".join(combined_str)
-    # resplit based on self.max_chunk_overlap
-    text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
-    return text_splitter.split_text(combined_str)
-
-
-def postprocess(
-        self,
-        y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
-    ) -> List[List[str | Dict | None]]:
-        """
-        Parameters:
-            y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.  It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
-        Returns:
-            List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
-        """
-        if y is None:
-            return []
-        processed_messages = []
-        for message_pair in y:
-            assert isinstance(
-                message_pair, (tuple, list)
-            ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
-            assert (
-                len(message_pair) == 2
-            ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
-
-            processed_messages.append(
-                [
-                    self._postprocess_chat_messages(message_pair[0], "user"),
-                    self._postprocess_chat_messages(message_pair[1], "bot"),
-                ]
-            )
-        return processed_messages
-
-def postprocess_chat_messages(
-        self, chat_message: str | Tuple | List | None, message_type: str
-    ) -> str | Dict | None:
-        if chat_message is None:
-            return None
-        elif isinstance(chat_message, (tuple, list)):
-            filepath = chat_message[0]
-            mime_type = client_utils.get_mimetype(filepath)
-            filepath = self.make_temp_copy_if_needed(filepath)
-            return {
-                "name": filepath,
-                "mime_type": mime_type,
-                "alt_text": chat_message[1] if len(chat_message) > 1 else None,
-                "data": None,  # These last two fields are filled in by the frontend
-                "is_file": True,
-            }
-        elif isinstance(chat_message, str):
-            if message_type == "bot":
-                if not detect_converted_mark(chat_message):
-                    chat_message = convert_mdtext(chat_message)
-            elif message_type == "user":
-                if not detect_converted_mark(chat_message):
-                    chat_message = convert_asis(chat_message)
-            return chat_message
-        else:
-            raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
-
-with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
-    customJS = f.read()
-    kelpyCodos = f2.read()
-
-def reload_javascript():
-    print("Reloading javascript...")
-    js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
-    def template_response(*args, **kwargs):
-        res = GradioTemplateResponseOriginal(*args, **kwargs)
-        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
-        res.init_headers()
-        return res
-
-    gr.routes.templates.TemplateResponse = template_response
-
-GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
\ No newline at end of file
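
Defining these functions has no effect until they are bound onto Gradio's classes; that wiring lives elsewhere in this app. A hypothetical sketch of the binding, with attribute names inferred from the signatures above rather than taken from the app:

```python
import gradio as gr

from modules.overwrites import (
    postprocess,
    postprocess_chat_messages,
    reload_javascript,
)

# Hypothetical binding: swap in the patched postprocessing so bot markdown
# goes through convert_mdtext and user text through convert_asis.
gr.Chatbot.postprocess = postprocess
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages

# Wrap Gradio's TemplateResponse so custom.js and Kelpy-Codos.js are
# injected into every served page.
reload_javascript()
```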
diff --git a/spaces/bioriAsaeru/text-to-voice/Download The Fault in Our Stars Movie from Fzmovies Hollywood A Moving Adaptation of the Popular Book.md b/spaces/bioriAsaeru/text-to-voice/Download The Fault in Our Stars Movie from Fzmovies Hollywood A Moving Adaptation of the Popular Book.md
deleted file mode 100644
index a761737221e750e65f6e582c96cf3578acb81fbf..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Download The Fault in Our Stars Movie from Fzmovies Hollywood A Moving Adaptation of the Popular Book.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<br />
-<p>XXx: The Return Of Xander Cage (English) Tamil Dubbed Watch Online ???? ???? LINK > xXx: The Return of Xander Cage (2017) hd. Watch xXx: Return of Xander Cage 2017 tamil dubbed movie in HD quality online. Watch xXx: The Return of Xander Cage (2017) hd. Watch xXx: The Return of Xander Cage (2017) in hd quality.xXx Return of Xander Cage Movie Watch Online Dailymotion English Subtitles from United States, Date Added: 2017-01-08.xXx: Return of Xander Cage Telugu 2017 Nandini Movie Watch Online. Action. Watch xXx: The Return of Xander Cage 2017 tamil dubbed movie in HD quality online. Watch xXx: The Return of Xander Cage (2017).Watch xXx Return of Xander Cage (2017) hd size | Download | HD Video | Best 720p. 2017 Action Movie. Tigress Return of Xander Cage 2017 Hindi Tamil Download. xXx: The Return of Xander Cage (2017) tamil dubbed movie in HD quality online. Watch xXx: The Return of Xander Cage (2017) tamil dubbed movie in HD quality online. Watch xXx: The Return of Xander Cage (2017) tamil dubbed movie in HD quality online. xXx: Return of Xander Cage (2017) Hindi Dubbed Torrent Info: Download Torrent For.Prince (Tamil Dubbed) - with Eng Subtitle Vivek Oberoi Nandana Sen Aruna. New Martial Arts Kung Fu Movies English Subtitles - Best Chinese Movies 2018 Full HD. xXx 3: Return of Xander Cage (2017) Vin Diesel Nipple Clip Action Movie HD. Movie. Independent Online Cinema.Prince (Tamil Dubbed) - with Eng Subtitle Vivek Oberoi Nandana Sen Aruna. New Martial Arts Kung Fu Movies English Subtitles - Best Chinese Movies 2018 Full HD. Watch xXx 3: Return of Xander Cage (2017) Vin Diesel Nipple Clip Action Movie HD. Movie. Independent Online Cinema.Watch xXx: The Return of Xander Cage (2017) full movie in hd quality free download from search, Download Full Movie, Download 720p movie, Watch. xXx: Return of Xander Cage (2017) tamil dubbed movie in HD quality online. Watch xXx: The Return of Xander Cage (2017) tamil dubbed movie in HD quality online.Full movie is streaming online in HD ee730c9e81 -mixcraft-51149-portable11 -aplikasi-kamera-tembus-pandang-pakaian-jar -fu-in-english -fever-download</p>
-<p>Official Samsung Galaxy S5 Prime LTE-A SM-G906K Stock RomDownload File ===== Galaxy S5 Prime LTE-A SM-G906K. ROM.. SM-J200GU Original Firmware Download.Carrier:. Samsung SM-G906K & SM-G906L Firmware Download (Full).. Snapdragon 820, Poco F2, Honor 8C. Samsung Galaxy S5 Prime LTE A SM-G906K Firmware For LG U+ /.Flashing Firmware for Samsung Galaxy S5 Prime LTE A SM-G906K G906L / G906S. Samsung G906s Galaxy Core Prime Hard LineageOS.. Flashing Official Samsung Firmware ROM Samsung Galaxy S5 LTE A SM-G906K [CM14.1], [CWM] [G920M]. Official Samsung Galaxy S5 Firmware ROM, Download Original Firmware and Root for Galaxy S5 Prime SM G906L and G906K.Samsung SM-G906L Firmware Download for SAMSUNG GALAXY NOTE 4 LTE SM-N910S.. Samsung SM-G906S First Impressions | Turbo, Charge, and Battery Life.. The Samsung Galaxy S5 is a bit newer, packing in features like an always-on display, better. Original Firmware Download.1 for LG G3 Download APK For LG G3 Download APK For LG G3L. LG G5 bootloader 4.3.27 unlock by LG google Nexus 5 To 5.0.1. Get TWRP Recovery For LG G5 For HTC One M8S Download.SM-G906L Firmware Download for SAMSUNG GALAXY S5 Prime SM-G906L. Download firmware for Galaxy G5 for various areas, such as Wi-Fi, mobile data, and signal strength.. Performance Testing for LG G5 Octa-Core and Exynos 7420/7433. Samsung SM-G906L Firmware For All SM.1 for LG G3 Download APK For LG G3 Download APK For LG G3L. LG G5 bootloader 4.3.27 unlock by LG google Nexus 5 To 5.0.1. Get TWRP Recovery For LG G5 For HTC One M8S Download. SM-N9006 Watches SM-N9006 (SM-N9006I).Download Download Firmware SM-J200GU ee730c9e81 -and-achieve-by-remez-sasson-pdf-download -san-andreas-cool-mod-indir-gezginler -thelastemperor1987bluray720ph264golkes -alegre/escape-from-new-york-tamil-dubbed-movie-free-download -reader-writer-software-47</p>
-<h2>the fault in our stars movie download fzmovies hollywood</h2><br /><p><b><b>Download Zip</b>  <a href="https://urloso.com/2uyRFp">https://urloso.com/2uyRFp</a></b></p><br /><br /> aaccfb2cb3<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Expired Drivers License Army Counseling Form Tips and Best Practices.md b/spaces/bioriAsaeru/text-to-voice/Expired Drivers License Army Counseling Form Tips and Best Practices.md
deleted file mode 100644
index c7a342bd301588fd44937662beefb47be8cd9228..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Expired Drivers License Army Counseling Form Tips and Best Practices.md	
+++ /dev/null
@@ -1,23 +0,0 @@
-<br />
-<p>5-13. Sometimes counseling is tied to specific instances of superior or substandard duty performance. For example, you tell your soldier whether or not the performance met the standard and what the soldier did right or wrong. The key to successful counseling for specific performance is to conduct the counseling session as close to the time of the event as possible.</p>
-<p>Tell the soldier the purpose of the counseling, what was expected, and how they failed to meet the standard. Address the specific unacceptable behavior or action, not the person's character. Tell the soldier the effect of the performance on the rest of the unit. Actively listen to the soldier's response. Remain unemotional. Teach the soldier how to meet the standard. Be prepared to do some personal counseling, since the lack of performance may be related to or the result of a personal problem. Explain to the soldier what will be done to improve performance (plan of action). Identify your responsibilities in implementing the plan of action. Continue to assess and follow up on the soldier's progress. Adjust the plan of action as necessary.<br></p>
-<h2>Expired Drivers License Army Counseling Form</h2><br /><p><b><b>Download Zip</b> ---> <a href="https://urloso.com/2uyPBi">https://urloso.com/2uyPBi</a></b></p><br /><br />
-<p>5-17. Referral counseling helps soldiers work through a personal situation and may follow crisis counseling. Referral counseling also acts as preventative counseling before the situation becomes a problem. Usually, the leader assists the soldier in identifying the problem. 5-18. Outside agencies can help leaders resolve problems. Although it is generally in an individual's best interest to seek help first from his first line leader, leaders must always respect an individual's right to contact these agencies on their own. Leaders can refer the soldier to the appropriate resource, such as Army Community Services, a Chaplain, or a substance abuse counselor. Additional information on support activities can be found in Appendix B, Army Programs, or in FM 6-22 (22-100), Appendix C. Helping soldiers cope with personal problems means more than referring the soldier to another person: the chaplain, a doctor, or a counselor. Until the problem is resolved, you have a soldier with a problem in your unit, so it's your problem. Let your soldiers know what you're doing to help them solve their problems.</p>
-<p>5-19. Commanders or their designated representatives must conduct promotion counseling for all specialists, corporals and sergeants who are eligible for advancement without waiver, but are not recommended for promotion to the next higher grade. Army regulations require that soldiers within this category receive initial (event-oriented) counseling when they attain full eligibility and then periodic (performance and personal growth) counseling at least quarterly.<br></p>
-<p>5-20. Adverse separation counseling may involve informing the soldier of the administrative actions available to the commander in the event substandard performance continues and of the consequences associated with those administrative actions. (See AR 635-200, Chapter 1, paragraph 1-16 and Chapter 17.) 5-21. Developmental counseling may not apply when a soldier has engaged in more serious acts of misconduct. In those situations, the leader should refer the matter to the commander and the servicing staff judge advocate's office. When the leader's rehabilitative efforts fail, counseling with a view towards separation fills an administrative prerequisite to many administrative discharges and serves as a final warning to the soldier to improve performance or face discharge. In many cases, it may be beneficial to involve the chain of command as soon as you determine that adverse separation counseling might be required. The first sergeant or commander should inform the soldier of the notification requirements outlined in AR 635-200.</p>
-<p><strong>Lost ID card:</strong> Present a police report from the Tobyhanna Army Depot Security Office (Building 20) and two valid forms of ID (see Number 2 above). For a military CAC, you will need a DD Form 4856 counseling statement from your unit. Retiree military and dependent IDs do not require a lost-ID report.</p>
-<p><strong>Spouse Enrollment:</strong> Original or certified copy of marriage certificate from the state, birth certificate, social security card, Medicare card and driver's license. If the spouse is changing his/her last name, the primary and secondary forms of identification must have the new last name.</p>
-<p>So while there is some flexibility in the law concerning an expired driver's license while serving as an active-duty service member, it is best to obey the law of the home of record and the laws of the state in which the service member is serving.</p>
-<p>A "Licensed Clinical Mental Health Counselor Associate" (LCMHCA) is a person engaged in the supervised practice of counseling who holds a license as a licensed clinical mental health counselor associate issued under the provisions of the LCMHC Act.</p>
-<p>The supervision contract must be completed using the Supervision Contract form provided by the Board. <strong>LCMHCAs must receive approval of the supervision contract prior to rendering counseling services.</strong> Notifications of approval (or denial) are sent to the supervisor. However, LCMHCAs can view their approved supervisors by logging in to their online update. Contracts are usually reviewed within 2 weeks of receipt.</p>
-<p>The licensee shall inform the Board of any change in his or her mailing address within 60 days after any change. Updated address information shall be submitted online via the licensee portal or by mail using the change of address form.</p>
-<p>The licensee shall inform the Board of any change in his or her name within 60 days after any change. A name change form shall be submitted and must include any required legal documentation, such as a marriage certificate, divorce decree, or court order.</p>
-<p>A current copy of this statement shall be provided to each client prior to the performance of professional counseling services. The counselor shall retain a file copy of the Professional Disclosure Statement signed by each client. An updated Professional Disclosure Statement (PDS) shall be submitted at the time of renewal to the Board's office.</p>
-<p>To order additional or replacement certificates, send a letter to the Board office stating your reasons for the request. Each additional certificate is $15. Your written request should include a check for the number of certificates needed, made payable to NCBLCMHC, and the Board payment form. The certificate remains the property of the North Carolina Board of Licensed Clinical Mental Health Counselors, which has the authority to suspend or revoke licenses for legal and/or ethical violations.</p>
-<p>Licensure can be verified online for free using the public information service provided by the Board. Citizens may utilize the online verification service to review the status of licensees in the state of North Carolina. If you need the Board to provide written verification of your license you may request license verifications by using the payment form or the online portal. License verifications are $5 each and take up to 2 weeks to process.</p>
-<p>Montana drivers can choose from a number of license plates, ranging from the standard plate to designs that reflect their military service or show their support for a Montana college or community organization.</p>
-<p>Yes. The chief election official of each State must make mail voter registration applications available for distribution through governmental and private entities, with particular emphasis on making them available for organized voter registration programs. Most states satisfy these requirements by, among other things, making applications available at local registrar offices, driver license offices, public assistance offices and disability-service offices, to groups doing voter registration drives, and through the internet on the website of the chief election official. These forms are also available on the website of the U.S. Election Assistance Commission.</p>
-<p>How do I get an occupational driver's license? <br />You will need to apply to a court for an order granting you an ODL. To apply, follow the steps and use the forms available here. If the judge grants the order, you take it to DPS, which will issue the ODL.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/High Pr Blog Commenting Sites List Free 2020 [Do-Follow] A Comprehensive Resource for Bloggers and Marketers.md b/spaces/bioriAsaeru/text-to-voice/High Pr Blog Commenting Sites List Free 2020 [Do-Follow] A Comprehensive Resource for Bloggers and Marketers.md
deleted file mode 100644
index b684523c836e5e0438c98c06891d60f174292de9..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/High Pr Blog Commenting Sites List Free 2020 [Do-Follow] A Comprehensive Resource for Bloggers and Marketers.md	
+++ /dev/null
@@ -1,39 +0,0 @@
-
-<p>A blog that allows commenting can face repercussions, such as spam. However, it also gives other websites a chance to grow, creating opportunities for sites operating in the same niche.</p>
-<p>In backlink terms, a do-follow link allows a search engine to follow the link back to your site. This type of off-site backlink can be created in several ways, one of which is blog commenting, which is why the term is relevant here.</p>
-<h2>High Pr Blog Commenting Sites List Free 2020 [Do-Follow]</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://urloso.com/2uyP13">https://urloso.com/2uyP13</a></b></p><br /><br />
-<p>Should you need more information or help with driving quality backlinks to your site, feel free to write to us. Our SEO experts would love to talk to you and suggest something that can help you with SEO blog commenting and other aspects of your site's SEO. Offering our SEO services (including stellar blog commenting services that get you the required traction) to clients in Australia, Canada, the USA, the UK, and the rest of the world alike, CANZ Marketing is making a difference in how you earn your revenue.</p>
-<p>Having a blog section on your website is very beneficial. Publishing relevant posts frequently will earn you good rankings and traffic, and blog commenting also plays a vital role in attracting visitors.<br />Quality blog content is essential for attracting good comments.</p>
-<p>There are many blog commenting sites on the internet, but always choose high-page-rank, do-follow blog commenting sites when building do-follow backlinks. Do-follow backlinks pass link juice to your own blog or website.</p>
-<p>This helps drive more web traffic to your blog or website. There are several benefits to using blog commenting sites to grow your blog or website; see below for the benefits of do-follow blog commenting sites:</p>
-<p>These are the most important benefits of the instant approval blog commenting sites list for 2019. Many bloggers consider blog commenting the best method of getting do-follow backlinks to a blog or site.</p>
-<p>In this way, you can build a recognized identity for your business or brand and popularize it worldwide. These sites also help drive conversions through comments on posts relevant to your business: if you run a shoe business, comment on shoe-related blog posts, and you can include your business details in the comment.</p>
-<p>Many people are not familiar with the actual process of blog commenting, but the method is not as hard as you might think. To solve that problem, here is exactly how to use these sites for commenting on blogs.</p>
-<p>I have added a list of the top technology blogs for blog commenting; these are top-rated blogs in the technology niche. These blogs should help you earn quality links, so submit your comments on top technology blogs to earn natural backlinks for your technology site.</p>
-<p>Nothing is better than blog commenting sites when you are looking for natural links for your food blog. The following do-follow blog commenting sites can help you earn quality links and improve your website's ranking.</p>
-<p>Improving the ranking of a real estate business in the SERPs is not easy. But a little extra effort, like commenting on high-quality real estate blogs, can earn you quality backlinks. I always give first priority to the blog commenting off-page SEO technique for improving search engine rankings.</p>
-<p>If you are looking for do-follow EDU blog commenting sites, you are on the right page to get a list of education blog commenting sites. These sites help you earn natural, quality links for your website. In the SEO era, an EDU backlink is considered a powerful backlink for good SERP rankings.</p>
-<p>Check out the following fashion blog commenting sites if you have decided to use the blog commenting technique for your fashion website. Also, follow the blog commenting guidelines when you submit comments on these high-quality fashion blogs.</p>
-<p>I have one suggestion for you: if you are going to submit comments on do-follow blog commenting sites, check all the outbound links on the page, as many marketers add abusive links there. You should avoid that type of blog. Below is the list of instant approval do-follow blog commenting sites in every niche you may be looking for.</p>
-<p>We should also avoid using an email address that is not connected to Gravatar; this can be a big mistake. If you want to drive traffic through WordPress blog commenting, generate leads, and grow your subscriber list, you should link your email address to Gravatar. Linking the email address to Gravatar is very easy; you can set it up quickly if you have a WordPress.com account.</p>
-<p>Using generic comments has become a big issue in blog commenting. Some people keep a list of generic comments and reuse these copied comments again and again on every blog to earn "natural" backlinks.</p>
-<p>This off-page SEO technique will always work if you do it properly. I always keep this SEO technique as a priority, commenting on high-quality blogs and earning quality backlinks from them. You can check my blog's backlinks too: 70% of them I have earned through the blog commenting SEO technique.</p>
-<p>You can get both types of links from blog commenting, but I have noticed one thing worth sharing: almost all webmasters offer no-follow links from their blogs, as this helps protect the website from spam attacks.</p>
-<p>Blog commenting is one of the most effective methods for earning natural backlinks that are approved by the admin and stay live indefinitely. First, you need to find blogs relevant to your business, then start submitting comments on the sites on your list.</p>
-<p>It should be clear from the above article what you need to do and what you must avoid when commenting on someone else's post or handling the comments on your own posts. Also, make sure it is not too hard to post, as your readers love your work and will be waiting for fresh content. If you are confused about how to get started with a blog, there are plenty of websites that can easily help you out.</p>
-<p>This type of commenting system is quite similar to Disqus, and it is also one of the most popular commenting systems for blogs. It takes blog commenting to another level by offering some amazing features. With this system, readers can comment on the blog using other services such as Twitter and Facebook, and those without a social media account can simply comment as a guest.</p>
-<p>It will draw people's attention, and they will comment on your blog more thoughtfully. I never forget to highlight the best comments on my blog posts, and I always reward users who add valuable, informative comments that help my audience get more clarity on blog commenting techniques.</p>
-<p>Yes, search queries are very effective for finding niche blog commenting sites, as Google always ranks websites that provide useful information for users at the top. I also recommend using search queries to find niche-relevant blogs for commenting.</p>
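-<p>For example (these are illustrative queries, not from any official list), you can combine your niche keyword with common comment-form footprints in Google: <code>"your keyword" "leave a comment"</code>, <code>"your keyword" inurl:blog "post a comment"</code>, or <code>site:.edu inurl:blog "comments"</code> for education blogs.</p>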
-<p>Thanks for sharing this list. The links above really help us create quality backlinks for our site. I need more links for blog commenting. Let me know if you have a list of blog commenting sites for essay writing.</p>
-<p>Thanks for sharing this valuable information. But I am searching for beauty- and hotel-related blog lists, and I have not found any beauty- or hotel-related blog commenting sites here. Could you please update the list with beauty and hotel blog commenting sites?</p>
-<p>Blog commenting is also a simple way to build backlinks for our blogs and websites. So, in this article, we discuss blog commenting. You will also get a list of high-authority blog commenting sites with which you can create good backlinks for your website.</p>
-<p>Hey man, thank you for this amazing and fruitful list of do-follow blog commenting sites.<br />These sites have helped me a lot with blog commenting, and the thing which I like the most is that you have listed sites that mostly have high PA/DA, which is very good for generating good backlinks.</p>
-<p>Great post!<br />It is very difficult to find blog commenting sites, and you have put together a good collection of them. Thanks for sharing this post; it will be very helpful for me and for anyone who has difficulty finding blog commenting sites.</p>
-<p>Blog commenting is an excellent SEO technique, though finding the right blog commenting sites to leave comments on is quite a challenge. Thank you for sharing this wonderful piece of information and these helpful insights.</p>
-<p>Blog commenting is an amazing SEO strategy. It is difficult to find blog commenting sites for a similar niche, and commenting on high-quality websites is important. You have shared valuable information with DA ranks, which is helpful for commenting on high-quality websites. Thanks for sharing.<br />Keep sharing valuable posts.<br />Thanks</p>
-<p>Blog commenting is an amazing off-page SEO technique for creating quality links from niche-relevant sites. I have found blogs here for commenting in every niche, which really makes my SEO process easy. Can you add a list of education blog commenting sites?</p>
-<p>Thanks for making this great list of blog commenting sites in every niche. Can you update your post with travel-niche blog commenting sites? I did my best to research travel blogs, but without success.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/bird-watching-society-of-greater-clare/brainy/static/index.html b/spaces/bird-watching-society-of-greater-clare/brainy/static/index.html
deleted file mode 100644
index 3a51c95f6c07e4230ad846a246c4743e8a06832c..0000000000000000000000000000000000000000
--- a/spaces/bird-watching-society-of-greater-clare/brainy/static/index.html
+++ /dev/null
@@ -1,213 +0,0 @@
-<!DOCTYPE html>
-<html>
-    <head>
-    <title>Chatbot</title>
-        <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    </head>
-    <body>
-        <div id="ui-panel">
-            <div id="message-box"></div>
-            <form id="message-form">
-                <input id="message-input" type="text" placeholder="Type your message here...">
-                <button type="submit"><svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-send-fill" viewBox="0 0 16 16">
-                    <path d="M15.964.686a.5.5 0 0 0-.65-.65L.767 5.855H.766l-.452.18a.5.5 0 0 0-.082.887l.41.26.001.002 4.995 3.178 3.178 4.995.002.002.26.41a.5.5 0 0 0 .886-.083l6-15Zm-1.833 1.89L6.637 10.07l-.215-.338a.5.5 0 0 0-.154-.154l-.338-.215 7.494-7.494 1.178-.471-.47 1.178Z"/>
-                  </svg></button>
-            </form>
-        </div>
-        <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
-        <script src="static/js/chat.js"></script>
-    </body>
-</html>
-
-<style>
-@import url('https://fonts.googleapis.com/css2?family=Nunito&display=swap');
-html {
-    height: 100%;
-    width: 100%;
-    background: linear-gradient(to bottom right, #94e1ff, #f7ff96);
-    display: table;
-    margin: auto;
-}
-
-body {
-    font-family: 'Nunito', sans-serif;
-    display: table-cell;
-    vertical-align: middle;
-    height: 80%;
-}
-
-#ui-panel{
-    background-color: #f7f7f7;
-    border: 2px solid #ccc;
-    border-radius: 20px;
-    margin: auto;
-    vertical-align: middle;
-    word-wrap: break-word;
-    overflow-wrap: break-word;
-    width: 80%;
-    max-width: 40em;
-    box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.2);
-}
-
-#message-box {
-    overflow-y: scroll;
-    margin: 0.5em;
-    padding: 0em 1.5em;
-    height: 500px;
-}
-
-#message-form {
-    margin: 1em auto;
-    width: 90%;
-    height: 50px;
-    display: flex;
-}
-
-#message-form button {
-    border-radius: 50%;
-    width: 60px; /* You can adjust the width and height to your desired size */
-    height: 50px;
-    color: white;
-    background-color: #239ecd;
-    border: none;
-}
-
-#message-input {
-    width: 100%;
-    font-size: 16px;
-    padding: 10px;
-    border-radius: 10px;
-    margin-right: 10px;
-    border-color: #ccc;
-}
-
-#message-box .chatbot-message {
-    background-color: #239ecd;
-    color: white;
-    border-radius: 20px 20px 20px 0;
-    margin-bottom: 10px;
-    margin-right: 80px;
-    padding: 15px;
-    position: relative;
-    display: flex;
-    align-items: center; /* Vertically center the icon and message text */
-    box-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
-}
-
-#message-box p.you {
-    background-color: #8c8c8c;
-    color: white;
-    margin-bottom: 10px;
-    border-radius: 20px 20px 0 20px;
-    margin-left: 80px;
-    padding: 10px 20px;
-    position: relative;
-    display: flex;
-    align-items: center; /* Vertically center the message content */
-    justify-content: flex-end;
-    box-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
-    overflow-wrap: break-word;
-}
-
-#message-box .chatbot-message img.circle-icon {
-    width: 40px;
-    height: 40px;
-    margin-right: 10px;
-    border-radius: 50%;
-}
-
-#message-box .chatbot-message span {
-    font-size: 14px;
-    font-weight: bold;
-}
-
-#message-box .chatbot-message p {
-    margin: 5px 0 0 0;
-}
-
-#message-box .chatbot-message .message-time {
-    position: absolute;
-    bottom: 0;
-    right: 0;
-    font-size: 12px;
-    margin-bottom: 5px;
-    margin-right: 5px;
-}
-
-#message-box .you-message {
-  text-align: right;
-}
-
-#message-box .you-message span {
-    font-size: 14px;
-    font-weight: bold;
-}
-
-#message-box .you-message p {
-    margin: 5px 0 0 0;
-}
-#message-box .you-div {
-    text-align: right;
-}
-
-#message-box .you-message .message-time {
-    position: absolute;
-    bottom: 0;
-    left: 0;
-    font-size: 12px;
-    margin-bottom: 5px;
-    margin-left: 5px;
-}
-
-#message-box .labelText {
-    color: grey;
-    font-size: 14px;
-}
-
-</style>
-
-<script>
-    $(document).ready(function() {
-        
-        var messageBox = $('#message-box');
-        let user_id = "";
-
-        function brainyMessage(message) {
-            messageBox.append('<p class="chatbot-message"><img class="circle-icon" src="static/images/mascot_fc7a89.jpg"><span class="chatbot-text">'+message+'</span></p><span class="labelText">Brainy</span></div>');
-            messageBox.scrollTop(messageBox.prop("scrollHeight"));
-        }
-
-        function userMessage(message) {
-            // Use a class rather than a repeated id so multiple user messages stay valid HTML.
-            messageBox.append('<div class="you-div"><p class="you">'+message+'</p><span class="labelText">You</span></div>');
-            messageBox.scrollTop(messageBox.prop("scrollHeight"));
-        }
-
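-        // Server contract assumed by the two handlers below (inferred from how
-        // the responses are used here, not from separate API documentation):
-        //   GET /get_initial                          -> {"response": greeting, "user_id": id}
-        //   GET /get_response?message=...&user_id=... -> {"response": reply}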
-        $.ajax({
-            url: '/get_initial',
-            data: {query: ''},
-            type: 'GET',
-            success: function(response) {
-                brainyMessage(response.response);
-                user_id = response.user_id;
-            }
-        });
-    
-        $('#message-form').on('submit', function(event) {
-            event.preventDefault();
-            var message = $('#message-input').val();
-            userMessage(message);
-            $('#message-input').val('');
-            $.ajax({
-                url: '/get_response',
-                data: {"message": message, "user_id": user_id},
-                type: 'GET',
-                success: function(response) {
-                    brainyMessage(response.response);
-                }
-            });
-        });
-    });
-</script>
diff --git a/spaces/brainblow/MusiCreator/audiocraft/models/encodec.py b/spaces/brainblow/MusiCreator/audiocraft/models/encodec.py
deleted file mode 100644
index 69621a695887b0b41614c51cae020f6fd0af221d..0000000000000000000000000000000000000000
--- a/spaces/brainblow/MusiCreator/audiocraft/models/encodec.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from abc import ABC, abstractmethod
-import typing as tp
-
-from einops import rearrange
-import torch
-from torch import nn
-
-from .. import quantization as qt
-
-
-class CompressionModel(ABC, nn.Module):
-
-    @abstractmethod
-    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
-        ...
-
-    @abstractmethod
-    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
-        """See `EncodecModel.encode`"""
-        ...
-
-    @abstractmethod
-    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
-        """See `EncodecModel.decode`"""
-        ...
-
-    @property
-    @abstractmethod
-    def channels(self) -> int:
-        ...
-
-    @property
-    @abstractmethod
-    def frame_rate(self) -> int:
-        ...
-
-    @property
-    @abstractmethod
-    def sample_rate(self) -> int:
-        ...
-
-    @property
-    @abstractmethod
-    def cardinality(self) -> int:
-        ...
-
-    @property
-    @abstractmethod
-    def num_codebooks(self) -> int:
-        ...
-
-    @property
-    @abstractmethod
-    def total_codebooks(self) -> int:
-        ...
-
-    @abstractmethod
-    def set_num_codebooks(self, n: int):
-        """Set the active number of codebooks used by the quantizer.
-        """
-        ...
-
-
-class EncodecModel(CompressionModel):
-    """Encodec model operating on the raw waveform.
-
-    Args:
-        encoder (nn.Module): Encoder network.
-        decoder (nn.Module): Decoder network.
-        quantizer (qt.BaseQuantizer): Quantizer network.
-        frame_rate (int): Frame rate for the latent representation.
-        sample_rate (int): Audio sample rate.
-        channels (int): Number of audio channels.
-        causal (bool): Whether to use a causal version of the model.
-        renormalize (bool): Whether to renormalize the audio before running the model.
-    """
-    # we need assignment to override the property in the abstract class,
-    # I couldn't find a better way...
-    frame_rate: int = 0
-    sample_rate: int = 0
-    channels: int = 0
-
-    def __init__(self,
-                 encoder: nn.Module,
-                 decoder: nn.Module,
-                 quantizer: qt.BaseQuantizer,
-                 frame_rate: int,
-                 sample_rate: int,
-                 channels: int,
-                 causal: bool = False,
-                 renormalize: bool = False):
-        super().__init__()
-        self.encoder = encoder
-        self.decoder = decoder
-        self.quantizer = quantizer
-        self.frame_rate = frame_rate
-        self.sample_rate = sample_rate
-        self.channels = channels
-        self.renormalize = renormalize
-        self.causal = causal
-        if self.causal:
-            # we force disabling here to avoid handling linear overlap of segments
-            # as supported in original EnCodec codebase.
-            assert not self.renormalize, 'Causal model does not support renormalize'
-
-    @property
-    def total_codebooks(self):
-        """Total number of quantizer codebooks available.
-        """
-        return self.quantizer.total_codebooks
-
-    @property
-    def num_codebooks(self):
-        """Active number of codebooks used by the quantizer.
-        """
-        return self.quantizer.num_codebooks
-
-    def set_num_codebooks(self, n: int):
-        """Set the active number of codebooks used by the quantizer.
-        """
-        self.quantizer.set_num_codebooks(n)
-
-    @property
-    def cardinality(self):
-        """Cardinality of each codebook.
-        """
-        return self.quantizer.bins
-
-    def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
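-        # When renormalize is enabled, divide the waveform by its RMS volume
-        # (computed on the mono mix, floored at 1e-8) and return the scale so
-        # postprocess()/decode() can undo the normalization later.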
-        scale: tp.Optional[torch.Tensor]
-        if self.renormalize:
-            mono = x.mean(dim=1, keepdim=True)
-            volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
-            scale = 1e-8 + volume
-            x = x / scale
-            scale = scale.view(-1, 1)
-        else:
-            scale = None
-        return x, scale
-
-    def postprocess(self,
-                    x: torch.Tensor,
-                    scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
-        if scale is not None:
-            assert self.renormalize
-            x = x * scale.view(-1, 1, 1)
-        return x
-
-    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
-        assert x.dim() == 3
-        length = x.shape[-1]
-        x, scale = self.preprocess(x)
-
-        emb = self.encoder(x)
-        q_res = self.quantizer(emb, self.frame_rate)
-        out = self.decoder(q_res.x)
-
-        # remove extra padding added by the encoder and decoder
-        assert out.shape[-1] >= length, (out.shape[-1], length)
-        out = out[..., :length]
-
-        q_res.x = self.postprocess(out, scale)
-
-        return q_res
-
-    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
-        """Encode the given input tensor to quantized representation along with scale parameter.
-
-        Args:
-            x (torch.Tensor): Float tensor of shape [B, C, T]
-
-        Returns:
-            codes, scale (tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]): Tuple composed of:
-                codes, an int tensor of shape [B, K, T], with K the number of codebooks used and T the number of timesteps.
-                scale, a float tensor containing the scale for audio renormalization (or None when renormalize is disabled).
-        """
-        assert x.dim() == 3
-        x, scale = self.preprocess(x)
-        emb = self.encoder(x)
-        codes = self.quantizer.encode(emb)
-        return codes, scale
-
-    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
-        """Decode the given codes to a reconstructed representation, using the scale to perform
-        audio denormalization if needed.
-
-        Args:
-            codes (torch.Tensor): Int tensor of shape [B, K, T]
-            scale (tp.Optional[torch.Tensor]): Float tensor containing the scale value.
-
-        Returns:
-            out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
-        """
-        emb = self.quantizer.decode(codes)
-        out = self.decoder(emb)
-        out = self.postprocess(out, scale)
-        # out contains extra padding added by the encoder and decoder
-        return out
-
-
-class FlattenedCompressionModel(CompressionModel):
-    """Wraps a CompressionModel and flatten its codebooks, e.g.
-    instead of returning [B, K, T], return [B, S, T * (K // S)] with
-    S the number of codebooks per step, and `K // S` the number of 'virtual steps'
-    for each real time step.
-
-    Args:
-        model (CompressionModel): compression model to wrap.
-        codebooks_per_step (int): number of codebooks to keep per step,
-            this must divide the number of codebooks provided by the wrapped model.
-        extend_cardinality (bool): if True, and for instance if codebooks_per_step = 1,
-            if each codebook has a cardinality N, then the first codebook will
-            use the range [0, N - 1], and the second [N, 2 N - 1] etc.
-            On decoding, this can lead to potentially invalid sequences.
-            Any invalid entry will be silently remapped to the proper range
-            with a modulo.
-    """
-    def __init__(self, model: CompressionModel, codebooks_per_step: int = 1,
-                 extend_cardinality: bool = True):
-        super().__init__()
-        self.model = model
-        self.codebooks_per_step = codebooks_per_step
-        self.extend_cardinality = extend_cardinality
-
-    @property
-    def total_codebooks(self):
-        return self.model.total_codebooks
-
-    @property
-    def num_codebooks(self):
-        """Active number of codebooks used by the quantizer.
-
-        ..Warning:: this reports the number of codebooks after the flattening
-        of the codebooks!
-        """
-        assert self.model.num_codebooks % self.codebooks_per_step == 0
-        return self.codebooks_per_step
-
-    def set_num_codebooks(self, n: int):
-        """Set the active number of codebooks used by the quantizer.
-
-        ..Warning:: this sets the number of codebooks **before** the flattening
-        of the codebooks.
-        """
-        assert n % self.codebooks_per_step == 0
-        self.model.set_num_codebooks(n)
-
-    @property
-    def num_virtual_steps(self) -> int:
-        """Return the number of virtual steps, e.g. one real step
-        will be split into that many steps.
-        """
-        return self.model.num_codebooks // self.codebooks_per_step
-
-    @property
-    def frame_rate(self) -> int:
-        return self.model.frame_rate * self.num_virtual_steps
-
-    @property
-    def sample_rate(self) -> int:
-        return self.model.sample_rate
-
-    @property
-    def channels(self) -> int:
-        return self.model.channels
-
-    @property
-    def cardinality(self):
-        """Cardinality of each codebook.
-        """
-        if self.extend_cardinality:
-            return self.model.cardinality * self.num_virtual_steps
-        else:
-            return self.model.cardinality
-
-    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
-        raise NotImplementedError("Not supported, use encode and decode.")
-
-    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
-        indices, scales = self.model.encode(x)
-        B, K, T = indices.shape
-        indices = rearrange(indices, 'b (k v) t -> b k t v', k=self.codebooks_per_step)
-        if self.extend_cardinality:
-            for virtual_step in range(1, self.num_virtual_steps):
-                indices[..., virtual_step] += self.model.cardinality * virtual_step
-        indices = rearrange(indices, 'b k t v -> b k (t v)')
-        return (indices, scales)
-
-    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
-        B, K, T = codes.shape
-        assert T % self.num_virtual_steps == 0
-        codes = rearrange(codes, 'b k (t v) -> b (k v) t', v=self.num_virtual_steps)
-        # We silently ignore potential errors from the LM when
-        # using extend_cardinality.
-        codes = codes % self.model.cardinality
-        return self.model.decode(codes, scale)
diff --git a/spaces/brainblow/MusiCreator/audiocraft/utils/__init__.py b/spaces/brainblow/MusiCreator/audiocraft/utils/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/brainblow/MusiCreator/audiocraft/utils/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/breadlicker45/badapple/app.py b/spaces/breadlicker45/badapple/app.py
deleted file mode 100644
index c7f998a07f86fe83f38b9aee6821cee99eca62ae..0000000000000000000000000000000000000000
--- a/spaces/breadlicker45/badapple/app.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import streamlit as st
-# Read the local video file and hand its raw bytes to Streamlit's video player.
-with open('apple.mp4', 'rb') as video_file:
-    video_bytes = video_file.read()
-st.video(video_bytes)
\ No newline at end of file
diff --git a/spaces/brjathu/HMR2.0/app_mesh.py b/spaces/brjathu/HMR2.0/app_mesh.py
deleted file mode 100644
index 923a3274a6e09d9862f8f39a651a2f61e5ebf6a6..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/app_mesh.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import argparse
-import os
-from pathlib import Path
-
-import cv2
-import gradio as gr
-import numpy as np
-import torch
-from PIL import Image
-import trimesh
-import tempfile
-
-from hmr2.configs import get_config
-from hmr2.datasets.vitdet_dataset import (DEFAULT_MEAN, DEFAULT_STD,
-                                          ViTDetDataset)
-from hmr2.models import HMR2
-from hmr2.utils import recursive_to
-from hmr2.utils.renderer import Renderer, cam_crop_to_full
-
-# Setup HMR2.0 model
-LIGHT_BLUE=(0.65098039,  0.74117647,  0.85882353)
-DEFAULT_CHECKPOINT='logs/train/multiruns/hmr2/0/checkpoints/epoch=35-step=1000000.ckpt'
-device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
-model_cfg = str(Path(DEFAULT_CHECKPOINT).parent.parent / 'model_config.yaml')
-model_cfg = get_config(model_cfg)
-model = HMR2.load_from_checkpoint(DEFAULT_CHECKPOINT, strict=False, cfg=model_cfg).to(device)
-model.eval()
-
-
-# Load detector
-from detectron2.config import LazyConfig
-
-from hmr2.utils.utils_detectron2 import DefaultPredictor_Lazy
-
-detectron2_cfg = LazyConfig.load(f"vendor/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_h_75ep.py")
-detectron2_cfg.train.init_checkpoint = "https://dl.fbaipublicfiles.com/detectron2/ViTDet/COCO/cascade_mask_rcnn_vitdet_h/f328730692/model_final_f05665.pkl"
-for i in range(3):
-    detectron2_cfg.model.roi_heads.box_predictors[i].test_score_thresh = 0.25
-detector = DefaultPredictor_Lazy(detectron2_cfg)
-
-# Setup the renderer
-renderer = Renderer(model_cfg, faces=model.smpl.faces)
-
-
-def infer(in_pil_img, in_threshold=0.8):
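-    # Pipeline: ViTDet detects people above the score threshold, HMR 2.0
-    # regresses a mesh per detection, and all meshes are concatenated and
-    # exported as a single .obj for the Model3D viewer.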
-
-    open_cv_image = np.array(in_pil_img)
-    # Convert RGB (PIL) to BGR (the channel order the detector expects)
-    open_cv_image = open_cv_image[:, :, ::-1].copy()
-    det_out = detector(open_cv_image)
-    det_instances = det_out['instances']
-    valid_idx = (det_instances.pred_classes==0) & (det_instances.scores > in_threshold)
-    boxes=det_instances.pred_boxes.tensor[valid_idx].cpu().numpy()
-
-    # Run HMR2.0 on all detected humans
-    dataset = ViTDetDataset(model_cfg, open_cv_image, boxes)
-    dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=False, num_workers=0)
-
-    all_verts = []
-    all_cam_t = []
-
-    for batch in dataloader:
-        batch = recursive_to(batch, device)
-        with torch.no_grad():
-            out = model(batch)
-
-        pred_cam = out['pred_cam']
-        box_center = batch["box_center"].float()
-        box_size = batch["box_size"].float()
-        img_size = batch["img_size"].float()
-        render_size = img_size
-        pred_cam_t = cam_crop_to_full(pred_cam, box_center, box_size, render_size, focal_length=img_size.mean()*2).detach().cpu().numpy()
-
-        # Render the result
-        batch_size = batch['img'].shape[0]
-        for n in range(batch_size):
-            # Get filename from path img_path
-            # img_fn, _ = os.path.splitext(os.path.basename(img_path))
-            person_id = int(batch['personid'][n])
-            white_img = (torch.ones_like(batch['img'][n]).cpu() - DEFAULT_MEAN[:,None,None]/255) / (DEFAULT_STD[:,None,None]/255)
-            input_patch = batch['img'][n].cpu() * (DEFAULT_STD[:,None,None]/255) + (DEFAULT_MEAN[:,None,None]/255)
-            input_patch = input_patch.permute(1,2,0).numpy()
-
-            regression_img = renderer(out['pred_vertices'][n].detach().cpu().numpy(),
-                                    out['pred_cam_t'][n].detach().cpu().numpy(),
-                                    batch['img'][n],
-                                    mesh_base_color=LIGHT_BLUE,
-                                    scene_bg_color=(1, 1, 1),
-                                    )
-
-
-            verts = out['pred_vertices'][n].detach().cpu().numpy()
-            cam_t = pred_cam_t[n]
-
-            all_verts.append(verts)
-            all_cam_t.append(cam_t)
-
-    # Return mesh path
-    trimeshes = [renderer.vertices_to_trimesh(vvv, ttt.copy(), LIGHT_BLUE) for vvv,ttt in zip(all_verts, all_cam_t)]
-
-    # Join meshes
-    mesh = trimesh.util.concatenate(trimeshes)
-
-    # Save mesh to a uniquely named temporary file (mkstemp is the public API;
-    # tempfile._get_candidate_names is private and may break between versions)
-    fd, temp_name = tempfile.mkstemp(suffix='.obj')
-    os.close(fd)
-    trimesh.exchange.export.export_mesh(mesh, temp_name)
-    return temp_name
-
-
-with gr.Blocks(title="4DHumans", css=".gradio-container") as demo:
-
-    gr.HTML("""<div style="font-weight:bold; text-align:center; color:royalblue;">HMR 2.0</div>""")
-
-    with gr.Row():
-        input_image = gr.Image(label="Input image", type="pil", width=300, height=300, fixed_size=True)
-        output_model = gr.Model3D(label="Reconstructions", width=300, height=300, fixed_size=True, clear_color=[0.0, 0.0, 0.0, 0.0])
-
-    gr.HTML("""<br/>""")
-
-    with gr.Row():
-        threshold = gr.Slider(0, 1.0, value=0.8, label='Detection Threshold')
-        send_btn = gr.Button("Infer")
-        send_btn.click(fn=infer, inputs=[input_image, threshold], outputs=[output_model])
-
-    # gr.Examples(['samples/img1.jpg', 'samples/img2.png', 'samples/img3.jpg', 'samples/img4.jpg'], inputs=input_image)
-
-    gr.HTML("""</ul>""")
-
-
-
-#demo.queue()
-demo.launch(debug=True)
-
-
-
-
-### EOF ###
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/meta_arch/build.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/meta_arch/build.py
deleted file mode 100644
index 3427215746c9a146bd902f22ea9b26d121c36b27..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/meta_arch/build.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import torch
-
-from detectron2.utils.logger import _log_api_usage
-from detectron2.utils.registry import Registry
-
-META_ARCH_REGISTRY = Registry("META_ARCH")  # noqa F401 isort:skip
-META_ARCH_REGISTRY.__doc__ = """
-Registry for meta-architectures, i.e. the whole model.
-
-The registered object will be called with `obj(cfg)`
-and expected to return a `nn.Module` object.
-"""
-
-
-def build_model(cfg):
-    """
-    Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
-    Note that it does not load any weights from ``cfg``.
-    """
-    meta_arch = cfg.MODEL.META_ARCHITECTURE
-    model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
-    model.to(torch.device(cfg.MODEL.DEVICE))
-    _log_api_usage("modeling.meta_arch." + meta_arch)
-    return model
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/inference.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/inference.py
deleted file mode 100644
index 81049649edddb23aeebeac4085514da838f1463b..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/inference.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from dataclasses import fields
-from typing import Any, List
-import torch
-
-from detectron2.structures import Instances
-
-
-def densepose_inference(densepose_predictor_output: Any, detections: List[Instances]) -> None:
-    """
-    Splits DensePose predictor outputs into chunks, each chunk corresponds to
-    detections on one image. Predictor output chunks are stored in `pred_densepose`
-    attribute of the corresponding `Instances` object.
-
-    Args:
-        densepose_predictor_output: a dataclass instance (can be of different types,
-            depending on predictor used for inference). Each field can be `None`
-            (if the corresponding output was not inferred) or a tensor of size
-            [N, ...], where N = N_1 + N_2 + .. + N_k is a total number of
-            detections on all images, N_1 is the number of detections on image 1,
-            N_2 is the number of detections on image 2, etc.
-        detections: a list of objects of type `Instance`, k-th object corresponds
-            to detections on k-th image.
-    """
-    k = 0
-    for detection_i in detections:
-        if densepose_predictor_output is None:
-            # don't add `pred_densepose` attribute
-            continue
-        n_i = len(detection_i)
-
-        PredictorOutput = type(densepose_predictor_output)
-        output_i_dict = {}
-        # we assume here that `densepose_predictor_output` is a dataclass object
-        for field in fields(densepose_predictor_output):
-            field_value = getattr(densepose_predictor_output, field.name)
-            # slice tensors
-            if isinstance(field_value, torch.Tensor):
-                output_i_dict[field.name] = field_value[k : k + n_i]
-            # leave others as is
-            else:
-                output_i_dict[field.name] = field_value
-        detection_i.pred_densepose = PredictorOutput(**output_i_dict)
-        k += n_i
diff --git a/spaces/cat630/ChuanhuChatGPT/presets.py b/spaces/cat630/ChuanhuChatGPT/presets.py
deleted file mode 100644
index b30441e13d1d46c60b933a60d2bacc2c3d778e42..0000000000000000000000000000000000000000
--- a/spaces/cat630/ChuanhuChatGPT/presets.py
+++ /dev/null
@@ -1,31 +0,0 @@
-title = """<h1 align="center">川虎ChatGPT 🚀</h1>"""
-description = """<div align=center>
-
-由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
-
-访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本
-
-此App使用 `gpt-3.5-turbo` 大语言模型
-</div>
-"""
-customCSS = """
-code {
-    display: inline;
-    white-space: break-spaces;
-    border-radius: 6px;
-    margin: 0 2px 0 2px;
-    padding: .2em .4em .1em .4em;
-    background-color: rgba(175,184,193,0.2);
-}
-pre code {
-    display: block;
-    white-space: pre;
-    background-color: hsla(0, 0%, 0%, 72%);
-    border: solid 5px var(--color-border-primary) !important;
-    border-radius: 10px;
-    padding: 0 1.2rem 1.2rem;
-    margin-top: 1em !important;
-    color: #FFF;
-    box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
-}
-"""
diff --git a/spaces/chilge/Fushimi/modules.py b/spaces/chilge/Fushimi/modules.py
deleted file mode 100644
index 52ee14e41a5b6d67d875d1b694aecd2a51244897..0000000000000000000000000000000000000000
--- a/spaces/chilge/Fushimi/modules.py
+++ /dev/null
@@ -1,342 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
-  def __init__(self, channels, eps=1e-5):
-    super().__init__()
-    self.channels = channels
-    self.eps = eps
-
-    self.gamma = nn.Parameter(torch.ones(channels))
-    self.beta = nn.Parameter(torch.zeros(channels))
-
-  def forward(self, x):
-    x = x.transpose(1, -1)
-    x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-    return x.transpose(1, -1)
-
- 
-class ConvReluNorm(nn.Module):
-  def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-    super().__init__()
-    self.in_channels = in_channels
-    self.hidden_channels = hidden_channels
-    self.out_channels = out_channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-    assert n_layers > 1, "Number of layers should be larger than 1."
-
-    self.conv_layers = nn.ModuleList()
-    self.norm_layers = nn.ModuleList()
-    self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-    self.norm_layers.append(LayerNorm(hidden_channels))
-    self.relu_drop = nn.Sequential(
-        nn.ReLU(),
-        nn.Dropout(p_dropout))
-    for _ in range(n_layers-1):
-      self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-      self.norm_layers.append(LayerNorm(hidden_channels))
-    self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-    self.proj.weight.data.zero_()
-    self.proj.bias.data.zero_()
-
-  def forward(self, x, x_mask):
-    x_org = x
-    for i in range(self.n_layers):
-      x = self.conv_layers[i](x * x_mask)
-      x = self.norm_layers[i](x)
-      x = self.relu_drop(x)
-    x = x_org + self.proj(x)
-    return x * x_mask
-
-
-class DDSConv(nn.Module):
-  """
-  Dilated and Depth-Separable Convolution
-  """
-  def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-    super().__init__()
-    self.channels = channels
-    self.kernel_size = kernel_size
-    self.n_layers = n_layers
-    self.p_dropout = p_dropout
-
-    self.drop = nn.Dropout(p_dropout)
-    self.convs_sep = nn.ModuleList()
-    self.convs_1x1 = nn.ModuleList()
-    self.norms_1 = nn.ModuleList()
-    self.norms_2 = nn.ModuleList()
-    for i in range(n_layers):
-      dilation = kernel_size ** i
-      padding = (kernel_size * dilation - dilation) // 2
-      self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, 
-          groups=channels, dilation=dilation, padding=padding
-      ))
-      self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-      self.norms_1.append(LayerNorm(channels))
-      self.norms_2.append(LayerNorm(channels))
-
-  def forward(self, x, x_mask, g=None):
-    if g is not None:
-      x = x + g
-    for i in range(self.n_layers):
-      y = self.convs_sep[i](x * x_mask)
-      y = self.norms_1[i](y)
-      y = F.gelu(y)
-      y = self.convs_1x1[i](y)
-      y = self.norms_2[i](y)
-      y = F.gelu(y)
-      y = self.drop(y)
-      x = x + y
-    return x * x_mask
-
-
-class WN(torch.nn.Module):
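-  # WaveNet-style stack: dilated convolutions with gated tanh/sigmoid
-  # activations (commons.fused_add_tanh_sigmoid_multiply) and 1x1
-  # residual/skip projections; optional global conditioning g is injected
-  # per layer through cond_layer.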
-  def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-    super(WN, self).__init__()
-    assert(kernel_size % 2 == 1)
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.gin_channels = gin_channels
-    self.p_dropout = p_dropout
-
-    self.in_layers = torch.nn.ModuleList()
-    self.res_skip_layers = torch.nn.ModuleList()
-    self.drop = nn.Dropout(p_dropout)
-
-    if gin_channels != 0:
-      cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-      self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-    for i in range(n_layers):
-      dilation = dilation_rate ** i
-      padding = int((kernel_size * dilation - dilation) / 2)
-      in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                 dilation=dilation, padding=padding)
-      in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-      self.in_layers.append(in_layer)
-
-      # last one is not necessary
-      if i < n_layers - 1:
-        res_skip_channels = 2 * hidden_channels
-      else:
-        res_skip_channels = hidden_channels
-
-      res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-      res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-      self.res_skip_layers.append(res_skip_layer)
-
-  def forward(self, x, x_mask, g=None, **kwargs):
-    output = torch.zeros_like(x)
-    n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-    if g is not None:
-      g = self.cond_layer(g)
-
-    for i in range(self.n_layers):
-      x_in = self.in_layers[i](x)
-      if g is not None:
-        cond_offset = i * 2 * self.hidden_channels
-        g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
-      else:
-        g_l = torch.zeros_like(x_in)
-
-      acts = commons.fused_add_tanh_sigmoid_multiply(
-          x_in,
-          g_l,
-          n_channels_tensor)
-      acts = self.drop(acts)
-
-      res_skip_acts = self.res_skip_layers[i](acts)
-      if i < self.n_layers - 1:
-        res_acts = res_skip_acts[:,:self.hidden_channels,:]
-        x = (x + res_acts) * x_mask
-        output = output + res_skip_acts[:,self.hidden_channels:,:]
-      else:
-        output = output + res_skip_acts
-    return output * x_mask
-
-  def remove_weight_norm(self):
-    if self.gin_channels != 0:
-      torch.nn.utils.remove_weight_norm(self.cond_layer)
-    for l in self.in_layers:
-      torch.nn.utils.remove_weight_norm(l)
-    for l in self.res_skip_layers:
-      torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-        super(ResBlock1, self).__init__()
-        self.convs1 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                               padding=get_padding(kernel_size, dilation[2])))
-        ])
-        self.convs1.apply(init_weights)
-
-        self.convs2 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1)))
-        ])
-        self.convs2.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c1, c2 in zip(self.convs1, self.convs2):
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c1(xt)
-            xt = F.leaky_relu(xt, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c2(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs1:
-            remove_weight_norm(l)
-        for l in self.convs2:
-            remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-        super(ResBlock2, self).__init__()
-        self.convs = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1])))
-        ])
-        self.convs.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c in self.convs:
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs:
-            remove_weight_norm(l)
-
-
-class Log(nn.Module):
-  def forward(self, x, x_mask, reverse=False, **kwargs):
-    if not reverse:
-      y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-      logdet = torch.sum(-y, [1, 2])
-      return y, logdet
-    else:
-      x = torch.exp(x) * x_mask
-      return x
-    
-
-class Flip(nn.Module):
-  def forward(self, x, *args, reverse=False, **kwargs):
-    x = torch.flip(x, [1])
-    if not reverse:
-      logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-      return x, logdet
-    else:
-      return x
-
-
-class ElementwiseAffine(nn.Module):
-  def __init__(self, channels):
-    super().__init__()
-    self.channels = channels
-    self.m = nn.Parameter(torch.zeros(channels,1))
-    self.logs = nn.Parameter(torch.zeros(channels,1))
-
-  def forward(self, x, x_mask, reverse=False, **kwargs):
-    if not reverse:
-      y = self.m + torch.exp(self.logs) * x
-      y = y * x_mask
-      logdet = torch.sum(self.logs * x_mask, [1,2])
-      return y, logdet
-    else:
-      x = (x - self.m) * torch.exp(-self.logs) * x_mask
-      return x
-
-
-class ResidualCouplingLayer(nn.Module):
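-  # Affine coupling layer: the first half of the channels (x0) parameterizes
-  # an elementwise affine transform of the second half (x1 -> m + x1 * exp(logs)),
-  # which is exactly invertible; the forward pass also returns the
-  # log-determinant sum(logs) needed for flow-based training.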
-  def __init__(self,
-      channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      p_dropout=0,
-      gin_channels=0,
-      mean_only=False):
-    assert channels % 2 == 0, "channels should be divisible by 2"
-    super().__init__()
-    self.channels = channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.half_channels = channels // 2
-    self.mean_only = mean_only
-
-    self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-    self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-    self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-    self.post.weight.data.zero_()
-    self.post.bias.data.zero_()
-
-  def forward(self, x, x_mask, g=None, reverse=False):
-    x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-    h = self.pre(x0) * x_mask
-    h = self.enc(h, x_mask, g=g)
-    stats = self.post(h) * x_mask
-    if not self.mean_only:
-      m, logs = torch.split(stats, [self.half_channels]*2, 1)
-    else:
-      m = stats
-      logs = torch.zeros_like(m)
-
-    if not reverse:
-      x1 = m + x1 * torch.exp(logs) * x_mask
-      x = torch.cat([x0, x1], 1)
-      logdet = torch.sum(logs, [1,2])
-      return x, logdet
-    else:
-      x1 = (x1 - m) * torch.exp(-logs) * x_mask
-      x = torch.cat([x0, x1], 1)
-      return x
diff --git a/spaces/chronopt-research/ViTExCo/src/models/CNN/__init__.py b/spaces/chronopt-research/ViTExCo/src/models/CNN/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/property/strategies.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/property/strategies.py
deleted file mode 100644
index 57d48555c07bffde891478b73cf49f24c952a505..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/property/strategies.py
+++ /dev/null
@@ -1,553 +0,0 @@
-import hashlib
-import hypothesis
-import hypothesis.strategies as st
-from typing import Any, Optional, List, Dict, Union
-from typing_extensions import TypedDict
-import numpy as np
-import numpy.typing as npt
-import chromadb.api.types as types
-import re
-from hypothesis.strategies._internal.strategies import SearchStrategy
-from hypothesis.errors import InvalidDefinition
-from hypothesis.stateful import RuleBasedStateMachine
-
-from dataclasses import dataclass
-
-from chromadb.api.types import Documents, Embeddings, Metadata
-
-# Seed numpy so any direct np.random use below is reproducible.
-# (Strictly speaking this is unnecessary: Hypothesis manages seeding for us.)
-np.random.seed(0)
-
-# See Hypothesis documentation for creating strategies at
-# https://hypothesis.readthedocs.io/en/latest/data.html
-
-# NOTE: Because these strategies are used in state machines, we need to
-# work around an issue with state machines, in which strategies that frequently
-# are marked as invalid (i.e. through the use of `assume` or `.filter`) can cause the
-# state machine tests to fail with a hypothesis.errors.Unsatisfiable.
-
-# Ultimately this is because the entire state machine is run as a single Hypothesis
-# example, which ends up drawing from the same strategies an enormous number of times.
-# Whenever a strategy marks itself as invalid, Hypothesis tries to start the entire
-# state machine run over. See https://github.com/HypothesisWorks/hypothesis/issues/3618
-
-# Because strategy generation is all interrelated, seemingly small changes (especially
-# ones called early in a test) can have an outsized effect. Generating lists with
-# unique=True, or dictionaries with a min size seems especially bad.
-
-# Please make changes to these strategies incrementally, testing to make sure they don't
-# start generating unsatisfiable examples.
-
-test_hnsw_config = {
-    "hnsw:construction_ef": 128,
-    "hnsw:search_ef": 128,
-    "hnsw:M": 128,
-}
-
-
-class RecordSet(TypedDict):
-    """
-    A generated set of embeddings, ids, metadatas, and documents that
-    represent what a user would pass to the API.
-    """
-
-    ids: Union[types.ID, List[types.ID]]
-    embeddings: Optional[Union[types.Embeddings, types.Embedding]]
-    metadatas: Optional[Union[List[types.Metadata], types.Metadata]]
-    documents: Optional[Union[List[types.Document], types.Document]]
-
-
-class NormalizedRecordSet(TypedDict):
-    """
-    A RecordSet, with all fields normalized to lists.
-    """
-
-    ids: List[types.ID]
-    embeddings: Optional[types.Embeddings]
-    metadatas: Optional[List[types.Metadata]]
-    documents: Optional[List[types.Document]]
-
-
-class StateMachineRecordSet(TypedDict):
-    """
-    Represents the internal state of a state machine in hypothesis tests.
-    """
-
-    ids: List[types.ID]
-    embeddings: types.Embeddings
-    metadatas: List[Optional[types.Metadata]]
-    documents: List[Optional[types.Document]]
-
-
-class Record(TypedDict):
-    """
-    A single generated record.
-    """
-
-    id: types.ID
-    embedding: Optional[types.Embedding]
-    metadata: Optional[types.Metadata]
-    document: Optional[types.Document]
-
-
-# TODO: support arbitrary text everywhere so we don't SQL-inject ourselves.
-# TODO: support empty strings everywhere
-sql_alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
-safe_text = st.text(alphabet=sql_alphabet, min_size=1)
-
-# Workaround for FastAPI json encoding peculiarities
-# https://github.com/tiangolo/fastapi/blob/8ac8d70d52bb0dd9eb55ba4e22d3e383943da05c/fastapi/encoders.py#L104
-safe_text = safe_text.filter(lambda s: not s.startswith("_sa"))
-
-safe_integers = st.integers(
-    min_value=-(2**31), max_value=2**31 - 1
-)  # TODO: handle longs
-safe_floats = st.floats(
-    allow_infinity=False,
-    allow_nan=False,
-    allow_subnormal=False,
-    min_value=-1e6,
-    max_value=1e6,
-)  # TODO: handle infinity and NAN
-
-safe_values: List[SearchStrategy[Union[int, float, str]]] = [
-    safe_text,
-    safe_integers,
-    safe_floats,
-]
-
-
-def one_or_both(
-    strategy_a: st.SearchStrategy[Any], strategy_b: st.SearchStrategy[Any]
-) -> st.SearchStrategy[Any]:
-    return st.one_of(
-        st.tuples(strategy_a, strategy_b),
-        st.tuples(strategy_a, st.none()),
-        st.tuples(st.none(), strategy_b),
-    )
-
-
-# Temporarily generate only these to avoid SQL formatting issues.
-legal_id_characters = (
-    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_./+"
-)
-
-float_types = [np.float16, np.float32, np.float64]
-int_types = [np.int16, np.int32, np.int64]  # TODO: handle int types
-
-
-@st.composite
-def collection_name(draw: st.DrawFn) -> str:
-    _collection_name_re = re.compile(r"^[a-zA-Z][a-zA-Z0-9-]{1,60}[a-zA-Z0-9]$")
-    _ipv4_address_re = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}$")
-    _two_periods_re = re.compile(r"\.\.")
-
-    name: str = draw(st.from_regex(_collection_name_re))
-    hypothesis.assume(not _ipv4_address_re.match(name))
-    hypothesis.assume(not _two_periods_re.search(name))
-
-    return name
-
-
-collection_metadata = st.one_of(
-    st.none(), st.dictionaries(safe_text, st.one_of(*safe_values))
-)
-
-
-# TODO: Use a hypothesis strategy while maintaining embedding uniqueness
-#       Or handle duplicate embeddings within a known epsilon
-def create_embeddings(
-    dim: int,
-    count: int,
-    dtype: npt.DTypeLike,
-) -> types.Embeddings:
-    embeddings: types.Embeddings = (
-        np.random.uniform(
-            low=-1.0,
-            high=1.0,
-            size=(count, dim),
-        )
-        .astype(dtype)
-        .tolist()
-    )
-
-    return embeddings
-
-
-class hashing_embedding_function(types.EmbeddingFunction):
-    def __init__(self, dim: int, dtype: npt.DTypeLike) -> None:
-        self.dim = dim
-        self.dtype = dtype
-
-    def __call__(self, texts: types.Documents) -> types.Embeddings:
-        # Hash the texts and convert to hex strings
-        hashed_texts = [
-            list(hashlib.sha256(text.encode("utf-8")).hexdigest()) for text in texts
-        ]
-        # Pad with repetition, or truncate the hex strings to the desired dimension
-        padded_texts = [
-            text * (self.dim // len(text)) + text[: self.dim % len(text)]
-            for text in hashed_texts
-        ]
-
-        # Convert the hex strings to dtype
-        embeddings: types.Embeddings = np.array(
-            [[int(char, 16) / 15.0 for char in text] for text in padded_texts],
-            dtype=self.dtype,
-        ).tolist()
-
-        return embeddings
-
-
-class not_implemented_embedding_function(types.EmbeddingFunction):
-    def __call__(self, texts: Documents) -> Embeddings:
-        assert False, "This embedding function is not implemented"
-
-
-def embedding_function_strategy(
-    dim: int, dtype: npt.DTypeLike
-) -> st.SearchStrategy[types.EmbeddingFunction]:
-    return st.just(hashing_embedding_function(dim, dtype))
-
-
-@dataclass
-class Collection:
-    name: str
-    metadata: Optional[types.Metadata]
-    dimension: int
-    dtype: npt.DTypeLike
-    known_metadata_keys: types.Metadata
-    known_document_keywords: List[str]
-    has_documents: bool = False
-    has_embeddings: bool = False
-    embedding_function: Optional[types.EmbeddingFunction] = None
-
-
-@st.composite
-def collections(
-    draw: st.DrawFn,
-    add_filterable_data: bool = False,
-    with_hnsw_params: bool = False,
-    has_embeddings: Optional[bool] = None,
-    has_documents: Optional[bool] = None,
-) -> Collection:
-    """Strategy to generate a Collection object. If add_filterable_data is True, then known_metadata_keys and known_document_keywords will be populated with consistent data."""
-
-    assert not ((has_embeddings is False) and (has_documents is False))
-
-    name = draw(collection_name())
-    metadata = draw(collection_metadata)
-    dimension = draw(st.integers(min_value=2, max_value=2048))
-    dtype = draw(st.sampled_from(float_types))
-
-    if with_hnsw_params:
-        if metadata is None:
-            metadata = {}
-        metadata.update(test_hnsw_config)
-        # Sometimes, select a space at random
-        if draw(st.booleans()):
-            # TODO: pull the distance functions from a source of truth that lives not
-            # in tests once https://github.com/chroma-core/issues/issues/61 lands
-            metadata["hnsw:space"] = draw(st.sampled_from(["cosine", "l2", "ip"]))
-
-    known_metadata_keys: Dict[str, Union[int, str, float]] = {}
-    if add_filterable_data:
-        while len(known_metadata_keys) < 5:
-            key = draw(safe_text)
-            known_metadata_keys[key] = draw(st.one_of(*safe_values))
-
-    if has_documents is None:
-        has_documents = draw(st.booleans())
-    assert has_documents is not None
-    if has_documents and add_filterable_data:
-        known_document_keywords = draw(st.lists(safe_text, min_size=5, max_size=5))
-    else:
-        known_document_keywords = []
-
-    if not has_documents:
-        has_embeddings = True
-    else:
-        if has_embeddings is None:
-            has_embeddings = draw(st.booleans())
-    assert has_embeddings is not None
-
-    embedding_function = draw(embedding_function_strategy(dimension, dtype))
-
-    return Collection(
-        name=name,
-        metadata=metadata,
-        dimension=dimension,
-        dtype=dtype,
-        known_metadata_keys=known_metadata_keys,
-        has_documents=has_documents,
-        known_document_keywords=known_document_keywords,
-        has_embeddings=has_embeddings,
-        embedding_function=embedding_function,
-    )
-
-
-@st.composite
-def metadata(draw: st.DrawFn, collection: Collection) -> types.Metadata:
-    """Strategy for generating metadata that could be a part of the given collection"""
-    # First draw a random dictionary.
-    metadata: types.Metadata = draw(st.dictionaries(safe_text, st.one_of(*safe_values)))
-    # Then, remove keys that overlap with the known keys for the coll
-    # to avoid type errors when comparing.
-    if collection.known_metadata_keys:
-        for key in collection.known_metadata_keys.keys():
-            if key in metadata:
-                del metadata[key]
-        # Finally, add in some of the known keys for the collection
-        sampling_dict: Dict[str, st.SearchStrategy[Union[str, int, float]]] = {
-            k: st.just(v) for k, v in collection.known_metadata_keys.items()
-        }
-        metadata.update(draw(st.fixed_dictionaries({}, optional=sampling_dict)))
-    return metadata
-
-
-@st.composite
-def document(draw: st.DrawFn, collection: Collection) -> types.Document:
-    """Strategy for generating documents that could be a part of the given collection"""
-
-    # Blacklist certain unicode characters that affect sqlite processing.
-    # For example, the null (\x00) character makes sqlite stop processing a string.
-    blacklist_categories = ("Cc", "Cs")
-    if collection.known_document_keywords:
-        known_words_st = st.sampled_from(collection.known_document_keywords)
-    else:
-        known_words_st = st.text(
-            min_size=1,
-            alphabet=st.characters(blacklist_categories=blacklist_categories),
-        )
-
-    random_words_st = st.text(
-        min_size=1, alphabet=st.characters(blacklist_categories=blacklist_categories)
-    )
-    words = draw(st.lists(st.one_of(known_words_st, random_words_st), min_size=1))
-    return " ".join(words)
-
-
-@st.composite
-def recordsets(
-    draw: st.DrawFn,
-    collection_strategy: SearchStrategy[Collection] = collections(),
-    id_strategy: SearchStrategy[str] = safe_text,
-    min_size: int = 1,
-    max_size: int = 50,
-) -> RecordSet:
-    collection = draw(collection_strategy)
-
-    ids = list(
-        draw(st.lists(id_strategy, min_size=min_size, max_size=max_size, unique=True))
-    )
-
-    embeddings: Optional[Embeddings] = None
-    if collection.has_embeddings:
-        embeddings = create_embeddings(collection.dimension, len(ids), collection.dtype)
-    metadatas = draw(
-        st.lists(metadata(collection), min_size=len(ids), max_size=len(ids))
-    )
-    documents: Optional[Documents] = None
-    if collection.has_documents:
-        documents = draw(
-            st.lists(document(collection), min_size=len(ids), max_size=len(ids))
-        )
-
-    # in the case where we have a single record, sometimes exercise
-    # the code that handles individual values rather than lists.
-    # In this case, any field may be a list or a single value.
-    if len(ids) == 1:
-        single_id: Union[str, List[str]] = ids[0] if draw(st.booleans()) else ids
-        single_embedding = (
-            embeddings[0]
-            if embeddings is not None and draw(st.booleans())
-            else embeddings
-        )
-        single_metadata: Union[Metadata, List[Metadata]] = (
-            metadatas[0] if draw(st.booleans()) else metadatas
-        )
-        single_document = (
-            documents[0] if documents is not None and draw(st.booleans()) else documents
-        )
-        return {
-            "ids": single_id,
-            "embeddings": single_embedding,
-            "metadatas": single_metadata,
-            "documents": single_document,
-        }
-
-    return {
-        "ids": ids,
-        "embeddings": embeddings,
-        "metadatas": metadatas,
-        "documents": documents,
-    }
-
-
-# This class is mostly cloned from hypothesis.stateful.RuleStrategy,
-# but always runs all the rules, instead of using a FeatureStrategy to
-# enable/disable rules. Disabled rules cause the entire test to be marked
-# invalid and, combined with the complexity of our other strategies, lead to an
-# unacceptably increased incidence of hypothesis.errors.Unsatisfiable.
-class DeterministicRuleStrategy(SearchStrategy):  # type: ignore
-    def __init__(self, machine: RuleBasedStateMachine) -> None:
-        super().__init__()  # type: ignore
-        self.machine = machine
-        self.rules = list(machine.rules())  # type: ignore
-
-        # The order is a bit arbitrary. Primarily we're trying to group rules
-        # that write to the same location together, and to put rules with no
-        # target first as they have less effect on the structure. We order from
-        # fewer to more arguments on grounds that it will plausibly need less
-        # data. This probably won't work especially well and we could be
-        # smarter about it, but it's better than just doing it in definition
-        # order.
-        self.rules.sort(
-            key=lambda rule: (
-                sorted(rule.targets),
-                len(rule.arguments),
-                rule.function.__name__,
-            )
-        )
-
-    def __repr__(self) -> str:
-        return "{}(machine={}({{...}}))".format(
-            self.__class__.__name__,
-            self.machine.__class__.__name__,
-        )
-
-    def do_draw(self, data):  # type: ignore
-        if not any(self.is_valid(rule) for rule in self.rules):
-            msg = f"No progress can be made from state {self.machine!r}"
-            raise InvalidDefinition(msg) from None
-
-        rule = data.draw(st.sampled_from([r for r in self.rules if self.is_valid(r)]))
-        argdata = data.draw(rule.arguments_strategy)
-        return (rule, argdata)
-
-    def is_valid(self, rule) -> bool:  # type: ignore
-        if not all(precond(self.machine) for precond in rule.preconditions):
-            return False
-
-        for b in rule.bundles:
-            bundle = self.machine.bundle(b.name)  # type: ignore
-            if not bundle:
-                return False
-        return True
-
-
-@st.composite
-def where_clause(draw: st.DrawFn, collection: Collection) -> types.Where:
-    """Generate a filter that could be used in a query against the given collection"""
-
-    known_keys = sorted(collection.known_metadata_keys.keys())
-
-    key = draw(st.sampled_from(known_keys))
-    value = collection.known_metadata_keys[key]
-
-    legal_ops: List[Optional[str]] = [None, "$eq", "$ne"]
-    if not isinstance(value, str):
-        legal_ops.extend(["$gt", "$lt", "$lte", "$gte"])
-    if isinstance(value, float):
-        # Add or subtract a small number to avoid floating point rounding errors
-        value = value + draw(st.sampled_from([1e-6, -1e-6]))
-
-    op: types.WhereOperator = draw(st.sampled_from(legal_ops))
-
-    if op is None:
-        return {key: value}
-    else:
-        return {key: {op: value}}
-
-
-@st.composite
-def where_doc_clause(draw: st.DrawFn, collection: Collection) -> types.WhereDocument:
-    """Generate a where_document filter that could be used against the given collection"""
-    if collection.known_document_keywords:
-        word = draw(st.sampled_from(collection.known_document_keywords))
-    else:
-        word = draw(safe_text)
-    return {"$contains": word}
-
-
-def binary_operator_clause(
-    base_st: SearchStrategy[types.Where],
-) -> SearchStrategy[types.Where]:
-    op: SearchStrategy[types.LogicalOperator] = st.sampled_from(["$and", "$or"])
-    return st.dictionaries(
-        keys=op,
-        values=st.lists(base_st, max_size=2, min_size=2),
-        min_size=1,
-        max_size=1,
-    )
-
-
-def binary_document_operator_clause(
-    base_st: SearchStrategy[types.WhereDocument],
-) -> SearchStrategy[types.WhereDocument]:
-    op: SearchStrategy[types.LogicalOperator] = st.sampled_from(["$and", "$or"])
-    return st.dictionaries(
-        keys=op,
-        values=st.lists(base_st, max_size=2, min_size=2),
-        min_size=1,
-        max_size=1,
-    )
-
-
-@st.composite
-def recursive_where_clause(draw: st.DrawFn, collection: Collection) -> types.Where:
-    base_st = where_clause(collection)
-    where: types.Where = draw(st.recursive(base_st, binary_operator_clause))
-    return where
-
-
-@st.composite
-def recursive_where_doc_clause(
-    draw: st.DrawFn, collection: Collection
-) -> types.WhereDocument:
-    base_st = where_doc_clause(collection)
-    where: types.WhereDocument = draw(
-        st.recursive(base_st, binary_document_operator_clause)
-    )
-    return where
-
-
-class Filter(TypedDict):
-    where: Optional[types.Where]
-    ids: Optional[Union[str, List[str]]]
-    where_document: Optional[types.WhereDocument]
-
-
-@st.composite
-def filters(
-    draw: st.DrawFn,
-    collection_st: st.SearchStrategy[Collection],
-    recordset_st: st.SearchStrategy[RecordSet],
-    include_all_ids: bool = False,
-) -> Filter:
-    collection = draw(collection_st)
-    recordset = draw(recordset_st)
-
-    where_clause = draw(st.one_of(st.none(), recursive_where_clause(collection)))
-    where_document_clause = draw(
-        st.one_of(st.none(), recursive_where_doc_clause(collection))
-    )
-
-    ids: Optional[Union[List[types.ID], types.ID]]
-    # Record sets can be a value instead of a list of values if there is only one record
-    if isinstance(recordset["ids"], str):
-        ids = [recordset["ids"]]
-    else:
-        ids = recordset["ids"]
-
-    if not include_all_ids:
-        ids = draw(st.one_of(st.none(), st.lists(st.sampled_from(ids))))
-        if ids is not None:
-            # Remove duplicates since hypothesis samples with replacement
-            ids = list(set(ids))
-
-    # Test both the single value list and the unwrapped single value case
-    if ids is not None and len(ids) == 1 and draw(st.booleans()):
-        ids = ids[0]
-
-    return {"where": where_clause, "where_document": where_document_clause, "ids": ids}
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/control_flow.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/control_flow.py
deleted file mode 100644
index 5d42598c757aa0c1b894999b10b0737298c8442a..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/control_flow.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from ._map import map  # noqa: F401
-from ._cond import cond, UnsupportedAliasMutationException  # noqa: F401
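
This two-line module just re-exports the experimental control-flow operators. As a rough usage sketch, based on the documented `cond(pred, true_fn, false_fn, operands)` shape of the API (treat the exact signature and eager-mode behavior as assumptions for your installed version):

```python
import torch
from functorch.experimental.control_flow import cond

def true_fn(x):
    return x.sin()

def false_fn(x):
    return x.cos()

x = torch.randn(4)
# Both branches must accept the same operands and return matching shapes/dtypes.
y = cond(x.sum() > 0, true_fn, false_fn, [x])
```
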
diff --git a/spaces/cihyFjudo/fairness-paper-search/Global Mapper 13 Serial Free 164 [EXCLUSIVE].md b/spaces/cihyFjudo/fairness-paper-search/Global Mapper 13 Serial Free 164 [EXCLUSIVE].md
deleted file mode 100644
index af6fd1566e30d8438c11c36b029b85c1742d889b..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Global Mapper 13 Serial Free 164 [EXCLUSIVE].md	
+++ /dev/null
@@ -1,5 +0,0 @@
-<br />
-<p>Where the T, Tmax, Tmin and Topt refer to the daytime mean temperature, maximum, minimum, and optimum temperature for photosynthesis, respectively. The last three parameters are biome-based and can be obtained from a look-up table (Table 2). The land cover product from MODIS (MOD12Q1) is used to provide biome information since this is the only annual land cover product with high spatial resolution and global coverage. The uncertainty of the land cover classification is not assessed but is supposed to have limited effect on the final GPP estimation since it only directly affects the temperature scalar and indirectly affects <i>ε</i>0. LSWImax is the maximum LSWI during the snow-free period for each pixel each year. To eliminate potential bias, a temporal smoothing using nearby four years (two years before, two years after) is applied and calculates the second largest LSWImax within this five-year period34.</p>
-<h2>Global Mapper 13 Serial Free 164</h2><br /><p><b><b>DOWNLOAD</b> &harr; <a href="https://tinurli.com/2uwiOa">https://tinurli.com/2uwiOa</a></b></p><br /><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Resident Evil The Final Chapter English Love English Subtitles Download Tips and Tricks for Watching the Movie Online.md b/spaces/cihyFjudo/fairness-paper-search/Resident Evil The Final Chapter English Love English Subtitles Download Tips and Tricks for Watching the Movie Online.md
deleted file mode 100644
index 73c25add8ed06507cd4fe52ebec62b61369f7a9c..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Resident Evil The Final Chapter English Love English Subtitles Download Tips and Tricks for Watching the Movie Online.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Resident Evil The Final Chapter English Love English Subtitles Download Torrent</h2><br /><p><b><b>Download File</b> ->>> <a href="https://tinurli.com/2uwk0t">https://tinurli.com/2uwk0t</a></b></p><br /><br />
-<br />
-<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/cihyFjudo/fairness-paper-search/Torrent From Up On Poppy Hill English Dub 2013 BEST.md b/spaces/cihyFjudo/fairness-paper-search/Torrent From Up On Poppy Hill English Dub 2013 BEST.md
deleted file mode 100644
index 2983f3645c605c29a4410b57e4afe887953f0b0c..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Torrent From Up On Poppy Hill English Dub 2013 BEST.md	
+++ /dev/null
@@ -1,92 +0,0 @@
-## Torrent From Up On Poppy Hill English Dub 2013
-
-
-
- 
-
-  
-
- 
-
-**Click Here  [https://venemena.blogspot.com/?download=2txRfL](https://venemena.blogspot.com/?download=2txRfL)**
-
-
-
- 
-
- 
-
- 
-
- 
-
-
-# How to Download Torrent From Up On Poppy Hill English Dub 2013
-
- 
-
-From Up On Poppy Hill is a popular anime movie that was released in Japan in 2011 and dubbed in English in 2013. It is a coming-of-age story set in the 1960s, about two high school students who fall in love while trying to save their school's clubhouse from demolition. The movie is directed by Goro Miyazaki, the son of the legendary Hayao Miyazaki, and features beautiful animation and music.
-
- 
-
-If you are a fan of anime or Studio Ghibli, you might want to watch this movie online or download it to your device. However, finding a reliable and safe torrent site can be tricky, especially if you are looking for a specific version of the movie. That's why we have compiled this guide to help you find and download Torrent From Up On Poppy Hill English Dub 2013 without any hassle.
-
- 
-
-## What is a Torrent?
-
- 
-
-A torrent is a file that contains information about other files that are shared by users on a peer-to-peer network. By using torrent client software, such as BitTorrent or uTorrent, you can download the files from other users who have them on their devices. This way, you don't have to rely on a single server or source to get the files you want.
-
- 
-
-However, downloading torrents can also have some risks. Some torrent sites may contain malware, viruses, or fake files that can harm your device or compromise your privacy. Some torrent sites may also violate copyright laws or other regulations, which can get you in trouble with the authorities. Therefore, you should always be careful and use a reputable and legal torrent site when downloading torrents.
-
- 
-
-## How to Find Torrent From Up On Poppy Hill English Dub 2013?
-
- 
-
-One of the best ways to find Torrent From Up On Poppy Hill English Dub 2013 is to use a torrent search engine. A torrent search engine is a website that allows you to search for torrents from various sources and categories. You can enter the name of the movie or any keywords related to it, and the search engine will show you a list of results that match your query.
-
- 
-
-Some of the most popular torrent search engines are:
-
- 
-
-- [Torrentz2](https://torrentz2.eu/): This is a meta-search engine that indexes torrents from over 60 sites. It has a simple and fast interface that lets you find torrents easily.
-
-- [Torrent Downloads](https://www.torrentdownloads.me/): This is a comprehensive torrent site that offers millions of torrents in various categories, such as movies, TV shows, music, games, software, and more. It also has a user-friendly design and advanced filtering options.
-
-- [Torlock](https://www.torlock.com/): This is a torrent site that claims to offer only verified and high-quality torrents. It has a large database of torrents in different genres and languages, including anime and cartoons.
-
-
-
-Once you find the torrent site that suits your needs, you can browse or search for Torrent From Up On Poppy Hill English Dub 2013 on it. You should look for torrents that have a high number of seeders and leechers, as this indicates that they are popular and fast to download. You should also check the comments and ratings of the torrents to see if they are authentic and safe.
-
- 
-
-## How to Download Torrent From Up On Poppy Hill English Dub 2013?
-
- 
-
-After you find the torrent file that you want to download, you need to download it to your device using torrent client software. A torrent client is a program that allows you to connect to other users who have the same file and download it from them. There are many torrent clients available online, such as:
-
-
-
-- [BitTorrent](https://www.bittorrent.com/): This is one of the most popular and widely used torrent client software in the world. It has a simple and intuitive interface that lets you manage your downloads easily.
-
- [uTorrent](https://www.utorrent.com/): This is another popular and reliable torrent client software that has many features and options. It is lightweight and fast.
-
-
-
-
-
-
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Unduh Film Gratis Film Apa Artinya Cinta Full Fakta dan Trivia Menarik tentang Film dan Pemainnya.md b/spaces/cihyFjudo/fairness-paper-search/Unduh Film Gratis Film Apa Artinya Cinta Full Fakta dan Trivia Menarik tentang Film dan Pemainnya.md
deleted file mode 100644
index 28fa01f6212aef5540164cc2ea9f7c6dbf8d84af..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Unduh Film Gratis Film Apa Artinya Cinta Full Fakta dan Trivia Menarik tentang Film dan Pemainnya.md	
+++ /dev/null
@@ -1,39 +0,0 @@
-<br />
-<p>How to download films has become a frequent search among movie lovers during a pandemic like the current one. The high number of Covid-19 cases makes many people afraid to go to the cinema and watch films there. As a result, many people prefer to watch films from home.</p>
-<p>That is why many people prefer to download films and watch them anytime without needing the internet. There are several ways to download films that you can choose from; they are very easy and can be done on a laptop or phone. Curious how? Read through the following explanation.</p>
-<h2>Free Film Download: Apa Artinya Cinta Full Movie</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;>>> <a href="https://tinurli.com/2uwjg3">https://tinurli.com/2uwjg3</a></b></p><br /><br />
-<p>To download the films you like, you can use a laptop or computer. With a laptop you can download all kinds of films without worrying about filling up your storage, since laptops generally have a fairly large capacity. On a laptop or computer there are also many ways to download films, so you can pick whichever is most convenient and fastest. Here they are:</p>
-<p>The Internet Archive can be an option if you want to download films on a laptop easily. The site offers many kinds of films that you can download for free and, of course, safely. There are a few steps to download a film through the Internet Archive. Here they are:</p>
-<p>Besides the Internet Archive, there is also a third-party site that can be used to download films, namely savefrom.net. This site is usually used to download videos or films from YouTube, where some films and series air. If you do not want to stream them because of the long runtime and heavy data usage, you can download them with this site's service. Downloading films for free with SaveFrom is quite easy; here are the steps:</p>
-<p>Internet Download Manager, or IDM, has long been known to speed up film downloads. This application makes downloading films more practical. Here is how to download films using IDM:</p>
-<p>Besides IDM, there are other applications that are safe for downloading films. Torrents can be one alternative. Here is how to download films on a laptop through a torrent application properly and safely:</p>
-<p>The next way to download films on a laptop is to use an on-demand service. Many sites now let you download or buy films one at a time without a subscription; such offerings are known as on-demand services. iTunes and Google Play provide them, and you can download films easily.</p>
-<p>VIU is also a streaming application that many movie lovers use to watch films and dramas. In this application you can likewise download the films you like to watch offline. Here is how to download films through VIU:</p>
-<p></p>
-<p>There is one more option if you want to watch films offline on your phone: Disney+ Hotstar. To download films from this application, follow these steps:</p>
-<p>However, applications that provide free viewing are very rare. Many applications offer the most complete film catalogs but require a paid subscription first. Avid viewers sometimes want it free while still getting a complete catalog full of varied genres.</p>
-<p>Layarkaca21 Apk is an application that offers a free movie-watching service. You should download this application to enjoy the various features inside it, all of which help you use its viewing services more easily; one of them is the free film download feature.</p>
-<p>In addition, this application comes with an Indonesian-language feature, so when you watch Hollywood films you will follow the storyline. The application is always updated, adding new films every day for users' enjoyment and entertainment. Whether popular or trending, you can enjoy the films for free.</p>
-<p>There are several attractive flagship features inside this Layarkaca21 Apk, all of which you can enjoy after downloading the application. Curious which features this free movie app offers? See the full details below:</p>
-<p>As you surely know, applications that provide free movie streaming usually pay no attention to video quality: the picture looks broken and watching becomes a chore. With the Layarkaca21 application, however, films come in crisp HD or Bluray 1080p quality.</p>
-<p>Even though this movie application offers a free service, every title it shares is always up to date; new and current titles are added all the time. With this up-to-date collection, users never miss new films and are always ready to watch them.</p>
-<p>This feature suits those who like watching offline or have no data quota. In this free viewing application you can download any film title freely and get it very quickly, which is handy when you have no internet quota but still want to keep watching.</p>
-<p>That is a brief rundown of the Layarkaca21 Apk, which makes it easy to watch your favorite films for free. Enjoy everything inside the application with HD picture and sound. Go ahead and download the application from the link provided above. Thank you.</p>
-<p>That is why here we will share links to free and legal sites for downloading cinema films that are well worth watching, whether you are alone in your room or together with friends.</p>
-<p>You can now watch films and series for free on Vidio. All kinds of great films, from <strong>Indonesian films</strong> and <strong>the latest Korean films</strong> to Indian, Thai, and Western films, can be watched to your heart's content here.</p>
-<p>Besides films, you can also watch a lineup of great series here, from dramas and <strong>romantic Korean films</strong> to Vidio original series, all free. Every month you can enjoy a selection of free Korean dramas on Vidio.</p>
-<p><strong>Vidio original series</strong> are some of the best serial content you simply must watch. There are I Heart You, Turn On, World of Dr. Boyke, and Serigala Terakhir, the most popular Vidio original series. Watching free films and series is now more complete than ever!</p>
-<p>Besides films and serials, what else can you watch for free? You can also live stream for free on Vidio, from music shows and reality shows to interesting videos and the most popular live TV streams.</p>
-<p>Cinema Box is a free film-streaming provider that you can download from the Play Store or the App Store. Besides watching directly, you can also download the films you want and watch them offline.</p>
-<p>Viu is another service whose films you can watch for free, and it also has exclusive films that many people love. You can watch films on Viu for free by visiting www.viu.com or by downloading the Viu app from the Play Store or App Store.</p>
-<p>There is also film, series, and anime content that you can watch for free with ads or without. You can get a VIP or Premium account to enjoy all the content in the VIU application starting from just Rp 30,000 per month.</p>
-<p>If you are a Korean drama lover, then Viki is the right choice for you. The application has been downloaded by more than 10 million users and offers Korean films you can watch for free or by subscription.</p>
-<p>CatchPlay+ is one of the free and legal movie sites recommended for you. It offers many films you can watch with the whole family, some of them free. From action and comedy to drama, it is all there on CatchPlay+.</p>
-<p>We TV is one of the best-known free movie applications and sites. No surprise: besides content you can watch for free, We TV also carries several exclusive titles that have successfully enlivened the Indonesian film market.</p>
-<p>Rather than watching films illegally on sites like IndoXXI or LK21, you are better off using the Vidio streaming app. It offers a selection of content that can be accessed for free, from films and series to Indonesian soap operas.</p>
-<p>Genflix is a free and legal online movie-streaming site that offers many of the latest films. It is certainly a better choice than risking data theft by watching on the illegal IndoXXI site.</p>
-<p>iFlix is a free and legal movie service headquartered in Malaysia. It offers box-office films, TV serials, Korean dramas, series, and the best Indonesian films, along with shows made just for children.</p>
-<p>iQIYI is a free and legal movie-streaming application, with Asian dramas and anime you can enjoy without worrying about malware. Some titles can be watched for free; to watch everything on iQIYI, you can activate a VIP account.</p>
-<p>As we know, watching films is now very easy thanks to internet technology. No one has to travel far to a cinema anymore to enjoy the latest films; with Rebahin, everything can be enjoyed for free.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/cleanmaster/akagi-sovits3/vdecoder/hifigan/env.py b/spaces/cleanmaster/akagi-sovits3/vdecoder/hifigan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/cleanmaster/akagi-sovits3/vdecoder/hifigan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
-    """A dict whose keys are also readable/writable as attributes."""
-    def __init__(self, *args, **kwargs):
-        super(AttrDict, self).__init__(*args, **kwargs)
-        self.__dict__ = self
-
-
-def build_env(config, config_name, path):
-    """Copy the config file into `path` so each run keeps its own snapshot."""
-    t_path = os.path.join(path, config_name)
-    if config != t_path:
-        os.makedirs(path, exist_ok=True)
-        shutil.copyfile(config, t_path)
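
`AttrDict` aliases the instance `__dict__` to the dict itself, so config keys double as attributes, and `build_env` snapshots the config file next to the run outputs. A quick sketch of the intended usage (file and key names here are illustrative assumptions, not from this repo):

```python
import json

# AttrDict and build_env as defined above
with open("config.json") as f:
    h = AttrDict(json.loads(f.read()))

sr = h.sampling_rate        # attribute access...
sr = h["sampling_rate"]     # ...and plain dict access hit the same storage

# Copy the config into the checkpoint directory for reproducibility
build_env("config.json", "config.json", "logs/my_run")
```
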
diff --git a/spaces/cleanmaster/so-vits-svc-akagi/spec_gen.py b/spaces/cleanmaster/so-vits-svc-akagi/spec_gen.py
deleted file mode 100644
index 85ad3188ac93aaef7b1b1d7dbbe47d358f4b0da6..0000000000000000000000000000000000000000
--- a/spaces/cleanmaster/so-vits-svc-akagi/spec_gen.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Pre-compute (and cache) spectrograms by iterating every dataset once."""
-from data_utils import TextAudioSpeakerLoader
-import json
-from tqdm import tqdm
-
-from utils import HParams
-
-config_path = 'configs/config.json'
-with open(config_path, "r") as f:
-    data = f.read()
-config = json.loads(data)
-hps = HParams(**config)
-
-train_dataset = TextAudioSpeakerLoader("filelists/train.txt", hps)
-test_dataset = TextAudioSpeakerLoader("filelists/test.txt", hps)
-eval_dataset = TextAudioSpeakerLoader("filelists/val.txt", hps)
-
-# Iterating each loader forces every item to be processed once, so the
-# spectrograms are computed and written to the cache as a side effect.
-for _ in tqdm(train_dataset):
-    pass
-for _ in tqdm(eval_dataset):
-    pass
-for _ in tqdm(test_dataset):
-    pass
\ No newline at end of file
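
The throwaway loops above are a warm-up pass: the loaders compute each spectrogram on first access and cache it to disk, so the actual training run only reads cached files. A self-contained sketch of that cache-on-first-access pattern (paths and STFT settings are illustrative, not the project's real ones):

```python
import os
import torch

def compute_spectrogram(wav: torch.Tensor) -> torch.Tensor:
    # Stand-in for the real spectrogram transform used by the loaders.
    window = torch.hann_window(1024)
    return torch.stft(wav, n_fft=1024, hop_length=256,
                      window=window, return_complex=True).abs()

def get_spec(wav: torch.Tensor, spec_path: str) -> torch.Tensor:
    if os.path.exists(spec_path):
        return torch.load(spec_path)     # later epochs: cheap disk read
    spec = compute_spectrogram(wav)      # first access: expensive
    torch.save(spec, spec_path)
    return spec

# One pass over the data, like the tqdm loops above, fills the cache.
_ = get_spec(torch.randn(22050), "/tmp/example.spec.pt")
```
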
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageDraw2.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageDraw2.py
deleted file mode 100644
index 7ce0224a67c7197a763c61f3739665cf19f23b60..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageDraw2.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# The Python Imaging Library
-# $Id$
-#
-# WCK-style drawing interface operations
-#
-# History:
-# 2003-12-07 fl   created
-# 2005-05-15 fl   updated; added to PIL as ImageDraw2
-# 2005-05-15 fl   added text support
-# 2005-05-20 fl   added arc/chord/pieslice support
-#
-# Copyright (c) 2003-2005 by Secret Labs AB
-# Copyright (c) 2003-2005 by Fredrik Lundh
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-"""
-(Experimental) WCK-style drawing interface operations
-
-.. seealso:: :py:mod:`PIL.ImageDraw`
-"""
-
-
-from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath
-
-
-class Pen:
-    """Stores an outline color and width."""
-
-    def __init__(self, color, width=1, opacity=255):
-        self.color = ImageColor.getrgb(color)
-        self.width = width
-        # NOTE: `opacity` is accepted for API compatibility but is ignored.
-
-
-class Brush:
-    """Stores a fill color"""
-
-    def __init__(self, color, opacity=255):
-        self.color = ImageColor.getrgb(color)
-        # NOTE: `opacity` is accepted for API compatibility but is ignored.
-
-
-class Font:
-    """Stores a TrueType font and color"""
-
-    def __init__(self, color, file, size=12):
-        # FIXME: add support for bitmap fonts
-        self.color = ImageColor.getrgb(color)
-        self.font = ImageFont.truetype(file, size)
-
-
-class Draw:
-    """
-    (Experimental) WCK-style drawing interface
-    """
-
-    def __init__(self, image, size=None, color=None):
-        if not hasattr(image, "im"):
-            image = Image.new(image, size, color)
-        self.draw = ImageDraw.Draw(image)
-        self.image = image
-        self.transform = None
-
-    def flush(self):
-        return self.image
-
-    def render(self, op, xy, pen, brush=None, **kwargs):
-        # handle color arguments
-        outline = fill = None
-        width = 1
-        if isinstance(pen, Pen):
-            outline = pen.color
-            width = pen.width
-        elif isinstance(brush, Pen):
-            outline = brush.color
-            width = brush.width
-        if isinstance(brush, Brush):
-            fill = brush.color
-        elif isinstance(pen, Brush):
-            fill = pen.color
-        # handle transformation
-        if self.transform:
-            xy = ImagePath.Path(xy)
-            xy.transform(self.transform)
-        # render the item
-        if op == "line":
-            self.draw.line(xy, fill=outline, width=width)
-        elif op == "arc":
-            # ImageDraw.arc takes no `outline`; its stroke color is passed as `fill`.
-            self.draw.arc(xy, fill=outline, **kwargs)
-        else:
-            getattr(self.draw, op)(xy, fill=fill, outline=outline, **kwargs)
-
-    def settransform(self, offset):
-        """Sets a transformation offset."""
-        (xoffset, yoffset) = offset
-        self.transform = (1, 0, xoffset, 0, 1, yoffset)
-
-    def arc(self, xy, start, end, *options):
-        """
-        Draws an arc (a portion of a circle outline) between the start and end
-        angles, inside the given bounding box.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc`
-        """
-        self.render("arc", xy, start, end, *options)
-
-    def chord(self, xy, start, end, *options):
-        """
-        Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points
-        with a straight line.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord`
-        """
-        self.render("chord", xy, start, end, *options)
-
-    def ellipse(self, xy, *options):
-        """
-        Draws an ellipse inside the given bounding box.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse`
-        """
-        self.render("ellipse", xy, *options)
-
-    def line(self, xy, *options):
-        """
-        Draws a line between the coordinates in the ``xy`` list.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line`
-        """
-        self.render("line", xy, *options)
-
-    def pieslice(self, xy, start, end, *options):
-        """
-        Same as arc, but also draws straight lines between the end points and the
-        center of the bounding box.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice`
-        """
-        self.render("pieslice", xy, start, end, *options)
-
-    def polygon(self, xy, *options):
-        """
-        Draws a polygon.
-
-        The polygon outline consists of straight lines between the given
-        coordinates, plus a straight line between the last and the first
-        coordinate.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon`
-        """
-        self.render("polygon", xy, *options)
-
-    def rectangle(self, xy, *options):
-        """
-        Draws a rectangle.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle`
-        """
-        self.render("rectangle", xy, *options)
-
-    def text(self, xy, text, font):
-        """
-        Draws the string at the given position.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text`
-        """
-        if self.transform:
-            xy = ImagePath.Path(xy)
-            xy.transform(self.transform)
-        self.draw.text(xy, text, font=font.font, fill=font.color)
-
-    def textbbox(self, xy, text, font):
-        """
-        Returns bounding box (in pixels) of given text.
-
-        :return: ``(left, top, right, bottom)`` bounding box
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox`
-        """
-        if self.transform:
-            xy = ImagePath.Path(xy)
-            xy.transform(self.transform)
-        return self.draw.textbbox(xy, text, font=font.font)
-
-    def textlength(self, text, font):
-        """
-        Returns length (in pixels) of given text.
-        This is the amount by which following text should be offset.
-
-        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength`
-        """
-        return self.draw.textlength(text, font=font.font)
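
Tying the pieces together, a short sketch of the WCK-style API above: construct a `Draw` wrapper, then pass `Pen`/`Brush` objects instead of raw colors (the output filename is illustrative):

```python
from PIL import Image, ImageDraw2

img = Image.new("RGB", (200, 200), "white")
d = ImageDraw2.Draw(img)

pen = ImageDraw2.Pen("blue", width=3)
brush = ImageDraw2.Brush("yellow")

d.rectangle([20, 20, 180, 180], pen, brush)   # outline + fill
d.line([20, 20, 180, 180], pen)               # pen width applies to lines
d.arc([60, 60, 140, 140], 0, 270, pen)        # start/end angles, then the pen

img.save("demo.png")
```
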
diff --git a/spaces/cncn102/bingo1/src/components/ui/icons.tsx b/spaces/cncn102/bingo1/src/components/ui/icons.tsx
deleted file mode 100644
index 742b489b50437c5b64c86082f2ebc712eeb6a2b0..0000000000000000000000000000000000000000
--- a/spaces/cncn102/bingo1/src/components/ui/icons.tsx
+++ /dev/null
@@ -1,504 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-function IconNextChat({
-  className,
-  inverted,
-  ...props
-}: React.ComponentProps<'svg'> & { inverted?: boolean }) {
-  const id = React.useId()
-
-  return (
-    <svg
-      viewBox="0 0 17 17"
-      fill="none"
-      xmlns="http://www.w3.org/2000/svg"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <defs>
-        <linearGradient
-          id={`gradient-${id}-1`}
-          x1="10.6889"
-          y1="10.3556"
-          x2="13.8445"
-          y2="14.2667"
-          gradientUnits="userSpaceOnUse"
-        >
-          <stop stopColor={inverted ? 'white' : 'black'} />
-          <stop
-            offset={1}
-            stopColor={inverted ? 'white' : 'black'}
-            stopOpacity={0}
-          />
-        </linearGradient>
-        <linearGradient
-          id={`gradient-${id}-2`}
-          x1="11.7555"
-          y1="4.8"
-          x2="11.7376"
-          y2="9.50002"
-          gradientUnits="userSpaceOnUse"
-        >
-          <stop stopColor={inverted ? 'white' : 'black'} />
-          <stop
-            offset={1}
-            stopColor={inverted ? 'white' : 'black'}
-            stopOpacity={0}
-          />
-        </linearGradient>
-      </defs>
-      <path
-        d="M1 16L2.58314 11.2506C1.83084 9.74642 1.63835 8.02363 2.04013 6.39052C2.4419 4.75741 3.41171 3.32057 4.776 2.33712C6.1403 1.35367 7.81003 0.887808 9.4864 1.02289C11.1628 1.15798 12.7364 1.8852 13.9256 3.07442C15.1148 4.26363 15.842 5.83723 15.9771 7.5136C16.1122 9.18997 15.6463 10.8597 14.6629 12.224C13.6794 13.5883 12.2426 14.5581 10.6095 14.9599C8.97637 15.3616 7.25358 15.1692 5.74942 14.4169L1 16Z"
-        fill={inverted ? 'black' : 'white'}
-        stroke={inverted ? 'black' : 'white'}
-        strokeWidth={2}
-        strokeLinecap="round"
-        strokeLinejoin="round"
-      />
-      <mask
-        id="mask0_91_2047"
-        style={{ maskType: 'alpha' }}
-        maskUnits="userSpaceOnUse"
-        x={1}
-        y={0}
-        width={16}
-        height={16}
-      >
-        <circle cx={9} cy={8} r={8} fill={inverted ? 'black' : 'white'} />
-      </mask>
-      <g mask="url(#mask0_91_2047)">
-        <circle cx={9} cy={8} r={8} fill={inverted ? 'black' : 'white'} />
-        <path
-          d="M14.2896 14.0018L7.146 4.8H5.80005V11.1973H6.87681V6.16743L13.4444 14.6529C13.7407 14.4545 14.0231 14.2369 14.2896 14.0018Z"
-          fill={`url(#gradient-${id}-1)`}
-        />
-        <rect
-          x="11.2222"
-          y="4.8"
-          width="1.06667"
-          height="6.4"
-          fill={`url(#gradient-${id}-2)`}
-        />
-      </g>
-    </svg>
-  )
-}
-
-function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      fill="currentColor"
-      viewBox="0 0 24 24"
-      role="img"
-      xmlns="http://www.w3.org/2000/svg"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <title>OpenAI icon</title>
-      <path d="M22.2819 9.8211a5.9847 5.9847 0 0 0-.5157-4.9108 6.0462 6.0462 0 0 0-6.5098-2.9A6.0651 6.0651 0 0 0 4.9807 4.1818a5.9847 5.9847 0 0 0-3.9977 2.9 6.0462 6.0462 0 0 0 .7427 7.0966 5.98 5.98 0 0 0 .511 4.9107 6.051 6.051 0 0 0 6.5146 2.9001A5.9847 5.9847 0 0 0 13.2599 24a6.0557 6.0557 0 0 0 5.7718-4.2058 5.9894 5.9894 0 0 0 3.9977-2.9001 6.0557 6.0557 0 0 0-.7475-7.0729zm-9.022 12.6081a4.4755 4.4755 0 0 1-2.8764-1.0408l.1419-.0804 4.7783-2.7582a.7948.7948 0 0 0 .3927-.6813v-6.7369l2.02 1.1686a.071.071 0 0 1 .038.052v5.5826a4.504 4.504 0 0 1-4.4945 4.4944zm-9.6607-4.1254a4.4708 4.4708 0 0 1-.5346-3.0137l.142.0852 4.783 2.7582a.7712.7712 0 0 0 .7806 0l5.8428-3.3685v2.3324a.0804.0804 0 0 1-.0332.0615L9.74 19.9502a4.4992 4.4992 0 0 1-6.1408-1.6464zM2.3408 7.8956a4.485 4.485 0 0 1 2.3655-1.9728V11.6a.7664.7664 0 0 0 .3879.6765l5.8144 3.3543-2.0201 1.1685a.0757.0757 0 0 1-.071 0l-4.8303-2.7865A4.504 4.504 0 0 1 2.3408 7.872zm16.5963 3.8558L13.1038 8.364 15.1192 7.2a.0757.0757 0 0 1 .071 0l4.8303 2.7913a4.4944 4.4944 0 0 1-.6765 8.1042v-5.6772a.79.79 0 0 0-.407-.667zm2.0107-3.0231l-.142-.0852-4.7735-2.7818a.7759.7759 0 0 0-.7854 0L9.409 9.2297V6.8974a.0662.0662 0 0 1 .0284-.0615l4.8303-2.7866a4.4992 4.4992 0 0 1 6.6802 4.66zM8.3065 12.863l-2.02-1.1638a.0804.0804 0 0 1-.038-.0567V6.0742a4.4992 4.4992 0 0 1 7.3757-3.4537l-.142.0805L8.704 5.459a.7948.7948 0 0 0-.3927.6813zm1.0976-2.3654l2.602-1.4998 2.6069 1.4998v2.9994l-2.5974 1.4997-2.6067-1.4997Z" />
-    </svg>
-  )
-}
-
-function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      role="img"
-      viewBox="0 0 24 24"
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <title>GitHub</title>
-      <path d="M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12" />
-    </svg>
-  )
-}
-
-function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      fill="none"
-      shapeRendering="geometricPrecision"
-      stroke="currentColor"
-      strokeLinecap="round"
-      strokeLinejoin="round"
-      strokeWidth="1"
-      viewBox="0 0 24 24"
-      aria-hidden="true"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M16.88 3.549L7.12 20.451"></path>
-    </svg>
-  )
-}
-
-function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m205.66 149.66-72 72a8 8 0 0 1-11.32 0l-72-72a8 8 0 0 1 11.32-11.32L120 196.69V40a8 8 0 0 1 16 0v156.69l58.34-58.35a8 8 0 0 1 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m221.66 133.66-72 72a8 8 0 0 1-11.32-11.32L196.69 136H40a8 8 0 0 1 0-16h156.69l-58.35-58.34a8 8 0 0 1 11.32-11.32l72 72a8 8 0 0 1 0 11.32Z" />
-    </svg>
-  )
-}
-
-function IconUser({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M230.92 212c-15.23-26.33-38.7-45.21-66.09-54.16a72 72 0 1 0-73.66 0c-27.39 8.94-50.86 27.82-66.09 54.16a8 8 0 1 0 13.85 8c18.84-32.56 52.14-52 89.07-52s70.23 19.44 89.07 52a8 8 0 1 0 13.85-8ZM72 96a56 56 0 1 1 56 56 56.06 56.06 0 0 1-56-56Z" />
-    </svg>
-  )
-}
-
-function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M224 128a8 8 0 0 1-8 8h-80v80a8 8 0 0 1-16 0v-80H40a8 8 0 0 1 0-16h80V40a8 8 0 0 1 16 0v80h80a8 8 0 0 1 8 8Z" />
-    </svg>
-  )
-}
-
-function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M200 32v144a8 8 0 0 1-8 8H67.31l34.35 34.34a8 8 0 0 1-11.32 11.32l-48-48a8 8 0 0 1 0-11.32l48-48a8 8 0 0 1 11.32 11.32L67.31 168H184V32a8 8 0 0 1 16 0Z" />
-    </svg>
-  )
-}
-
-function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4 animate-spin', className)}
-      {...props}
-    >
-      <path d="M232 128a104 104 0 0 1-208 0c0-41 23.81-78.36 60.66-95.27a8 8 0 0 1 6.68 14.54C60.15 61.59 40 93.27 40 128a88 88 0 0 0 176 0c0-34.73-20.15-66.41-51.34-80.73a8 8 0 0 1 6.68-14.54C208.19 49.64 232 87 232 128Z" />
-    </svg>
-  )
-}
-
-function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 48H40a16 16 0 0 0-16 16v160a15.84 15.84 0 0 0 9.25 14.5A16.05 16.05 0 0 0 40 240a15.89 15.89 0 0 0 10.25-3.78.69.69 0 0 0 .13-.11L82.5 208H216a16 16 0 0 0 16-16V64a16 16 0 0 0-16-16ZM40 224Zm176-32H82.5a16 16 0 0 0-10.3 3.75l-.12.11L40 224V64h176Z" />
-    </svg>
-  )
-}
-
-function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 48h-40v-8a24 24 0 0 0-24-24h-48a24 24 0 0 0-24 24v8H40a8 8 0 0 0 0 16h8v144a16 16 0 0 0 16 16h128a16 16 0 0 0 16-16V64h8a8 8 0 0 0 0-16ZM96 40a8 8 0 0 1 8-8h48a8 8 0 0 1 8 8v8H96Zm96 168H64V64h128Zm-80-104v64a8 8 0 0 1-16 0v-64a8 8 0 0 1 16 0Zm48 0v64a8 8 0 0 1-16 0v-64a8 8 0 0 1 16 0Z" />
-    </svg>
-  )
-}
-
-function IconMore({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      viewBox="0 0 24 24"
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M7.75 12C7.75 12.9665 6.9665 13.75 6 13.75C5.0335 13.75 4.25 12.9665 4.25 12C4.25 11.0335 5.0335 10.25 6 10.25C6.9665 10.25 7.75 11.0335 7.75 12ZM13.75 12C13.75 12.9665 12.9665 13.75 12 13.75C11.0335 13.75 10.25 12.9665 10.25 12C10.25 11.0335 11.0335 10.25 12 10.25C12.9665 10.25 13.75 11.0335 13.75 12ZM18 13.75C18.9665 13.75 19.75 12.9665 19.75 12C19.75 11.0335 18.9665 10.25 18 10.25C17.0335 10.25 16.25 11.0335 16.25 12C16.25 12.9665 17.0335 13.75 18 13.75Z"></path>
-    </svg>
-  )
-}
-
-function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M197.67 186.37a8 8 0 0 1 0 11.29C196.58 198.73 170.82 224 128 224c-37.39 0-64.53-22.4-80-39.85V208a8 8 0 0 1-16 0v-48a8 8 0 0 1 8-8h48a8 8 0 0 1 0 16H55.44C67.76 183.35 93 208 128 208c36 0 58.14-21.46 58.36-21.68a8 8 0 0 1 11.31.05ZM216 40a8 8 0 0 0-8 8v23.85C192.53 54.4 165.39 32 128 32c-42.82 0-68.58 25.27-69.66 26.34a8 8 0 0 0 11.3 11.34C69.86 69.46 92 48 128 48c35 0 60.24 24.65 72.56 40H168a8 8 0 0 0 0 16h48a8 8 0 0 0 8-8V48a8 8 0 0 0-8-8Z" />
-    </svg>
-  )
-}
-
-function IconStop({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M128 24a104 104 0 1 0 104 104A104.11 104.11 0 0 0 128 24Zm0 192a88 88 0 1 1 88-88 88.1 88.1 0 0 1-88 88Zm24-120h-48a8 8 0 0 0-8 8v48a8 8 0 0 0 8 8h48a8 8 0 0 0 8-8v-48a8 8 0 0 0-8-8Zm-8 48h-32v-32h32Z" />
-    </svg>
-  )
-}
-
-function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 40H40a16 16 0 0 0-16 16v144a16 16 0 0 0 16 16h176a16 16 0 0 0 16-16V56a16 16 0 0 0-16-16ZM40 56h40v144H40Zm176 144H96V56h120v144Z" />
-    </svg>
-  )
-}
-
-function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M233.54 142.23a8 8 0 0 0-8-2 88.08 88.08 0 0 1-109.8-109.8 8 8 0 0 0-10-10 104.84 104.84 0 0 0-52.91 37A104 104 0 0 0 136 224a103.09 103.09 0 0 0 62.52-20.88 104.84 104.84 0 0 0 37-52.91 8 8 0 0 0-1.98-7.98Zm-44.64 48.11A88 88 0 0 1 65.66 67.11a89 89 0 0 1 31.4-26A106 106 0 0 0 96 56a104.11 104.11 0 0 0 104 104 106 106 0 0 0 14.92-1.06 89 89 0 0 1-26.02 31.4Z" />
-    </svg>
-  )
-}
-
-function IconSun({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M120 40V16a8 8 0 0 1 16 0v24a8 8 0 0 1-16 0Zm72 88a64 64 0 1 1-64-64 64.07 64.07 0 0 1 64 64Zm-16 0a48 48 0 1 0-48 48 48.05 48.05 0 0 0 48-48ZM58.34 69.66a8 8 0 0 0 11.32-11.32l-16-16a8 8 0 0 0-11.32 11.32Zm0 116.68-16 16a8 8 0 0 0 11.32 11.32l16-16a8 8 0 0 0-11.32-11.32ZM192 72a8 8 0 0 0 5.66-2.34l16-16a8 8 0 0 0-11.32-11.32l-16 16A8 8 0 0 0 192 72Zm5.66 114.34a8 8 0 0 0-11.32 11.32l16 16a8 8 0 0 0 11.32-11.32ZM48 128a8 8 0 0 0-8-8H16a8 8 0 0 0 0 16h24a8 8 0 0 0 8-8Zm80 80a8 8 0 0 0-8 8v24a8 8 0 0 0 16 0v-24a8 8 0 0 0-8-8Zm112-88h-24a8 8 0 0 0 0 16h24a8 8 0 0 0 0-16Z" />
-    </svg>
-  )
-}
-
-function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M216 32H88a8 8 0 0 0-8 8v40H40a8 8 0 0 0-8 8v128a8 8 0 0 0 8 8h128a8 8 0 0 0 8-8v-40h40a8 8 0 0 0 8-8V40a8 8 0 0 0-8-8Zm-56 176H48V96h112Zm48-48h-32V88a8 8 0 0 0-8-8H96V48h112Z" />
-    </svg>
-  )
-}
-
-function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="m229.66 77.66-128 128a8 8 0 0 1-11.32 0l-56-56a8 8 0 0 1 11.32-11.32L96 188.69 218.34 66.34a8 8 0 0 1 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M224 152v56a16 16 0 0 1-16 16H48a16 16 0 0 1-16-16v-56a8 8 0 0 1 16 0v56h160v-56a8 8 0 0 1 16 0Zm-101.66 5.66a8 8 0 0 0 11.32 0l40-40a8 8 0 0 0-11.32-11.32L136 132.69V40a8 8 0 0 0-16 0v92.69l-26.34-26.35a8 8 0 0 0-11.32 11.32Z" />
-    </svg>
-  )
-}
-
-function IconClose({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      viewBox="0 0 256 256"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path d="M205.66 194.34a8 8 0 0 1-11.32 11.32L128 139.31l-66.34 66.35a8 8 0 0 1-11.32-11.32L116.69 128 50.34 61.66a8 8 0 0 1 11.32-11.32L128 116.69l66.34-66.35a8 8 0 0 1 11.32 11.32L139.31 128Z" />
-    </svg>
-  )
-}
-
-function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="none"
-      viewBox="0 0 24 24"
-      strokeWidth={1.5}
-      stroke="currentColor"
-      className={cn('h-4 w-4', className)}
-      {...props}
-    >
-      <path
-        strokeLinecap="round"
-        strokeLinejoin="round"
-        d="M16.862 4.487l1.687-1.688a1.875 1.875 0 112.652 2.652L10.582 16.07a4.5 4.5 0 01-1.897 1.13L6 18l.8-2.685a4.5 4.5 0 011.13-1.897l8.932-8.931zm0 0L19.5 7.125M18 14v4.75A2.25 2.25 0 0115.75 21H5.25A2.25 2.25 0 013 18.75V8.25A2.25 2.25 0 015.25 6H10"
-      />
-    </svg>
-  )
-}
-
-function IconShare({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="m237.66 106.35-80-80A8 8 0 0 0 144 32v40.35c-25.94 2.22-54.59 14.92-78.16 34.91-28.38 24.08-46.05 55.11-49.76 87.37a12 12 0 0 0 20.68 9.58c11-11.71 50.14-48.74 107.24-52V192a8 8 0 0 0 13.66 5.65l80-80a8 8 0 0 0 0-11.3ZM160 172.69V144a8 8 0 0 0-8-8c-28.08 0-55.43 7.33-81.29 21.8a196.17 196.17 0 0 0-36.57 26.52c5.8-23.84 20.42-46.51 42.05-64.86C99.41 99.77 127.75 88 152 88a8 8 0 0 0 8-8V51.32L220.69 112Z" />
-    </svg>
-  )
-}
-
-function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M117.25 157.92a60 60 0 1 0-66.5 0 95.83 95.83 0 0 0-47.22 37.71 8 8 0 1 0 13.4 8.74 80 80 0 0 1 134.14 0 8 8 0 0 0 13.4-8.74 95.83 95.83 0 0 0-47.22-37.71ZM40 108a44 44 0 1 1 44 44 44.05 44.05 0 0 1-44-44Zm210.14 98.7a8 8 0 0 1-11.07-2.33A79.83 79.83 0 0 0 172 168a8 8 0 0 1 0-16 44 44 0 1 0-16.34-84.87 8 8 0 1 1-5.94-14.85 60 60 0 0 1 55.53 105.64 95.83 95.83 0 0 1 47.22 37.71 8 8 0 0 1-2.33 11.07Z" />
-    </svg>
-  )
-}
-
-function IconExternalLink({
-  className,
-  ...props
-}: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M224 104a8 8 0 0 1-16 0V59.32l-66.33 66.34a8 8 0 0 1-11.32-11.32L196.68 48H152a8 8 0 0 1 0-16h64a8 8 0 0 1 8 8Zm-40 24a8 8 0 0 0-8 8v72H48V80h72a8 8 0 0 0 0-16H48a16 16 0 0 0-16 16v128a16 16 0 0 0 16 16h128a16 16 0 0 0 16-16v-72a8 8 0 0 0-8-8Z" />
-    </svg>
-  )
-}
-
-function IconChevronUpDown({
-  className,
-  ...props
-}: React.ComponentProps<'svg'>) {
-  return (
-    <svg
-      xmlns="http://www.w3.org/2000/svg"
-      fill="currentColor"
-      className={cn('h-4 w-4', className)}
-      viewBox="0 0 256 256"
-      {...props}
-    >
-      <path d="M181.66 170.34a8 8 0 0 1 0 11.32l-48 48a8 8 0 0 1-11.32 0l-48-48a8 8 0 0 1 11.32-11.32L128 212.69l42.34-42.35a8 8 0 0 1 11.32 0Zm-96-84.68L128 43.31l42.34 42.35a8 8 0 0 0 11.32-11.32l-48-48a8 8 0 0 0-11.32 0l-48 48a8 8 0 0 0 11.32 11.32Z" />
-    </svg>
-  )
-}
-
-export {
-  IconEdit,
-  IconNextChat,
-  IconOpenAI,
-  IconGitHub,
-  IconSeparator,
-  IconArrowDown,
-  IconArrowRight,
-  IconUser,
-  IconPlus,
-  IconArrowElbow,
-  IconSpinner,
-  IconMessage,
-  IconTrash,
-  IconMore,
-  IconRefresh,
-  IconStop,
-  IconSidebar,
-  IconMoon,
-  IconSun,
-  IconCopy,
-  IconCheck,
-  IconDownload,
-  IconClose,
-  IconShare,
-  IconUsers,
-  IconExternalLink,
-  IconChevronUpDown
-}
diff --git a/spaces/codelion/Grounding_DINO_demo/setup.py b/spaces/codelion/Grounding_DINO_demo/setup.py
deleted file mode 100644
index a045b763fb4a4f61bac23b735544a18ffc68d20a..0000000000000000000000000000000000000000
--- a/spaces/codelion/Grounding_DINO_demo/setup.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# coding=utf-8
-# Copyright 2022 The IDEA Authors. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ------------------------------------------------------------------------------------------------
-# Modified from
-# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/setup.py
-# https://github.com/facebookresearch/detectron2/blob/main/setup.py
-# https://github.com/open-mmlab/mmdetection/blob/master/setup.py
-# https://github.com/Oneflow-Inc/libai/blob/main/setup.py
-# ------------------------------------------------------------------------------------------------
-
-import glob
-import os
-import subprocess
-
-import torch
-from setuptools import find_packages, setup
-from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
-
-# groundingdino version info
-version = "0.1.0"
-package_name = "groundingdino"
-cwd = os.path.dirname(os.path.abspath(__file__))
-
-
-sha = "Unknown"
-try:
-    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
-except Exception:
-    pass
-
-
-def write_version_file():
-    version_path = os.path.join(cwd, "groundingdino", "version.py")
-    with open(version_path, "w") as f:
-        f.write(f"__version__ = '{version}'\n")
-        # f.write(f"git_version = {repr(sha)}\n")
-
-
-requirements = ["torch", "torchvision"]
-
-torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
-
-
-def get_extensions():
-    this_dir = os.path.dirname(os.path.abspath(__file__))
-    extensions_dir = os.path.join(this_dir, "groundingdino", "models", "GroundingDINO", "csrc")
-
-    main_source = os.path.join(extensions_dir, "vision.cpp")
-    sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
-    source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
-        os.path.join(extensions_dir, "*.cu")
-    )
-
-    sources = [main_source] + sources
-
-    extension = CppExtension
-
-    extra_compile_args = {"cxx": []}
-    define_macros = []
-
-    if torch.cuda.is_available() and CUDA_HOME is not None:
-        print("Compiling with CUDA")
-        extension = CUDAExtension
-        sources += source_cuda
-        define_macros += [("WITH_CUDA", None)]
-        extra_compile_args["nvcc"] = [
-            "-DCUDA_HAS_FP16=1",
-            "-D__CUDA_NO_HALF_OPERATORS__",
-            "-D__CUDA_NO_HALF_CONVERSIONS__",
-            "-D__CUDA_NO_HALF2_OPERATORS__",
-        ]
-    else:
-        print("Compiling without CUDA")
-        define_macros += [("WITH_HIP", None)]
-        extra_compile_args["nvcc"] = []
-        return None
-
-    sources = [os.path.join(extensions_dir, s) for s in sources]
-    include_dirs = [extensions_dir]
-
-    ext_modules = [
-        extension(
-            "groundingdino._C",
-            sources,
-            include_dirs=include_dirs,
-            define_macros=define_macros,
-            extra_compile_args=extra_compile_args,
-        )
-    ]
-
-    return ext_modules
-
-
-def parse_requirements(fname="requirements.txt", with_version=True):
-    """Parse the package dependencies listed in a requirements file but strips
-    specific versioning information.
-
-    Args:
-        fname (str): path to requirements file
-        with_version (bool, default=True): if True include version specs
-
-    Returns:
-        List[str]: list of requirements items
-
-    CommandLine:
-        python -c "import setup; print(setup.parse_requirements())"
-    """
-    import re
-    import sys
-    from os.path import exists
-
-    require_fpath = fname
-
-    def parse_line(line):
-        """Parse information from a line in a requirements text file."""
-        if line.startswith("-r "):
-            # Allow specifying requirements in other files
-            target = line.split(" ")[1]
-            for info in parse_require_file(target):
-                yield info
-        else:
-            info = {"line": line}
-            if line.startswith("-e "):
-                info["package"] = line.split("#egg=")[1]
-            elif "@git+" in line:
-                info["package"] = line
-            else:
-                # Remove versioning from the package
-                pat = "(" + "|".join([">=", "==", ">"]) + ")"
-                parts = re.split(pat, line, maxsplit=1)
-                parts = [p.strip() for p in parts]
-
-                info["package"] = parts[0]
-                if len(parts) > 1:
-                    op, rest = parts[1:]
-                    if ";" in rest:
-                        # Handle platform specific dependencies
-                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
-                        version, platform_deps = map(str.strip, rest.split(";"))
-                        info["platform_deps"] = platform_deps
-                    else:
-                        version = rest  # NOQA
-                    info["version"] = (op, version)
-            yield info
-
-    def parse_require_file(fpath):
-        with open(fpath, "r") as f:
-            for line in f.readlines():
-                line = line.strip()
-                if line and not line.startswith("#"):
-                    for info in parse_line(line):
-                        yield info
-
-    def gen_packages_items():
-        if exists(require_fpath):
-            for info in parse_require_file(require_fpath):
-                parts = [info["package"]]
-                if with_version and "version" in info:
-                    parts.extend(info["version"])
-                if not sys.version.startswith("3.4"):
-                    # apparently package_deps are broken in 3.4
-                    platform_deps = info.get("platform_deps")
-                    if platform_deps is not None:
-                        parts.append(";" + platform_deps)
-                item = "".join(parts)
-                yield item
-
-    packages = list(gen_packages_items())
-    return packages
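
A quick sketch of the version-stripping branch above, reduced to a standalone helper (the sample requirement strings are made up):

```python
import re

def strip_version(line: str) -> str:
    """Drop the version specifier from a single requirement line."""
    # Split on the first >=, == or > operator, mirroring `pat` above.
    pat = "(" + "|".join([">=", "==", ">"]) + ")"
    return re.split(pat, line, maxsplit=1)[0].strip()

assert strip_version("torch>=1.9.0") == "torch"
assert strip_version("opencv-python") == "opencv-python"
```
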
-
-
-if __name__ == "__main__":
-    print(f"Building wheel {package_name}-{version}")
-
-    with open("LICENSE", "r", encoding="utf-8") as f:
-        license = f.read()
-
-    write_version_file()
-
-    setup(
-        name="groundingdino",
-        version="0.1.0",
-        author="International Digital Economy Academy, Shilong Liu",
-        url="https://github.com/IDEA-Research/GroundingDINO",
-        description="open-set object detector",
-        license=license,
-        install_requires=parse_requirements("requirements.txt"),
-        packages=find_packages(
-            exclude=(
-                "configs",
-                "tests",
-            )
-        ),
-        ext_modules=get_extensions(),
-        cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
-    )
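
The build logic in `get_extensions` follows a common PyTorch pattern: probe for a usable CUDA toolkit at build time and fall back to a CPU-only extension otherwise. A minimal sketch of that decision, with an illustrative module name:

```python
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension

def pick_extension(cpp_sources, cuda_sources):
    """Return a CUDA extension when a toolkit is usable, else C++-only."""
    if torch.cuda.is_available() and CUDA_HOME is not None:
        return CUDAExtension(
            "example._C",  # illustrative name, stands in for groundingdino._C
            cpp_sources + cuda_sources,
            define_macros=[("WITH_CUDA", None)],
            extra_compile_args={"cxx": [], "nvcc": ["-DCUDA_HAS_FP16=1"]},
        )
    return CppExtension("example._C", cpp_sources)
```
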
diff --git a/spaces/codertoro/gpt-academic/crazy_functional.py b/spaces/codertoro/gpt-academic/crazy_functional.py
deleted file mode 100644
index 2dcbf93291048d155122f22c991619867aa5f2c5..0000000000000000000000000000000000000000
--- a/spaces/codertoro/gpt-academic/crazy_functional.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from toolbox import HotReload  # HotReload means hot reload: after editing a function plugin, changes take effect immediately without restarting the program
-
-
-def get_crazy_functions():
-    ###################### Group 1 plugins ###########################
-    # [Group 1]: the earliest project plugins, plus some demos
-    from crazy_functions.读文章写摘要 import 读文章写摘要
-    from crazy_functions.生成函数注释 import 批量生成函数注释
-    from crazy_functions.解析项目源代码 import 解析项目本身
-    from crazy_functions.解析项目源代码 import 解析一个Python项目
-    from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
-    from crazy_functions.解析项目源代码 import 解析一个C项目
-    from crazy_functions.解析项目源代码 import 解析一个Golang项目
-    from crazy_functions.解析项目源代码 import 解析一个Java项目
-    from crazy_functions.解析项目源代码 import 解析一个Rect项目
-    from crazy_functions.高级功能函数模板 import 高阶功能模板函数
-    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
-    from crazy_functions.Latex全文润色 import Latex英文润色
-
-    function_plugins = {
-
-        "解析整个Python项目": {
-            "Color": "stop",    # button color
-            "Function": HotReload(解析一个Python项目)
-        },
-        "解析整个C++项目头文件": {
-            "Color": "stop",    # button color
-            "Function": HotReload(解析一个C项目的头文件)
-        },
-        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
-            "Color": "stop",    # button color
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(解析一个C项目)
-        },
-        "解析整个Go项目": {
-            "Color": "stop",    # button color
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(解析一个Golang项目)
-        },
-        "解析整个Java项目": {
-            "Color": "stop",  # button color
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(解析一个Java项目)
-        },
-        "解析整个React项目": {
-            "Color": "stop",  # button color
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(解析一个Rect项目)
-        },
-        "读Tex论文写摘要": {
-            "Color": "stop",    # button color
-            "Function": HotReload(读文章写摘要)
-        },
-        "批量生成函数注释": {
-            "Color": "stop",    # button color
-            "Function": HotReload(批量生成函数注释)
-        },
-        "[多线程Demo] 解析此项目本身(源码自译解)": {
-            "Function": HotReload(解析项目本身)
-        },
-        "[多线程demo] 把本项目源代码切换成全英文": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(全项目切换英文)
-        },
-        "[函数插件模板Demo] 历史上的今天": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Function": HotReload(高阶功能模板函数)
-        },
-
-    }
-    ###################### Group 2 plugins ###########################
-    # [Group 2]: thoroughly tested, but functionally still a little short of perfect
-    from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
-    from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
-    from crazy_functions.总结word文档 import 总结word文档
-    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
-    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
-    from crazy_functions.理解PDF文档内容 import 理解PDF文档内容
-    from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
-    from crazy_functions.Latex全文润色 import Latex中文润色
-    from crazy_functions.Latex全文翻译 import Latex中译英
-    from crazy_functions.Latex全文翻译 import Latex英译中
-
-    function_plugins.update({
-        "批量翻译PDF文档(多线程)": {
-            "Color": "stop",
-            "AsButton": True,  # shown as a main button
-            "Function": HotReload(批量翻译PDF文档)
-        },
-        "[测试功能] 批量总结PDF文档": {
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Function": HotReload(批量总结PDF文档)
-        },
-        "[测试功能] 批量总结PDF文档pdfminer": {
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(批量总结PDF文档pdfminer)
-        },
-        "谷歌学术检索助手(输入谷歌学术搜索页url)": {
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(谷歌检索小助手)
-        },
-        "批量总结Word文档": {
-            "Color": "stop",
-            "Function": HotReload(总结word文档)
-        },
-        # "[测试功能] 理解PDF文档内容(Tk文件选择接口,仅本地)": {
-        #     # HotReload: edits to the plugin code take effect without restarting the program
-        #     "AsButton": False,  # put in the drop-down menu, not shown as a button
-        #     "Function": HotReload(理解PDF文档内容)
-        # },
-        "[测试功能] 理解PDF文档内容(通用接口,读取文件输入区)": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(理解PDF文档内容标准文件输入)
-        },
-        "[测试功能] 英文Latex项目全文润色(输入路径或上传压缩包)": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(Latex英文润色)
-        },
-        "[测试功能] 中文Latex项目全文润色(输入路径或上传压缩包)": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(Latex中文润色)
-        },
-        "[测试功能] Latex项目全文中译英(输入路径或上传压缩包)": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(Latex中译英)
-        },
-        "[测试功能] Latex项目全文英译中(输入路径或上传压缩包)": {
-            # HotReload: edits to the plugin code take effect without restarting the program
-            "Color": "stop",
-            "AsButton": False,  # put in the drop-down menu, not shown as a button
-            "Function": HotReload(Latex英译中)
-        },
-    })
-
-    ###################### Group 3 plugins ###########################
-    # [Group 3]: function plugins that have not been fully tested yet go here
-    try:
-        from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
-        function_plugins.update({
-            "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
-                "Color": "stop",
-                "AsButton": False,  # 加入下拉菜单中
-                "Function": HotReload(下载arxiv论文并翻译摘要)
-            }
-        })
-
-    except Exception as err:
-        print(f'[下载arxiv论文翻译摘要] plugin import failed: {str(err)}')
-
-    ###################### Group n plugins ###########################
-    return function_plugins
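
The file above is essentially a plugin registry: each entry maps a UI label to a hot-reloadable handler plus presentation flags, and not-yet-stable plugins are imported inside try/except so one broken import cannot take down the whole menu. A stripped-down sketch of the same pattern, with made-up plugin names and a trivial stand-in for `HotReload`:

```python
def hot_reload(fn):
    # Stand-in for toolbox.HotReload; here it just returns the function.
    return fn

def say_hello():
    return "hello"

def get_plugins():
    plugins = {
        "Say hello": {"Color": "stop", "AsButton": True,
                      "Function": hot_reload(say_hello)},
    }
    try:
        from experimental_plugins import risky_feature  # hypothetical module
        plugins["Risky feature"] = {"AsButton": False,
                                    "Function": hot_reload(risky_feature)}
    except Exception as err:
        print(f"plugin import failed: {err}")
    return plugins
```
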
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bit_depth_template.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bit_depth_template.c
deleted file mode 100644
index d44d47ea456be2f5cbb5c02b1588e4edaa8ef55b..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bit_depth_template.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "mathops.h"
-#include "rnd_avg.h"
-#include "libavutil/intreadwrite.h"
-
-#ifndef BIT_DEPTH
-#define BIT_DEPTH 8
-#endif
-
-#ifdef AVCODEC_BIT_DEPTH_TEMPLATE_C
-#   undef pixel
-#   undef pixel2
-#   undef pixel4
-#   undef dctcoef
-#   undef idctin
-#   undef INIT_CLIP
-#   undef no_rnd_avg_pixel4
-#   undef rnd_avg_pixel4
-#   undef AV_RN2P
-#   undef AV_RN4P
-#   undef AV_RN4PA
-#   undef AV_WN2P
-#   undef AV_WN4P
-#   undef AV_WN4PA
-#   undef CLIP
-#   undef FUNC
-#   undef FUNCC
-#   undef av_clip_pixel
-#   undef PIXEL_SPLAT_X4
-#else
-#   define AVCODEC_BIT_DEPTH_TEMPLATE_C
-#endif
-
-#if BIT_DEPTH > 8
-#   define pixel  uint16_t
-#   define pixel2 uint32_t
-#   define pixel4 uint64_t
-#   define dctcoef int32_t
-
-#ifdef IN_IDCT_DEPTH
-#if IN_IDCT_DEPTH == 32
-#   define idctin int32_t
-#else
-#   define idctin int16_t
-#endif
-#else
-#   define idctin int16_t
-#endif
-
-#   define INIT_CLIP
-#   define no_rnd_avg_pixel4 no_rnd_avg64
-#   define    rnd_avg_pixel4    rnd_avg64
-#   define AV_RN2P  AV_RN32
-#   define AV_RN4P  AV_RN64
-#   define AV_RN4PA AV_RN64A
-#   define AV_WN2P  AV_WN32
-#   define AV_WN4P  AV_WN64
-#   define AV_WN4PA AV_WN64A
-#   define PIXEL_SPLAT_X4(x) ((x)*0x0001000100010001ULL)
-
-#   define av_clip_pixel(a) av_clip_uintp2(a, BIT_DEPTH)
-#   define CLIP(a)          av_clip_uintp2(a, BIT_DEPTH)
-#else
-#   define pixel  uint8_t
-#   define pixel2 uint16_t
-#   define pixel4 uint32_t
-#   define dctcoef int16_t
-#   define idctin  int16_t
-
-#   define INIT_CLIP
-#   define no_rnd_avg_pixel4 no_rnd_avg32
-#   define    rnd_avg_pixel4    rnd_avg32
-#   define AV_RN2P  AV_RN16
-#   define AV_RN4P  AV_RN32
-#   define AV_RN4PA AV_RN32A
-#   define AV_WN2P  AV_WN16
-#   define AV_WN4P  AV_WN32
-#   define AV_WN4PA AV_WN32A
-#   define PIXEL_SPLAT_X4(x) ((x)*0x01010101U)
-
-#   define av_clip_pixel(a) av_clip_uint8(a)
-#   define CLIP(a) av_clip_uint8(a)
-#endif
-
-#define FUNC3(a, b, c)  a ## _ ## b ##  c
-#define FUNC2(a, b, c)  FUNC3(a, b, c)
-#define FUNC(a)  FUNC2(a, BIT_DEPTH,)
-#define FUNCC(a) FUNC2(a, BIT_DEPTH, _c)
-#define FUNC4(a, b, c)  a ## _int ## b ## _ ## c ## bit
-#define FUNC5(a, b, c)  FUNC4(a, b, c)
-#define FUNC6(a)  FUNC5(a, IN_IDCT_DEPTH, BIT_DEPTH)
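
This template is meant to be #included once per bit depth, with `pixel`, `CLIP`, and the `FUNC`/`FUNCC` name-mangling macros redefined before each inclusion, so a single function body expands into `foo_8_c`, `foo_10_c`, and so on. A rough Python analogue of that instantiation trick (illustrative only; FFmpeg does this purely with the C preprocessor):

```python
def make_clip(bit_depth):
    """Build a per-depth clip function, like av_clip_uintp2(a, BIT_DEPTH)."""
    max_val = (1 << bit_depth) - 1
    def clip(a):
        return min(max(a, 0), max_val)
    clip.__name__ = f"clip_{bit_depth}_c"  # mirrors the FUNCC() mangling
    return clip

clip_8_c, clip_10_c = make_clip(8), make_clip(10)
assert clip_8_c(300) == 255 and clip_10_c(300) == 300
```
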
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct32_fixed.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct32_fixed.c
deleted file mode 100644
index 9025d5efdd6fa1aff897918fe18833b560c5cabd..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct32_fixed.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define DCT32_FLOAT 0
-#include "dct32_template.c"
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/defs.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/defs.h
deleted file mode 100644
index fbe3254db2506349b47a5b640645602162e7d777..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/defs.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_DEFS_H
-#define AVCODEC_DEFS_H
-
-/**
- * @file
- * @ingroup libavc
- * Misc types and constants that do not belong anywhere else.
- */
-
-#include <stdint.h>
-#include <stdlib.h>
-
-/**
- * @ingroup lavc_decoding
- * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
- * This is mainly needed because some optimized bitstream readers read
- * 32 or 64 bits at a time and could read past the end.<br>
- * Note: If the first 23 bits of the additional bytes are not 0, then damaged
- * MPEG bitstreams could cause overread and segfault.
- */
-#define AV_INPUT_BUFFER_PADDING_SIZE 64
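
In practice this means callers must over-allocate and zero-pad any buffer they hand to a decoder. A sketch of the caller-side consequence (illustrative helper, not an FFmpeg API):

```python
AV_INPUT_BUFFER_PADDING_SIZE = 64

def pad_for_decoder(data: bytes) -> bytes:
    """Append zeroed padding so optimized bitstream readers can overread safely."""
    return data + b"\x00" * AV_INPUT_BUFFER_PADDING_SIZE
```
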
-
-/**
- * Verify checksums embedded in the bitstream (could be of either encoded or
- * decoded data, depending on the format) and print an error message on mismatch.
- * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
- * decoder/demuxer returning an error.
- */
-#define AV_EF_CRCCHECK       (1<<0)
-#define AV_EF_BITSTREAM      (1<<1)   ///< detect bitstream specification deviations
-#define AV_EF_BUFFER         (1<<2)   ///< detect improper bitstream length
-#define AV_EF_EXPLODE        (1<<3)   ///< abort decoding on minor error detection
-
-#define AV_EF_IGNORE_ERR     (1<<15)  ///< ignore errors and continue
-#define AV_EF_CAREFUL        (1<<16)  ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
-#define AV_EF_COMPLIANT      (1<<17)  ///< consider all spec non-compliances as errors
-#define AV_EF_AGGRESSIVE     (1<<18)  ///< consider things that a sane encoder/muxer should not do as an error
-
-#define FF_COMPLIANCE_VERY_STRICT   2 ///< Strictly conform to an older, more strict version of the spec or reference software.
-#define FF_COMPLIANCE_STRICT        1 ///< Strictly conform to all the things in the spec no matter what consequences.
-#define FF_COMPLIANCE_NORMAL        0
-#define FF_COMPLIANCE_UNOFFICIAL   -1 ///< Allow unofficial extensions
-#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
-
-/**
- * @ingroup lavc_decoding
- */
-enum AVDiscard{
-    /* We leave some space between them for extensions (drop some
-     * keyframes for intra-only or drop just some bidir frames). */
-    AVDISCARD_NONE    =-16, ///< discard nothing
-    AVDISCARD_DEFAULT =  0, ///< discard useless packets like 0 size packets in avi
-    AVDISCARD_NONREF  =  8, ///< discard all non reference
-    AVDISCARD_BIDIR   = 16, ///< discard all bidirectional frames
-    AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
-    AVDISCARD_NONKEY  = 32, ///< discard all frames except keyframes
-    AVDISCARD_ALL     = 48, ///< discard all
-};
-
-enum AVAudioServiceType {
-    AV_AUDIO_SERVICE_TYPE_MAIN              = 0,
-    AV_AUDIO_SERVICE_TYPE_EFFECTS           = 1,
-    AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
-    AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED  = 3,
-    AV_AUDIO_SERVICE_TYPE_DIALOGUE          = 4,
-    AV_AUDIO_SERVICE_TYPE_COMMENTARY        = 5,
-    AV_AUDIO_SERVICE_TYPE_EMERGENCY         = 6,
-    AV_AUDIO_SERVICE_TYPE_VOICE_OVER        = 7,
-    AV_AUDIO_SERVICE_TYPE_KARAOKE           = 8,
-    AV_AUDIO_SERVICE_TYPE_NB                   , ///< Not part of ABI
-};
-
-/**
- * Pan Scan area.
- * This specifies the area which should be displayed.
- * Note there may be multiple such areas for one frame.
- */
-typedef struct AVPanScan {
-    /**
-     * id
-     * - encoding: Set by user.
-     * - decoding: Set by libavcodec.
-     */
-    int id;
-
-    /**
-     * width and height in 1/16 pel
-     * - encoding: Set by user.
-     * - decoding: Set by libavcodec.
-     */
-    int width;
-    int height;
-
-    /**
-     * position of the top left corner in 1/16 pel for up to 3 fields/frames
-     * - encoding: Set by user.
-     * - decoding: Set by libavcodec.
-     */
-    int16_t position[3][2];
-} AVPanScan;
-
-/**
- * This structure describes the bitrate properties of an encoded bitstream. It
- * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
- * parameters for H.264/HEVC.
- */
-typedef struct AVCPBProperties {
-    /**
-     * Maximum bitrate of the stream, in bits per second.
-     * Zero if unknown or unspecified.
-     */
-    int64_t max_bitrate;
-    /**
-     * Minimum bitrate of the stream, in bits per second.
-     * Zero if unknown or unspecified.
-     */
-    int64_t min_bitrate;
-    /**
-     * Average bitrate of the stream, in bits per second.
-     * Zero if unknown or unspecified.
-     */
-    int64_t avg_bitrate;
-
-    /**
-     * The size of the buffer to which the ratecontrol is applied, in bits.
-     * Zero if unknown or unspecified.
-     */
-    int64_t buffer_size;
-
-    /**
-     * The delay between the time the packet this structure is associated with
-     * is received and the time when it should be decoded, in periods of a 27MHz
-     * clock.
-     *
-     * UINT64_MAX when unknown or unspecified.
-     */
-    uint64_t vbv_delay;
-} AVCPBProperties;
-
-/**
- * Allocate a CPB properties structure and initialize its fields to default
- * values.
- *
- * @param size if non-NULL, the size of the allocated struct will be written
- *             here. This is useful for embedding it in side data.
- *
- * @return the newly allocated struct or NULL on failure
- */
-AVCPBProperties *av_cpb_properties_alloc(size_t *size);
-
-/**
- * This structure supplies correlation between a packet timestamp and a wall clock
- * production time. The definition follows the Producer Reference Time ('prft')
- * as defined in ISO/IEC 14496-12
- */
-typedef struct AVProducerReferenceTime {
-    /**
-     * A UTC timestamp, in microseconds, since Unix epoch (e.g., av_gettime()).
-     */
-    int64_t wallclock;
-    int flags;
-} AVProducerReferenceTime;
-
-/**
- * Encode extradata length to a buffer. Used by xiph codecs.
- *
- * @param s buffer to write to; must be at least (v/255+1) bytes long
- * @param v size of extradata in bytes
- * @return number of bytes written to the buffer.
- */
-unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
-
-#endif // AVCODEC_DEFS_H
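
The lacing scheme documented for `av_xiphlacing` is easy to mirror from its contract: write `v / 255` bytes of 255 followed by one byte holding `v % 255`, i.e. `v/255 + 1` bytes in total. A small sketch of the same encoding:

```python
def xiph_lacing(v: int) -> bytes:
    """Encode a length as Xiph lacing: a run of 255s, then the remainder."""
    return b"\xff" * (v // 255) + bytes([v % 255])

assert xiph_lacing(0) == b"\x00"
assert xiph_lacing(255) == b"\xff\x00"
assert xiph_lacing(300) == b"\xff\x2d"  # 255 + 45
```
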
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/evrcdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/evrcdec.c
deleted file mode 100644
index af7640d7e15ded6da8417dc994d55c481a5e4fe9..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/evrcdec.c
+++ /dev/null
@@ -1,942 +0,0 @@
-/*
- * Enhanced Variable Rate Codec, Service Option 3 decoder
- * Copyright (c) 2013 Paul B Mahol
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Enhanced Variable Rate Codec, Service Option 3 decoder
- * @author Paul B Mahol
- */
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include "get_bits.h"
-#include "evrcdata.h"
-#include "acelp_vectors.h"
-#include "lsp.h"
-
-#define MIN_LSP_SEP (0.05 / (2.0 * M_PI))
-#define MIN_DELAY      20
-#define MAX_DELAY     120
-#define NB_SUBFRAMES    3
-#define SUBFRAME_SIZE  54
-#define FILTER_ORDER   10
-#define ACB_SIZE      128
-
-typedef enum {
-    RATE_ERRS = -1,
-    SILENCE,
-    RATE_QUANT,
-    RATE_QUARTER,
-    RATE_HALF,
-    RATE_FULL,
-} evrc_packet_rate;
-
-/**
- * EVRC-A unpacked data frame
- */
-typedef struct EVRCAFrame {
-    uint8_t  lpc_flag;        ///< spectral change indicator
-    uint16_t lsp[4];          ///< index into LSP codebook
-    uint8_t  pitch_delay;     ///< pitch delay for entire frame
-    uint8_t  delay_diff;      ///< delay difference for entire frame
-    uint8_t  acb_gain[3];     ///< adaptive codebook gain
-    uint16_t fcb_shape[3][4]; ///< fixed codebook shape
-    uint8_t  fcb_gain[3];     ///< fixed codebook gain index
-    uint8_t  energy_gain;     ///< frame energy gain index
-    uint8_t  tty;             ///< tty baud rate bit
-} EVRCAFrame;
-
-typedef struct EVRCContext {
-    AVClass *class;
-
-    int              postfilter;
-
-    GetBitContext    gb;
-    evrc_packet_rate bitrate;
-    evrc_packet_rate last_valid_bitrate;
-    EVRCAFrame       frame;
-
-    float            lspf[FILTER_ORDER];
-    float            prev_lspf[FILTER_ORDER];
-    float            synthesis[FILTER_ORDER];
-    float            postfilter_fir[FILTER_ORDER];
-    float            postfilter_iir[FILTER_ORDER];
-    float            postfilter_residual[ACB_SIZE + SUBFRAME_SIZE];
-    float            pitch_delay;
-    float            prev_pitch_delay;
-    float            avg_acb_gain;  ///< average adaptive codebook gain
-    float            avg_fcb_gain;  ///< average fixed codebook gain
-    float            pitch[ACB_SIZE + FILTER_ORDER + SUBFRAME_SIZE];
-    float            pitch_back[ACB_SIZE];
-    float            interpolation_coeffs[136];
-    float            energy_vector[NB_SUBFRAMES];
-    float            fade_scale;
-    float            last;
-
-    uint8_t          prev_energy_gain;
-    uint8_t          prev_error_flag;
-    uint8_t          warned_buf_mismatch_bitrate;
-} EVRCContext;
-
-/**
- * Frame unpacking for RATE_FULL, RATE_HALF and RATE_QUANT
- *
- * @param e the context
- *
- * TIA/IS-127 Table 4.21-1
- */
-static void unpack_frame(EVRCContext *e)
-{
-    EVRCAFrame *frame = &e->frame;
-    GetBitContext *gb = &e->gb;
-
-    switch (e->bitrate) {
-    case RATE_FULL:
-        frame->lpc_flag        = get_bits1(gb);
-        frame->lsp[0]          = get_bits(gb,  6);
-        frame->lsp[1]          = get_bits(gb,  6);
-        frame->lsp[2]          = get_bits(gb,  9);
-        frame->lsp[3]          = get_bits(gb,  7);
-        frame->pitch_delay     = get_bits(gb,  7);
-        frame->delay_diff      = get_bits(gb,  5);
-        frame->acb_gain[0]     = get_bits(gb,  3);
-        frame->fcb_shape[0][0] = get_bits(gb,  8);
-        frame->fcb_shape[0][1] = get_bits(gb,  8);
-        frame->fcb_shape[0][2] = get_bits(gb,  8);
-        frame->fcb_shape[0][3] = get_bits(gb, 11);
-        frame->fcb_gain[0]     = get_bits(gb,  5);
-        frame->acb_gain[1]     = get_bits(gb,  3);
-        frame->fcb_shape[1][0] = get_bits(gb,  8);
-        frame->fcb_shape[1][1] = get_bits(gb,  8);
-        frame->fcb_shape[1][2] = get_bits(gb,  8);
-        frame->fcb_shape[1][3] = get_bits(gb, 11);
-        frame->fcb_gain    [1] = get_bits(gb,  5);
-        frame->acb_gain    [2] = get_bits(gb,  3);
-        frame->fcb_shape[2][0] = get_bits(gb,  8);
-        frame->fcb_shape[2][1] = get_bits(gb,  8);
-        frame->fcb_shape[2][2] = get_bits(gb,  8);
-        frame->fcb_shape[2][3] = get_bits(gb, 11);
-        frame->fcb_gain    [2] = get_bits(gb,  5);
-        frame->tty             = get_bits1(gb);
-        break;
-    case RATE_HALF:
-        frame->lsp         [0] = get_bits(gb,  7);
-        frame->lsp         [1] = get_bits(gb,  7);
-        frame->lsp         [2] = get_bits(gb,  8);
-        frame->pitch_delay     = get_bits(gb,  7);
-        frame->acb_gain    [0] = get_bits(gb,  3);
-        frame->fcb_shape[0][0] = get_bits(gb, 10);
-        frame->fcb_gain    [0] = get_bits(gb,  4);
-        frame->acb_gain    [1] = get_bits(gb,  3);
-        frame->fcb_shape[1][0] = get_bits(gb, 10);
-        frame->fcb_gain    [1] = get_bits(gb,  4);
-        frame->acb_gain    [2] = get_bits(gb,  3);
-        frame->fcb_shape[2][0] = get_bits(gb, 10);
-        frame->fcb_gain    [2] = get_bits(gb,  4);
-        break;
-    case RATE_QUANT:
-        frame->lsp         [0] = get_bits(gb, 4);
-        frame->lsp         [1] = get_bits(gb, 4);
-        frame->energy_gain     = get_bits(gb, 8);
-        break;
-    }
-}
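
`unpack_frame` is a direct transcription of the bit layout in TIA/IS-127 Table 4.21-1: fixed-width fields are read most-significant-bit first. A tiny reader in the same spirit (a sketch; the field widths are the RATE_QUANT ones read above, the input bytes are arbitrary):

```python
class BitReader:
    def __init__(self, data: bytes):
        self.bits = "".join(f"{b:08b}" for b in data)  # MSB-first bit string
        self.pos = 0

    def get_bits(self, n: int) -> int:
        val = int(self.bits[self.pos:self.pos + n], 2)
        self.pos += n
        return val

r = BitReader(b"\xab\xcd")
lsp0, lsp1, energy_gain = r.get_bits(4), r.get_bits(4), r.get_bits(8)
assert (lsp0, lsp1, energy_gain) == (0xA, 0xB, 0xCD)
```
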
-
-static evrc_packet_rate buf_size2bitrate(const int buf_size)
-{
-    switch (buf_size) {
-    case 23: return RATE_FULL;
-    case 11: return RATE_HALF;
-    case  6: return RATE_QUARTER;
-    case  3: return RATE_QUANT;
-    case  1: return SILENCE;
-    }
-
-    return RATE_ERRS;
-}
-
-/**
- * Determine the bitrate from the frame size and/or the first byte of the frame.
- *
- * @param avctx the AV codec context
- * @param buf_size length of the buffer
- * @param buf the buffer
- *
- * @return the bitrate on success,
- *         RATE_ERRS  if the bitrate cannot be satisfactorily determined
- */
-static evrc_packet_rate determine_bitrate(AVCodecContext *avctx,
-                                          int *buf_size,
-                                          const uint8_t **buf)
-{
-    evrc_packet_rate bitrate;
-
-    if ((bitrate = buf_size2bitrate(*buf_size)) >= 0) {
-        if (bitrate > **buf) {
-            EVRCContext *e = avctx->priv_data;
-            if (!e->warned_buf_mismatch_bitrate) {
-                av_log(avctx, AV_LOG_WARNING,
-                       "Claimed bitrate and buffer size mismatch.\n");
-                e->warned_buf_mismatch_bitrate = 1;
-            }
-            bitrate = **buf;
-        } else if (bitrate < **buf) {
-            av_log(avctx, AV_LOG_ERROR,
-                   "Buffer is too small for the claimed bitrate.\n");
-            return RATE_ERRS;
-        }
-        (*buf)++;
-        *buf_size -= 1;
-    } else if ((bitrate = buf_size2bitrate(*buf_size + 1)) >= 0) {
-        av_log(avctx, AV_LOG_DEBUG,
-               "Bitrate byte is missing, guessing the bitrate from packet size.\n");
-    } else
-        return RATE_ERRS;
-
-    return bitrate;
-}
-
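
The rate decision is driven by the packet-size table in `buf_size2bitrate`, with a fallback for packets whose leading rate byte is missing. A compact sketch of that mapping (simplified: it skips the rate-byte consistency check done above):

```python
SIZE_TO_RATE = {23: "RATE_FULL", 11: "RATE_HALF", 6: "RATE_QUARTER",
                3: "RATE_QUANT", 1: "SILENCE"}

def rate_from_packet(buf: bytes) -> str:
    if len(buf) in SIZE_TO_RATE:          # size includes the rate byte
        return SIZE_TO_RATE[len(buf)]
    if len(buf) + 1 in SIZE_TO_RATE:      # rate byte missing, guess from size
        return SIZE_TO_RATE[len(buf) + 1]
    return "RATE_ERRS"
```
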
-static void warn_insufficient_frame_quality(AVCodecContext *avctx,
-                                            const char *message)
-{
-    av_log(avctx, AV_LOG_WARNING, "Frame #%"PRId64", %s\n",
-           avctx->frame_num, message);
-}
-
-/**
- * Initialize the speech codec according to the specification.
- *
- * TIA/IS-127 5.2
- */
-static av_cold int evrc_decode_init(AVCodecContext *avctx)
-{
-    EVRCContext *e = avctx->priv_data;
-    int i, n, idx = 0;
-    float denom = 2.0 / (2.0 * 8.0 + 1.0);
-
-    av_channel_layout_uninit(&avctx->ch_layout);
-    avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
-    avctx->sample_fmt     = AV_SAMPLE_FMT_FLT;
-
-    for (i = 0; i < FILTER_ORDER; i++) {
-        e->prev_lspf[i] = (i + 1) * 0.048;
-        e->synthesis[i] = 0.0;
-    }
-
-    for (i = 0; i < ACB_SIZE; i++)
-        e->pitch[i] = e->pitch_back[i] = 0.0;
-
-    e->last_valid_bitrate = RATE_QUANT;
-    e->prev_pitch_delay   = 40.0;
-    e->fade_scale         = 1.0;
-    e->prev_error_flag    = 0;
-    e->avg_acb_gain = e->avg_fcb_gain = 0.0;
-
-    for (i = 0; i < 8; i++) {
-        float tt = ((float)i - 8.0 / 2.0) / 8.0;
-
-        for (n = -8; n <= 8; n++, idx++) {
-            float arg1 = M_PI * 0.9 * (tt - n);
-            float arg2 = M_PI * (tt - n);
-
-            e->interpolation_coeffs[idx] = 0.9;
-            if (arg1)
-                e->interpolation_coeffs[idx] *= (0.54 + 0.46 * cos(arg2 * denom)) *
-                                                 sin(arg1) / arg1;
-        }
-    }
-
-    return 0;
-}
-
-/**
- * Decode the 10 vector quantized line spectral pair frequencies from the LSP
- * transmission codes of any bitrate and check for badly received packets.
- *
- * @param e the context
- *
- * @return 0 on success, -1 if the packet is badly received
- *
- * TIA/IS-127 5.2.1, 5.7.1
- */
-static int decode_lspf(EVRCContext *e)
-{
-    const float * const *codebooks = evrc_lspq_codebooks[e->bitrate];
-    int i, j, k = 0;
-
-    for (i = 0; i < evrc_lspq_nb_codebooks[e->bitrate]; i++) {
-        int row_size = evrc_lspq_codebooks_row_sizes[e->bitrate][i];
-        const float *codebook = codebooks[i];
-
-        for (j = 0; j < row_size; j++)
-            e->lspf[k++] = codebook[e->frame.lsp[i] * row_size + j];
-    }
-
-    // check for monotonic LSPs
-    for (i = 1; i < FILTER_ORDER; i++)
-        if (e->lspf[i] <= e->lspf[i - 1])
-            return -1;
-
-    // check for minimum separation of LSPs at the splits
-    for (i = 0, k = 0; i < evrc_lspq_nb_codebooks[e->bitrate] - 1; i++) {
-        k += evrc_lspq_codebooks_row_sizes[e->bitrate][i];
-        if (e->lspf[k] - e->lspf[k - 1] <= MIN_LSP_SEP)
-            return -1;
-    }
-
-    return 0;
-}
-
-/*
- * Interpolation of LSP parameters.
- *
- * TIA/IS-127 5.2.3.1, 5.7.3.2
- */
-static void interpolate_lsp(float *ilsp, const float *lsp,
-                            const float *prev, int index)
-{
-    static const float lsp_interpolation_factors[] = { 0.1667, 0.5, 0.8333 };
-    ff_weighted_vector_sumf(ilsp, prev, lsp,
-                            1.0 - lsp_interpolation_factors[index],
-                            lsp_interpolation_factors[index], FILTER_ORDER);
-}
-
-/*
- * Reconstruction of the delay contour.
- *
- * TIA/IS-127 5.2.2.3.2
- */
-static void interpolate_delay(float *dst, float current, float prev, int index)
-{
-    static const float d_interpolation_factors[] = { 0, 0.3313, 0.6625, 1, 1 };
-    dst[0] = (1.0 - d_interpolation_factors[index    ]) * prev
-                  + d_interpolation_factors[index    ]  * current;
-    dst[1] = (1.0 - d_interpolation_factors[index + 1]) * prev
-                  + d_interpolation_factors[index + 1]  * current;
-    dst[2] = (1.0 - d_interpolation_factors[index + 2]) * prev
-                  + d_interpolation_factors[index + 2]  * current;
-}
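
Each subframe's delay contour is a linear blend of the previous and current pitch delay, taking three consecutive interpolation factors per subframe index. The same computation as a sketch:

```python
D_FACTORS = [0.0, 0.3313, 0.6625, 1.0, 1.0]

def interpolate_delay(current: float, prev: float, index: int) -> list:
    """Blend prev -> current with factors index, index+1, index+2."""
    return [(1.0 - f) * prev + f * current
            for f in D_FACTORS[index:index + 3]]
```
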
-
-/*
- * Convert the quantized, interpolated line spectral frequencies,
- * to prediction coefficients.
- *
- * TIA/IS-127 5.2.3.2, 4.7.2.2
- */
-static void decode_predictor_coeffs(const float *ilspf, float *ilpc)
-{
-    double lsp[FILTER_ORDER];
-    float a[FILTER_ORDER / 2 + 1], b[FILTER_ORDER / 2 + 1];
-    float a1[FILTER_ORDER / 2] = { 0 };
-    float a2[FILTER_ORDER / 2] = { 0 };
-    float b1[FILTER_ORDER / 2] = { 0 };
-    float b2[FILTER_ORDER / 2] = { 0 };
-    int i, k;
-
-    ff_acelp_lsf2lspd(lsp, ilspf, FILTER_ORDER);
-
-    for (k = 0; k <= FILTER_ORDER; k++) {
-        a[0] = k < 2 ? 0.25 : 0;
-        b[0] = k < 2 ? k < 1 ? 0.25 : -0.25 : 0;
-
-        for (i = 0; i < FILTER_ORDER / 2; i++) {
-            a[i + 1] = a[i] - 2 * lsp[i * 2    ] * a1[i] + a2[i];
-            b[i + 1] = b[i] - 2 * lsp[i * 2 + 1] * b1[i] + b2[i];
-            a2[i] = a1[i];
-            a1[i] = a[i];
-            b2[i] = b1[i];
-            b1[i] = b[i];
-        }
-
-        if (k)
-            ilpc[k - 1] = 2.0 * (a[FILTER_ORDER / 2] + b[FILTER_ORDER / 2]);
-    }
-}
-
-static void bl_intrp(EVRCContext *e, float *ex, float delay)
-{
-    float *f;
-    int offset, i, coef_idx;
-    int16_t t;
-
-    offset = lrintf(delay);
-
-    t = (offset - delay + 0.5) * 8.0 + 0.5;
-    if (t == 8) {
-        t = 0;
-        offset--;
-    }
-
-    f = ex - offset - 8;
-
-    coef_idx = t * (2 * 8 + 1);
-
-    ex[0] = 0.0;
-    for (i = 0; i < 2 * 8 + 1; i++)
-        ex[0] += e->interpolation_coeffs[coef_idx + i] * f[i];
-}
-
-/*
- * Adaptive codebook excitation.
- *
- * TIA/IS-127 5.2.2.3.3, 4.12.5.2
- */
-static void acb_excitation(EVRCContext *e, float *excitation, float gain,
-                           const float delay[3], int length)
-{
-    float denom, locdelay, dpr, invl;
-    int i;
-
-    invl = 1.0 / ((float) length);
-    dpr = length;
-
-    /* first at-most extra samples */
-    denom = (delay[1] - delay[0]) * invl;
-    for (i = 0; i < dpr; i++) {
-        locdelay = delay[0] + i * denom;
-        bl_intrp(e, excitation + i, locdelay);
-    }
-
-    denom = (delay[2] - delay[1]) * invl;
-    /* interpolation */
-    for (i = dpr; i < dpr + 10; i++) {
-        locdelay = delay[1] + (i - dpr) * denom;
-        bl_intrp(e, excitation + i, locdelay);
-    }
-
-    for (i = 0; i < length; i++)
-        excitation[i] *= gain;
-}
-
-static void decode_8_pulses_35bits(const uint16_t *fixed_index, float *cod)
-{
-    int i, pos1, pos2, offset;
-
-    offset = (fixed_index[3] >> 9) & 3;
-
-    for (i = 0; i < 3; i++) {
-        pos1 = ((fixed_index[i] & 0x7f) / 11) * 5 + ((i + offset) % 5);
-        pos2 = ((fixed_index[i] & 0x7f) % 11) * 5 + ((i + offset) % 5);
-
-        cod[pos1] = (fixed_index[i] & 0x80) ? -1.0 : 1.0;
-
-        if (pos2 < pos1)
-            cod[pos2]  = -cod[pos1];
-        else
-            cod[pos2] +=  cod[pos1];
-    }
-
-    pos1 = ((fixed_index[3] & 0x7f) / 11) * 5 + ((3 + offset) % 5);
-    pos2 = ((fixed_index[3] & 0x7f) % 11) * 5 + ((4 + offset) % 5);
-
-    cod[pos1] = (fixed_index[3] & 0x100) ? -1.0 : 1.0;
-    cod[pos2] = (fixed_index[3] & 0x80 ) ? -1.0 : 1.0;
-}
-
-static void decode_3_pulses_10bits(uint16_t fixed_index, float *cod)
-{
-    float sign;
-    int pos;
-
-    sign = (fixed_index & 0x200) ? -1.0 : 1.0;
-
-    pos = ((fixed_index        & 0x7) * 7) + 4;
-    cod[pos] += sign;
-    pos = (((fixed_index >> 3) & 0x7) * 7) + 2;
-    cod[pos] -= sign;
-    pos = (((fixed_index >> 6) & 0x7) * 7);
-    cod[pos] += sign;
-}
-
-/*
- * Reconstruction of ACELP fixed codebook excitation for full and half rate.
- *
- * TIA/IS-127 5.2.3.7
- */
-static void fcb_excitation(EVRCContext *e, const uint16_t *codebook,
-                           float *excitation, float pitch_gain,
-                           int pitch_lag, int subframe_size)
-{
-    int i;
-
-    if (e->bitrate == RATE_FULL)
-        decode_8_pulses_35bits(codebook, excitation);
-    else
-        decode_3_pulses_10bits(*codebook, excitation);
-
-    pitch_gain = av_clipf(pitch_gain, 0.2, 0.9);
-
-    for (i = pitch_lag; i < subframe_size; i++)
-        excitation[i] += pitch_gain * excitation[i - pitch_lag];
-}
-
-/**
- * Synthesis of the decoder output signal.
- *
- * @param[in]     in              input signal
- * @param[in]     filter_coeffs   LPC coefficients
- * @param[in,out] memory          synthesis filter memory
- * @param         buffer_length   amount of data to process
- * @param[out]    samples         output samples
- *
- * TIA/IS-127 5.2.3.15, 5.7.3.4
- */
-static void synthesis_filter(const float *in, const float *filter_coeffs,
-                             float *memory, int buffer_length, float *samples)
-{
-    int i, j;
-
-    for (i = 0; i < buffer_length; i++) {
-        samples[i] = in[i];
-        for (j = FILTER_ORDER - 1; j > 0; j--) {
-            samples[i] -= filter_coeffs[j] * memory[j];
-            memory[j]   = memory[j - 1];
-        }
-        samples[i] -= filter_coeffs[0] * memory[0];
-        memory[0]   = samples[i];
-    }
-}
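
`synthesis_filter` is a direct-form all-pole filter, `out[n] = in[n] - sum_j coeffs[j] * out[n-1-j]`, with `memory` holding the most recent outputs. The same recursion as a sketch, with a tiny second-order check:

```python
def synthesis_filter(inbuf, coeffs, memory):
    """All-pole filter; memory[j] holds out[n-1-j] and is updated in place."""
    out = []
    for x in inbuf:
        y = x - sum(c * m for c, m in zip(coeffs, memory))
        memory.insert(0, y)
        memory.pop()
        out.append(y)
    return out

assert synthesis_filter([1.0, 0.0, 0.0], [0.5, 0.0], [0.0, 0.0]) == [1.0, -0.5, 0.25]
```
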
-
-static void bandwidth_expansion(float *coeff, const float *inbuf, float gamma)
-{
-    double fac = gamma;
-    int i;
-
-    for (i = 0; i < FILTER_ORDER; i++) {
-        coeff[i] = inbuf[i] * fac;
-        fac *= gamma;
-    }
-}
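
`bandwidth_expansion` scales coefficient `i` by `gamma^(i+1)`, the usual way to widen formant bandwidths when building the weighted filters used by the postfilter. As a sketch:

```python
def bandwidth_expansion(coeffs, gamma):
    """Scale LPC coefficient i by gamma**(i+1)."""
    return [c * gamma ** (i + 1) for i, c in enumerate(coeffs)]

assert bandwidth_expansion([1.0, 1.0], 0.5) == [0.5, 0.25]
```
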
-
-static void residual_filter(float *output, const float *input,
-                            const float *coef, float *memory, int length)
-{
-    float sum;
-    int i, j;
-
-    for (i = 0; i < length; i++) {
-        sum = input[i];
-
-        for (j = FILTER_ORDER - 1; j > 0; j--) {
-            sum      += coef[j] * memory[j];
-            memory[j] = memory[j - 1];
-        }
-        sum += coef[0] * memory[0];
-        memory[0] = input[i];
-        output[i] = sum;
-    }
-}
-
-/*
- * TIA/IS-127 Table 5.9.1-1.
- */
-static const struct PfCoeff {
-    float tilt;
-    float ltgain;
-    float p1;
-    float p2;
-} postfilter_coeffs[5] = {
-    { 0.0 , 0.0 , 0.0 , 0.0  },
-    { 0.0 , 0.0 , 0.57, 0.57 },
-    { 0.0 , 0.0 , 0.0 , 0.0  },
-    { 0.35, 0.50, 0.50, 0.75 },
-    { 0.20, 0.50, 0.57, 0.75 },
-};
-
-/*
- * Adaptive postfilter.
- *
- * TIA/IS-127 5.9
- */
-static void postfilter(EVRCContext *e, float *in, const float *coeff,
-                       float *out, int idx, const struct PfCoeff *pfc,
-                       int length)
-{
-    float wcoef1[FILTER_ORDER], wcoef2[FILTER_ORDER],
-          scratch[SUBFRAME_SIZE], temp[SUBFRAME_SIZE],
-          mem[SUBFRAME_SIZE];
-    float sum1 = 0.0, sum2 = 0.0, gamma, gain;
-    float tilt = pfc->tilt;
-    int i, n, best;
-
-    bandwidth_expansion(wcoef1, coeff, pfc->p1);
-    bandwidth_expansion(wcoef2, coeff, pfc->p2);
-
-    /* Tilt compensation filter, TIA/IS-127 5.9.1 */
-    for (i = 0; i < length - 1; i++)
-        sum2 += in[i] * in[i + 1];
-    if (sum2 < 0.0)
-        tilt = 0.0;
-
-    for (i = 0; i < length; i++) {
-        scratch[i] = in[i] - tilt * e->last;
-        e->last = in[i];
-    }
-
-    /* Short term residual filter, TIA/IS-127 5.9.2 */
-    residual_filter(&e->postfilter_residual[ACB_SIZE], scratch, wcoef1, e->postfilter_fir, length);
-
-    /* Long term postfilter */
-    best = idx;
-    for (i = FFMIN(MIN_DELAY, idx - 3); i <= FFMAX(MAX_DELAY, idx + 3); i++) {
-        for (n = ACB_SIZE, sum2 = 0; n < ACB_SIZE + length; n++)
-            sum2 += e->postfilter_residual[n] * e->postfilter_residual[n - i];
-        if (sum2 > sum1) {
-            sum1 = sum2;
-            best = i;
-        }
-    }
-
-    for (i = ACB_SIZE, sum1 = 0; i < ACB_SIZE + length; i++)
-        sum1 += e->postfilter_residual[i - best] * e->postfilter_residual[i - best];
-    for (i = ACB_SIZE, sum2 = 0; i < ACB_SIZE + length; i++)
-        sum2 += e->postfilter_residual[i] * e->postfilter_residual[i - best];
-
-    if (sum2 * sum1 == 0 || e->bitrate == RATE_QUANT) {
-        memcpy(temp, e->postfilter_residual + ACB_SIZE, length * sizeof(float));
-    } else {
-        gamma = sum2 / sum1;
-        if (gamma < 0.5)
-            memcpy(temp, e->postfilter_residual + ACB_SIZE, length * sizeof(float));
-        else {
-            gamma = FFMIN(gamma, 1.0);
-
-            for (i = 0; i < length; i++) {
-                temp[i] = e->postfilter_residual[ACB_SIZE + i] + gamma *
-                    pfc->ltgain * e->postfilter_residual[ACB_SIZE + i - best];
-            }
-        }
-    }
-
-    memcpy(scratch, temp, length * sizeof(float));
-    memcpy(mem, e->postfilter_iir, FILTER_ORDER * sizeof(float));
-    synthesis_filter(scratch, wcoef2, mem, length, scratch);
-
-    /* Gain computation, TIA/IS-127 5.9.4-2 */
-    for (i = 0, sum1 = 0, sum2 = 0; i < length; i++) {
-        sum1 += in[i] * in[i];
-        sum2 += scratch[i] * scratch[i];
-    }
-    gain = sum2 ? sqrt(sum1 / sum2) : 1.0;
-
-    for (i = 0; i < length; i++)
-        temp[i] *= gain;
-
-    /* Short term postfilter */
-    synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
-
-    memmove(e->postfilter_residual,
-           e->postfilter_residual + length, ACB_SIZE * sizeof(float));
-}
-
-static void frame_erasure(EVRCContext *e, float *samples)
-{
-    float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES],
-          tmp[SUBFRAME_SIZE + 6], f;
-    int i, j;
-
-    for (i = 0; i < FILTER_ORDER; i++) {
-        if (e->bitrate != RATE_QUANT)
-            e->lspf[i] = e->prev_lspf[i] * 0.875 + 0.125 * (i + 1) * 0.048;
-        else
-            e->lspf[i] = e->prev_lspf[i];
-    }
-
-    if (e->prev_error_flag)
-        e->avg_acb_gain *= 0.75;
-    if (e->bitrate == RATE_FULL)
-        memcpy(e->pitch_back, e->pitch, ACB_SIZE * sizeof(float));
-    if (e->last_valid_bitrate == RATE_QUANT)
-        e->bitrate = RATE_QUANT;
-    else
-        e->bitrate = RATE_FULL;
-
-    if (e->bitrate == RATE_FULL || e->bitrate == RATE_HALF) {
-        e->pitch_delay = e->prev_pitch_delay;
-    } else {
-        float sum = 0;
-
-        idelay[0] = idelay[1] = idelay[2] = MIN_DELAY;
-
-        for (i = 0; i < NB_SUBFRAMES; i++)
-            sum += evrc_energy_quant[e->prev_energy_gain][i];
-        sum /= (float) NB_SUBFRAMES;
-        sum  = pow(10, sum);
-        for (i = 0; i < NB_SUBFRAMES; i++)
-            e->energy_vector[i] = sum;
-    }
-
-    if (fabs(e->pitch_delay - e->prev_pitch_delay) > 15)
-        e->prev_pitch_delay = e->pitch_delay;
-
-    for (i = 0; i < NB_SUBFRAMES; i++) {
-        int subframe_size = subframe_sizes[i];
-        int pitch_lag;
-
-        interpolate_lsp(ilspf, e->lspf, e->prev_lspf, i);
-
-        if (e->bitrate != RATE_QUANT) {
-            if (e->avg_acb_gain < 0.3) {
-                idelay[0] = estimation_delay[i];
-                idelay[1] = estimation_delay[i + 1];
-                idelay[2] = estimation_delay[i + 2];
-            } else {
-                interpolate_delay(idelay, e->pitch_delay, e->prev_pitch_delay, i);
-            }
-        }
-
-        pitch_lag = lrintf((idelay[1] + idelay[0]) / 2.0);
-        decode_predictor_coeffs(ilspf, ilpc);
-
-        if (e->bitrate != RATE_QUANT) {
-            acb_excitation(e, e->pitch + ACB_SIZE,
-                           e->avg_acb_gain, idelay, subframe_size);
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] *= e->fade_scale;
-            e->fade_scale = FFMAX(e->fade_scale - 0.05, 0.0);
-        } else {
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] = e->energy_vector[i];
-        }
-
-        memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
-
-        if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
-            f = 0.1 * e->avg_fcb_gain;
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] += f;
-        } else if (e->bitrate == RATE_QUANT) {
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] = e->energy_vector[i];
-        }
-
-        synthesis_filter(e->pitch + ACB_SIZE, ilpc,
-                         e->synthesis, subframe_size, tmp);
-        postfilter(e, tmp, ilpc, samples, pitch_lag,
-                   &postfilter_coeffs[e->bitrate], subframe_size);
-
-        samples += subframe_size;
-    }
-}
-
-static int evrc_decode_frame(AVCodecContext *avctx, AVFrame *frame,
-                             int *got_frame_ptr, AVPacket *avpkt)
-{
-    const uint8_t *buf = avpkt->data;
-    EVRCContext *e     = avctx->priv_data;
-    int buf_size       = avpkt->size;
-    float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES];
-    float *samples;
-    int   i, j, ret, error_flag = 0;
-
-    frame->nb_samples = 160;
-    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
-        return ret;
-    samples = (float *)frame->data[0];
-
-    if ((e->bitrate = determine_bitrate(avctx, &buf_size, &buf)) == RATE_ERRS) {
-        warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
-        goto erasure;
-    }
-    if (e->bitrate <= SILENCE || e->bitrate == RATE_QUARTER)
-        goto erasure;
-    if (e->bitrate == RATE_QUANT && e->last_valid_bitrate == RATE_FULL
-                                 && !e->prev_error_flag)
-        goto erasure;
-
-    if ((ret = init_get_bits8(&e->gb, buf, buf_size)) < 0)
-        return ret;
-    memset(&e->frame, 0, sizeof(EVRCAFrame));
-
-    unpack_frame(e);
-
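-    /* An all-zero unpacked frame (or an all-ones RATE_QUANT frame) is treated as an erasure. */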
-    if (e->bitrate != RATE_QUANT) {
-        uint8_t *p = (uint8_t *) &e->frame;
-        for (i = 0; i < sizeof(EVRCAFrame); i++) {
-            if (p[i])
-                break;
-        }
-        if (i == sizeof(EVRCAFrame))
-            goto erasure;
-    } else if (e->frame.lsp[0] == 0xf &&
-               e->frame.lsp[1] == 0xf &&
-               e->frame.energy_gain == 0xff) {
-        goto erasure;
-    }
-
-    if (decode_lspf(e) < 0)
-        goto erasure;
-
-    if (e->bitrate == RATE_FULL || e->bitrate == RATE_HALF) {
-        /* Pitch delay parameter checking as per TIA/IS-127 5.1.5.1 */
-        if (e->frame.pitch_delay > MAX_DELAY - MIN_DELAY)
-            goto erasure;
-
-        e->pitch_delay = e->frame.pitch_delay + MIN_DELAY;
-
-        /* Delay diff parameter checking as per TIA/IS-127 5.1.5.2 */
-        if (e->frame.delay_diff) {
-            int p = e->pitch_delay - e->frame.delay_diff + 16;
-            if (p < MIN_DELAY || p > MAX_DELAY)
-                goto erasure;
-        }
-
-        /* Delay contour reconstruction as per TIA/IS-127 5.2.2.2 */
-        if (e->frame.delay_diff &&
-            e->bitrate == RATE_FULL && e->prev_error_flag) {
-            float delay;
-
-            memcpy(e->pitch, e->pitch_back, ACB_SIZE * sizeof(float));
-
-            delay = e->prev_pitch_delay;
-            e->prev_pitch_delay = delay - e->frame.delay_diff + 16.0;
-
-            if (fabs(e->pitch_delay - delay) > 15)
-                delay = e->pitch_delay;
-
-            for (i = 0; i < NB_SUBFRAMES; i++) {
-                int subframe_size = subframe_sizes[i];
-
-                interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
-                acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
-                memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
-            }
-        }
-
-        /* Smoothing of the decoded delay as per TIA/IS-127 5.2.2.5 */
-        if (fabs(e->pitch_delay - e->prev_pitch_delay) > 15)
-            e->prev_pitch_delay = e->pitch_delay;
-
-        e->avg_acb_gain = e->avg_fcb_gain = 0.0;
-    } else {
-        idelay[0] = idelay[1] = idelay[2] = MIN_DELAY;
-
-        /* Decode frame energy vectors as per TIA/IS-127 5.7.2 */
-        for (i = 0; i < NB_SUBFRAMES; i++)
-            e->energy_vector[i] = pow(10, evrc_energy_quant[e->frame.energy_gain][i]);
-        e->prev_energy_gain = e->frame.energy_gain;
-    }
-
-    for (i = 0; i < NB_SUBFRAMES; i++) {
-        float tmp[SUBFRAME_SIZE + 6] = { 0 };
-        int subframe_size = subframe_sizes[i];
-        int pitch_lag;
-
-        interpolate_lsp(ilspf, e->lspf, e->prev_lspf, i);
-
-        if (e->bitrate != RATE_QUANT)
-            interpolate_delay(idelay, e->pitch_delay, e->prev_pitch_delay, i);
-
-        pitch_lag = lrintf((idelay[1] + idelay[0]) / 2.0);
-        decode_predictor_coeffs(ilspf, ilpc);
-
-        /* Bandwidth expansion as per TIA/IS-127 5.2.3.3 */
-        if (e->frame.lpc_flag && e->prev_error_flag)
-            bandwidth_expansion(ilpc, ilpc, 0.75);
-
-        if (e->bitrate != RATE_QUANT) {
-            float acb_sum, f;
-
-            f = exp((e->bitrate == RATE_HALF ? 0.5 : 0.25)
-                         * (e->frame.fcb_gain[i] + 1));
-            acb_sum = pitch_gain_vq[e->frame.acb_gain[i]];
-            e->avg_acb_gain += acb_sum / NB_SUBFRAMES;
-            e->avg_fcb_gain += f / NB_SUBFRAMES;
-
-            acb_excitation(e, e->pitch + ACB_SIZE,
-                           acb_sum, idelay, subframe_size);
-            fcb_excitation(e, e->frame.fcb_shape[i], tmp,
-                           acb_sum, pitch_lag, subframe_size);
-
-            /* Total excitation generation as per TIA/IS-127 5.2.3.9 */
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] += f * tmp[j];
-            e->fade_scale = FFMIN(e->fade_scale + 0.2, 1.0);
-        } else {
-            for (j = 0; j < subframe_size; j++)
-                e->pitch[ACB_SIZE + j] = e->energy_vector[i];
-        }
-
-        memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
-
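-        /* With the postfilter enabled, synthesize into a scratch buffer; the postfilter writes the output. */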
-        synthesis_filter(e->pitch + ACB_SIZE, ilpc,
-                         e->synthesis, subframe_size,
-                         e->postfilter ? tmp : samples);
-        if (e->postfilter)
-            postfilter(e, tmp, ilpc, samples, pitch_lag,
-                       &postfilter_coeffs[e->bitrate], subframe_size);
-
-        samples += subframe_size;
-    }
-
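-    /* The "goto erasure" paths above jump into this block, setting error_flag for the bookkeeping below. */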
-    if (error_flag) {
-erasure:
-        error_flag = 1;
-        av_log(avctx, AV_LOG_WARNING, "frame erasure\n");
-        frame_erasure(e, samples);
-    }
-
-    memcpy(e->prev_lspf, e->lspf, sizeof(e->prev_lspf));
-    e->prev_error_flag    = error_flag;
-    e->last_valid_bitrate = e->bitrate;
-
-    if (e->bitrate != RATE_QUANT)
-        e->prev_pitch_delay = e->pitch_delay;
-
-    samples = (float *)frame->data[0];
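-    /* The synthesis runs at 16-bit sample amplitudes; scale the frame down to the (-1, 1) float range. */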
-    for (i = 0; i < 160; i++)
-        samples[i] /= 32768;
-
-    *got_frame_ptr   = 1;
-
-    return avpkt->size;
-}
-
-#define OFFSET(x) offsetof(EVRCContext, x)
-#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
-
-static const AVOption options[] = {
-    { "postfilter", "enable postfilter", OFFSET(postfilter), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, AD },
-    { NULL }
-};
-
-static const AVClass evrcdec_class = {
-    .class_name = "evrc",
-    .item_name  = av_default_item_name,
-    .option     = options,
-    .version    = LIBAVUTIL_VERSION_INT,
-};
-
-const FFCodec ff_evrc_decoder = {
-    .p.name         = "evrc",
-    CODEC_LONG_NAME("EVRC (Enhanced Variable Rate Codec)"),
-    .p.type         = AVMEDIA_TYPE_AUDIO,
-    .p.id           = AV_CODEC_ID_EVRC,
-    .init           = evrc_decode_init,
-    FF_CODEC_DECODE_CB(evrc_decode_frame),
-    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
-    .priv_data_size = sizeof(EVRCContext),
-    .p.priv_class   = &evrcdec_class,
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Face Studio Baku The Best Beauty Salon Network in Azerbaijan.md b/spaces/congsaPfin/Manga-OCR/logs/Face Studio Baku The Best Beauty Salon Network in Azerbaijan.md
deleted file mode 100644
index fb1f6a79129bd6b67e9e850733b007ffdb0c243a..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Face Studio Baku The Best Beauty Salon Network in Azerbaijan.md	
+++ /dev/null
@@ -1,154 +0,0 @@
-<br />
-<h1>Face Studio Baku: The King of Beauty Salons in Azerbaijan</h1>
-<p>If you are looking for a professional, reliable, and affordable beauty salon in Azerbaijan, you should definitely check out Face Studio Baku. Face Studio Baku is the leading network of beauty salons in Baku, offering a wide range of services and products for your hair, skin, nails, and body. Whether you need a haircut, a facial, a manicure, or a massage, you can find it at Face Studio Baku. In this article, we will tell you everything you need to know about Face Studio Baku, including its history, mission, services, products, locations, contacts, benefits, testimonials, awards, booking system, payment methods, discounts, cancellation policy, and customer service. Read on to discover why Face Studio Baku is the king of beauty salons in Azerbaijan.</p>
-<h2>face studio baku</h2><br /><p><b><b>DOWNLOAD</b> &#10001; <a href="https://urlca.com/2uO4Y7">https://urlca.com/2uO4Y7</a></b></p><br /><br />
- <h2>What is Face Studio Baku?</h2>
-<p>Face Studio Baku is a network of beauty salons that was founded in 2008 by Elchin Mammadov, a visionary entrepreneur who wanted to create a modern and innovative salon concept in Azerbaijan. His goal was to provide high-quality beauty services and products at affordable prices, using the latest technologies and techniques in the industry. He also wanted to create a friendly and comfortable atmosphere for his customers, where they could relax and enjoy their time.</p>
- <h3>The history and mission of Face Studio Baku</h3>
-<p>Since its inception, Face Studio Baku has grown rapidly and expanded its network to 12 salons across the city. Each salon has its own unique design and style, reflecting the diverse tastes and preferences of its customers. However, all salons share the same vision and mission: to make every customer feel beautiful, confident, and satisfied. Face Studio Baku strives to exceed the expectations of its customers by providing them with personalized and professional services and products that suit their needs and desires.</p>
- <h3>The services and products offered by Face Studio Baku</h3>
-<p>Face Studio Baku offers a wide range of services and products for your hair, skin, nails, and body. You can choose from various options such as:</p>
-<ul>
-<li>Haircuts, styling, coloring, extensions, treatments, keratin, botox, etc.</li>
-<li>Facials, peeling, microdermabrasion, mesotherapy, microneedling, etc.</li>
-<li>Manicures, pedicures, gel polish, nail art, extensions, etc.</li>
-<li>Waxing, threading, sugaring, laser hair removal, etc.</li>
-<li>Massages, body wraps, scrubs, slimming programs, etc.</li>
-<li>Makeup, eyelash extensions, brow shaping and tinting, etc.</li>
-</ul>
-<p>All services and products are performed by qualified and experienced staff who use only the best brands and equipment in the market. Some of the brands that Face Studio Baku works with are L'Oréal Professionnel, Kérastase, Schwarzkopf Professional, OPI, CND, Dermalogica, Guinot, and many more.</p>
-<p>face studio baku salon<br />
-face studio baku facebook<br />
-face studio baku narimanov<br />
-face studio baku prices<br />
-face studio baku reviews<br />
-face studio baku hair<br />
-face studio baku makeup<br />
-face studio baku nails<br />
-face studio baku spa<br />
-face studio baku man<br />
-face studio baku address<br />
-face studio baku contact<br />
-face studio baku online<br />
-face studio baku instagram<br />
-face studio baku website<br />
-face studio baku services<br />
-face studio baku booking<br />
-face studio baku location<br />
-face studio baku offers<br />
-face studio baku careers<br />
-face studio baku courses<br />
-face studio baku products<br />
-face studio baku gallery<br />
-face studio baku team<br />
-face studio baku events<br />
-face studio baku gift card<br />
-face studio baku blog<br />
-face studio baku video<br />
-face studio baku testimonials<br />
-face studio baku promotions<br />
-face studio baku bridal<br />
-face studio baku eyelash extensions<br />
-face studio baku waxing<br />
-face studio baku massage<br />
-face studio baku facial treatments<br />
-face studio baku microblading<br />
-face studio baku laser hair removal<br />
-face studio baku botox<br />
-face studio baku fillers<br />
-face studio baku skin care<br />
-face studio baku acne treatment<br />
-face studio baku anti aging<br />
-face studio baku peeling<br />
-face studio baku microneedling<br />
-face studio baku hydrafacial <br />
-face studio baku prp therapy <br />
-face studio baku mesotherapy <br />
-face studio baku dermaplaning <br />
-face studio baku bb glow</p>
- <h3>The locations and contacts of Face Studio Baku</h3>
-<p>Face Studio Baku has 12 salons in different areas of the city. You can find them at:</p>
-<table>
-<tr><th>Salon</th><th>Address</th><th>Phone</th></tr>
-<tr><td>Xatai</td><td>Xətai prospekti 55</td><td>+ (994) 12 34 56</td></tr>
-<tr><td>Nizami</td><td>Nizami küçəsi 203</td><td>+ (994) 12 34 78</td></tr>
-<tr><td>Neftchilar</td><td>Neftçilər prospekti 76</td><td>+ (994) 12 34 90</td></tr>
-<tr><td>Bulbul</td><td>Bülbül prospekti 22</td><td>+ (994) 12 35 12</td></tr>
-<tr><td>Badamdar</td><td>Bədəmdar şosesi 2</td><td>+ (994) 12 35 34</td></tr>
-<tr><td>Elmler</td><td>Elmlər Akademiyası metro stansiyası yaxınlığında</td><td>+ (994) 12 35 56</td></tr>
-<tr><td>Ganjlik</td><td>Gənclik Mall, 3-cü mərtəbə</td><td>+ (994) 12 35 78</td></tr>
-<tr><td>Port Baku</td><td>Port Baku Mall, -1-ci mərtəbə</td><td>+ (994) 12 35 90</td></tr>
-<tr><td>Metro Park</td><td>Metro Park, -1-ci mərtəbə</td><td>+ (994) 12 36 12</td></tr>
-<tr><td>Amburan</td><td>Amburan Mall, -1-ci mərtəbə</td><td>+ (994) 12 36 34</td></tr>
-<tr><td>Baku Mall</td><td>Baku Mall, -1-ci mərtəbə</td><td>+ (994) 12 36 56</td></tr>
-<tr><td>28 Mall</td><td>28 Mall, -1-ci mərtəbə</td><td>+ (994) 12 36 78</td></tr>
-</table>
-<p>You can also visit their website at <a href="">www.facestudio.az</a>, where you can find more information about their salons, services, products, prices, and promotions. You can also follow them on social media platforms such as Facebook, Instagram, and YouTube, where they post regular updates, tips, and offers.</p>
- <h2>Why choose Face Studio Baku?</h2>
-<p>There are many reasons why you should choose Face Studio Baku as your preferred beauty salon in Azerbaijan. Here are some of them:</p>
- <h3>The benefits and advantages of Face Studio Baku</h3>
-<p>When you visit Face Studio Baku, you can enjoy the following benefits and advantages:</p>
-<ul>
-<li>You can get access to a wide range of services and products for your hair, skin, nails, and body, all under one roof.</li>
-<li>You can get high-quality services and products at affordable prices, with no hidden fees or extra charges.</li>
-<li>You can get personalized and professional advice from their staff, who will help you choose the best options for your needs and desires.</li>
-<li>You can get the latest technologies and techniques in the beauty industry, such as laser hair removal, microneedling, keratin treatment, etc.</li>
-<li>You can get a friendly and comfortable atmosphere, where you can relax and enjoy your time.</li>
-<li>You can get loyalty cards and gift vouchers that you can use for yourself or your loved ones.</li>
-<li>You can get free consultations and trials before you decide to book a service or buy a product.</li>
-<li>You can get free Wi-Fi and refreshments while you wait for your appointment or service.</li>
-<li>You can get flexible working hours and convenient locations that suit your schedule and preferences.</li>
-<li>You can get a satisfaction guarantee that ensures that you will be happy with the results or get a refund or a redo.</li>
-</ul>
- <h3>The testimonials and reviews of Face Studio Baku customers</h3>
-<p>Don't just take our word for it. Listen to what some of the customers of Face Studio Baku have to say about their experience:</p>
- <blockquote>"I love Face Studio Baku! They always do a great job with my hair and nails. The staff are very friendly and professional. The prices are reasonable and the quality is excellent. I highly recommend them to anyone who wants to look good and feel good." - Leyla Mammadova, customer since 2010.</blockquote>
- <blockquote>"Face Studio Baku is the best beauty salon in Baku. I have tried many of their services and products and I am always impressed by the results. They have the best equipment and the best staff in the city. They are very attentive and helpful. They always make me feel welcome and pampered." - Nigar Aliyeva, customer since 2012.</blockquote>
- <blockquote>"I have been a loyal customer of Face Studio Baku for over 5 years. They are the only salon that I trust with my beauty needs. They have everything I need, from haircuts to massages. They are very professional and knowledgeable. They always give me the best advice and the best service. They are simply amazing." - Elvin Hasanov, customer since 2016.</blockquote>
- <h3>The awards and recognitions of Face Studio Baku</h3>
-<p>Face Studio Baku is not only loved by its customers, but also by the industry and the media. It has received many awards and recognitions for its excellence and innovation in the beauty sector. Some of them are:</p>
-<ul>
-<li>The Best Beauty Salon Award by Baku Style Magazine in 2019.</li>
-<li>The Best Customer Service Award by Baku Business Journal in 2020.</li>
-<li>The Best Salon Network Award by Baku Beauty Expo in 2021.</li>
-<li>The Best Salon Brand Award by Baku Fashion Week in 2022.</li>
-<li>The Best Salon of the Year Award by Baku Lifestyle Magazine in 2023.</li>
-</ul>
- <h2>How to book an appointment with Face Studio Baku?</h2>
-<p>If you are interested in booking an appointment with Face Studio Baku, you have several options to do so. Here are some of them:</p>
- <h3>The online booking system of Face Studio Baku</h3>
-<p>The easiest and fastest way to book an appointment with Face Studio Baku is to use their online booking system. You can access it through their website or their mobile app. You just need to follow these simple steps:</p>
-<ol>
-<li>Select the salon, the service, the date, and the time that you prefer.</li>
-<li>Enter your personal details and your contact information.</li>
-<li>Confirm your booking and receive a confirmation email or SMS.</li>
-</ol>
-<p>You can also modify or cancel your booking online if you need to.</p>
- <h3>The payment methods and discounts of Face Studio Baku</h3>
-<p>When you book an appointment with Face Studio Baku, you can choose to pay online or at the salon. You can use various payment methods such as credit cards, debit cards, PayPal, or cash. You can also enjoy various discounts and offers that Face Studio Baku provides for its customers, such as:</p>
-<ul>
-<li>A 10% discount for first-time customers.</li>
-<li>A 15% discount for students and seniors.</li>
-<li>A 20% discount for referrals and loyalty card holders.</li>
-<li>A 25% discount for birthday celebrants and anniversary celebrants.</li>
-<li>A 30% discount for special occasions and holidays.</li>
-</ul>
- <h3>The cancellation policy and customer service of Face Studio Baku</h3>
-<p>If you need to cancel your appointment with Face Studio Baku, you can do so without any penalty if you notify them at least 24 hours before your scheduled time. If you cancel within 24 hours, you may be charged a cancellation fee of 10% of your total bill. If you do not show up for your appointment, you may be charged a no-show fee of 20% of your total bill.</p>
- <p>If you have any questions, complaints, or suggestions about your booking or your service with Face Studio Baku, you can contact their customer service team at any time. You can call them at + (994) 12 37 00 or email them at info@facestudio.az. They will be happy to assist you and resolve any issues that you may have.</p>
- <h2>Conclusion</h2>
-<p>Face Studio Baku is the king of beauty salons in Azerbaijan, offering a wide range of services and products for your hair, skin, nails, and body. It has a network of 12 salons across the city, each with its own unique design and style. It has a team of qualified and experienced staff who use only the best brands and equipment in the market. It has a loyal customer base who love its quality, affordability, professionalism, and friendliness. It has a reputation for excellence and innovation in the beauty industry, earning many awards and recognitions from the media and the public. It has an easy and convenient online booking system that allows you to choose your preferred salon, service, date, and time. It has various payment methods and discounts that make it even more attractive and accessible. It has a fair and flexible cancellation policy that respects your time and money. It has a responsive and helpful customer service team that listens to your feedback and solves your problems. If you are looking for a beauty salon that can make you look and feel your best, you should definitely choose Face Studio Baku. You will not regret it. Book your appointment today and see for yourself why Face Studio Baku is the king of beauty salons in Azerbaijan.</p>
- <h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Face Studio Baku:</p>
- <h3>Q: How can I find the nearest Face Studio Baku salon to me?</h3>
-<p>A: You can use the salon locator tool on their website or their mobile app. You can also call their customer service team and ask them for directions.</p>
- <h3>Q: How can I get a gift voucher from Face Studio Baku?</h3>
-<p>A: You can buy a gift voucher from any of their salons or online. You can choose the amount and the service that you want to give as a gift. You can also personalize the voucher with a message and a design.</p>
- <h3>Q: How can I join the loyalty program of Face Studio Baku?</h3>
-<p>A: You can join the loyalty program by filling out a form at any of their salons or online. You will receive a loyalty card that you can use to collect points every time you visit Face Studio Baku. You can redeem your points for discounts and free services.</p>
- <h3>Q: How can I leave a review or a suggestion for Face Studio Baku?</h3>
-<p>A: You can leave a review or a suggestion on their website, their mobile app, their social media pages, or their customer service email. They appreciate your feedback and they will use it to improve their services and products.</p>
- <h3>Q: How can I contact Face Studio Baku if I have a complaint or a problem?</h3>
-<p>A: You can contact Face Studio Baku by calling their customer service team at + (994) 12 37 00 or emailing them at info@facestudio.az. They will respond to your complaint or problem as soon as possible and try to resolve it to your satisfaction.</p> 197e85843d<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/JCB Mod for Bus Simulator Indonesia - How to Drive the Excavator and Crane with Smooth Controls and Sound Effects.md b/spaces/congsaPfin/Manga-OCR/logs/JCB Mod for Bus Simulator Indonesia - How to Drive the Excavator and Crane with Smooth Controls and Sound Effects.md
deleted file mode 100644
index 680636a5ae687b03e83d7b6dfd99c2ff83ee96d3..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/JCB Mod for Bus Simulator Indonesia - How to Drive the Excavator and Crane with Smooth Controls and Sound Effects.md	
+++ /dev/null
@@ -1,122 +0,0 @@
-<br />
-<h1>How to Download JCB Mod for Bus Simulator Indonesia</h1>
-<p>Bus Simulator Indonesia is a popular mobile game that lets you experience what it's like to be a bus driver in Indonesia in a fun and authentic way. But did you know that you can also download and play with different mods that can add more variety, realism, and fun to your bus driving simulation? One of the most popular mods for Bus Simulator Indonesia is the JCB Mod, which allows you to drive a JCB backhoe loader instead of a bus. In this article, we will show you how to download JCB Mod for Bus Simulator Indonesia, what its features are, and how to play with it.</p>
-<h2>download jcb mod for bus simulator indonesia</h2><br /><p><b><b>Download Zip</b> ->>> <a href="https://urlca.com/2uO7pO">https://urlca.com/2uO7pO</a></b></p><br /><br />
- <h2>What is Bus Simulator Indonesia?</h2>
-<p>Bus Simulator Indonesia (aka BUSSID) is a mobile game developed by Maleo that simulates bus driving in Indonesia. The game features realistic and detailed 3D graphics, authentic Indonesian cities and places, Indonesian buses, cool and fun honks, online multiplayer convoy, vehicle mod system, and more.</p>
- <h3>Features of the game</h3>
-<p>Some of the top features of Bus Simulator Indonesia are:</p>
-<ul>
-<li>Simple and intuitive controls to help you get started</li>
-<li>Design your own livery for your bus</li>
-<li>Visit the country through many authentic cities and places</li>
-<li>Legit Indonesian buses with realistic and in-depth designs</li>
-<li>Stunning maps with accurate designs and realistic elements</li>
-<li>Cool, interesting, and iconic honks</li>
-<li>No obstructive ads while driving</li>
-<li>Leaderboard and data saved online</li>
-<li>Use your own 3D model using vehicle mod system</li>
-<li>Online multiplayer convoy</li>
-</ul>
- <h3>Why play with mods?</h3>
-<p>Mods are modifications or additions that can change or enhance your gaming experience. Playing with mods can make your game more interesting, diverse, and enjoyable. You can try different vehicles, scenarios, maps, sounds, accessories, and more with mods. Mods can also make your game more realistic or challenging by adding new features or difficulties.</p>
- <h2>What is JCB Mod?</h2>
-<p>JCB Mod is a modification for Bus Simulator Indonesia that allows you to drive a JCB backhoe loader instead of a bus. JCB is a British company that manufactures heavy equipment, such as excavators, tractors, loaders, and more. JCB Mod is one of the most downloaded and highest-rated mods for Bus Simulator Indonesia, with over 1 million downloads and a 4.5-star rating.</p>
- <h3>Description of the mod</h3>
-<p>JCB Mod is a vehicle mod that replaces the default bus model with a JCB backhoe loader model. The mod has a realistic and detailed design, with accurate dimensions, colors, textures, and animations. The mod also has a custom horn sound, which is the iconic JCB beep. The mod is compatible with Bus Simulator Indonesia version 3.7.1 or higher.</p>
- <h3>How to install the mod</h3>
-<p>To install JCB Mod for Bus Simulator Indonesia, you need to follow these steps:</p>
-<ol>
-<li>Download the JCB Mod file from a trusted website, such as [Sourav Gaming], [MODBUSSID], or [YouTube]. The file should be in .bussidvehicle format.</li>
-<li>Open Bus Simulator Indonesia and go to the Garage menu.</li>
-<li>Select Import and choose the JCB Mod file from your device storage.</li>
-<li>Wait for the import process to finish and then select Use.</li>
-<li>Enjoy driving your JCB backhoe loader!</li>
-</ol>
- <h2>Tips and tricks for playing with JCB Mod</h2>
-<p>Playing with JCB Mod can be a lot of fun, but also challenging. Here are some tips and tricks to help you get the most out of your JCB driving experience:</p>
- <h3>How to use the animations</h3>
-<p>JCB Mod has several animations that you can use to control the backhoe loader. You can access them by tapping on the gear icon on the top right corner of the screen. You can then swipe left or right to see the different animations. Here are some of the animations you can use:</p>
-<p>download jcb backhoe loader mod for bus simulator indonesia<br />
-download jcb excavator with led mod for bus simulator indonesia<br />
-download jcb crane mod for bus simulator indonesia<br />
-download jcb mod for bussid v3.5<br />
-download jcb mod for bussid free<br />
-download jcb mod for bussid zip file<br />
-download jcb mod for bussid sourav gaming<br />
-download jcb mod for bussid gamer chunkz<br />
-download jcb mod for bussid apk<br />
-download jcb mod for bussid android<br />
-how to download jcb mod for bus simulator indonesia<br />
-how to install jcb mod for bus simulator indonesia<br />
-how to use jcb mod for bus simulator indonesia<br />
-best jcb mod for bus simulator indonesia<br />
-new jcb mod for bus simulator indonesia<br />
-latest jcb mod for bus simulator indonesia<br />
-realistic jcb mod for bus simulator indonesia<br />
-hd jcb mod for bus simulator indonesia<br />
-4k jcb mod for bus simulator indonesia<br />
-3d jcb mod for bus simulator indonesia<br />
-active jcb mod for bus simulator indonesia<br />
-smooth jcb mod for bus simulator indonesia<br />
-cool jcb mod for bus simulator indonesia<br />
-amazing jcb mod for bus simulator indonesia<br />
-awesome jcb mod for bus simulator indonesia<br />
-review of jcb mod for bus simulator indonesia<br />
-video of jcb mod for bus simulator indonesia<br />
-gameplay of jcb mod for bus simulator indonesia<br />
-tutorial of jcb mod for bus simulator indonesia<br />
-tips and tricks of jcb mod for bus simulator indonesia<br />
-features of jcb mod for bus simulator indonesia<br />
-animation of jcb mod for bus simulator indonesia<br />
-sound of jcb mod for bus simulator indonesia<br />
-speed of jcb mod for bus simulator indonesia<br />
-size of jcb mod for bus simulator indonesia<br />
-status of jcb mod for bus simulator indonesia<br />
-credits of jcb mod for bus simulator indonesia<br />
-link to download jcb mod for bus simulator indonesia<br />
-website to download jcb mod for bus simulator indonesia<br />
-blog to download jcb mod for bus simulator indonesia<br />
-youtube channel to download jcb mod for bus simulator indonesia<br />
-facebook group to download jcb mod for bus simulator indonesia<br />
-google drive to download jcb mod for bus simulator indonesia<br />
-mediafire to download jcb mod for bus simulator indonesia <br />
-mega to download jcb mod for bus simulator indonesia <br />
-zippyshare to download jcb mod for bus simulator indonesia <br />
-dropbox to download jcb mod for bus simulator indonesia <br />
-direct link to download jcb mod for bus simulator indonesia <br />
-no safelink to download jcb mod for bus simulator indonesia <br />
-no password to download jcb mod for bus simulator indonesia</p>
-<ul>
-<li>Loader up/down: This animation moves the front loader up or down.</li>
-<li>Loader tilt: This animation tilts the front loader forward or backward.</li>
-<li>Backhoe up/down: This animation moves the backhoe arm up or down.</li>
-<li>Backhoe extend/retract: This animation extends or retracts the backhoe arm.</li>
-<li>Backhoe bucket: This animation opens or closes the backhoe bucket.</li>
-<li>Stabilizer: This animation lowers or raises the stabilizer legs on the rear of the vehicle.</li>
-</ul>
- <h3>How to drive safely and realistically</h3>
-<p>JCB Mod is not a typical bus mod, so you need to be careful and realistic when driving it. Here are some tips to help you drive safely and realistically:</p>
-<ul>
-<li>Avoid driving too fast or too slow. The optimal speed for driving a JCB backhoe loader is around 40 km/h.</li>
-<li>Avoid driving on rough terrain or steep slopes. The JCB backhoe loader is not designed for off-road driving, so you might damage it or get stuck.</li>
-<li>Avoid hitting other vehicles or objects. The JCB backhoe loader is a large and heavy vehicle, so you might cause accidents or injuries if you hit something.</li>
-<li>Avoid using the animations while driving. The animations are meant for stationary use only, so using them while driving might cause instability or glitches.</li>
-<li>Avoid carrying passengers or cargo. The JCB backhoe loader is not a passenger or cargo vehicle, so carrying them might be illegal or unsafe.</li>
-</ul>
- <h2>Conclusion</h2>
-<p>JCB Mod is a fun and unique mod for Bus Simulator Indonesia that lets you drive a JCB backhoe loader instead of a bus. You can download JCB Mod from various websites, such as [Sourav Gaming], [MODBUSSID], or [YouTube]. You can install JCB Mod by importing it in Bus Simulator Indonesia and selecting it in the Garage menu. You can use different animations to control the backhoe loader and enjoy driving it in various places. However, you should also be careful and realistic when driving it, as it is not a typical bus mod.</p>
- <p>We hope you enjoyed this article and learned something new about JCB Mod for Bus Simulator Indonesia. If you have any questions, comments, or feedback, please feel free to leave them below. We would love to hear from you!</p>
- <h2>FAQs</h2>
-<h4>Q1. Where can I download JCB Mod for Bus Simulator Indonesia?</h4>
-<h4>A1. You can download JCB Mod from various websites, such as [Sourav Gaming], [MODBUSSID], or [YouTube].</h4>
-<h4>Q2. Is JCB Mod compatible with the latest version of Bus Simulator Indonesia?</h4>
-<h4>A2. Yes, JCB Mod is compatible with Bus Simulator Indonesia version 3.7.1 or higher.</h4>
-<h4>Q3. What are the benefits of playing with JCB Mod?</h4>
-<h4>A3. Playing with JCB Mod can enhance your gaming experience by adding more variety, realism, and fun to your bus driving simulation. You can enjoy driving a different vehicle, using different animations, and exploring different places with JCB Mod.</h4>
-<h4>Q4. How can I customize my JCB Mod?</h4>
-<h4>A4. You can customize your JCB Mod by changing its livery, color, horn, or accessories. You can also use your own 3D model using the vehicle mod system in Bus Simulator Indonesia.</h4>
-<h4>Q5. How can I join an online multiplayer convoy with JCB Mod?</h4>
-<h4>A5. You can join an online multiplayer convoy with JCB Mod by creating or joining a room in Bus Simulator Indonesia. You can invite your friends or other players to join your convoy and enjoy driving together.</h4></p> 197e85843d<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Learn How to Fill Any Shape with Circles in Illustrator.md b/spaces/congsaPfin/Manga-OCR/logs/Learn How to Fill Any Shape with Circles in Illustrator.md
deleted file mode 100644
index b495a6cd98fcbb0a2d9d0ae6b550ee1590ea3c03..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Learn How to Fill Any Shape with Circles in Illustrator.md	
+++ /dev/null
@@ -1,161 +0,0 @@
-
-<h1>How to Fill Objects with Circles in Adobe Illustrator</h1>
-<p>Adobe Illustrator is powerful vector graphics software that allows you to create stunning artworks with shapes, paths, gradients, effects, and more. However, sometimes you may want to add some extra flair to your designs by filling them with circles of different sizes and colors. This can create a unique and eye-catching effect that can be used for logos, backgrounds, patterns, illustrations, and more.</p>
-<h2>circle fill script illustrator download</h2><br /><p><b><b>Download File</b> &#10042; <a href="https://urlca.com/2uO5AQ">https://urlca.com/2uO5AQ</a></b></p><br /><br />
-<p>But how can you fill an object with circles in Adobe Illustrator? You could try to manually draw and arrange each circle, but that would be very time-consuming and tedious. Or you could use a handy script called <strong>Circle Fill</strong> that does the job for you in seconds. In this article, we will show you what Circle Fill script is, how to download and install it, how to use it in Adobe Illustrator, and some tips and tricks for getting the most out of it.</p>
- <h2>What is Circle Fill Script and Why You Need It</h2>
-<p>Circle Fill script is a free and useful Adobe Illustrator script that fills outline shapes with packed circles. It was created by Hiroyuki Sato, a Japanese programmer who has developed many other scripts for Adobe Illustrator. You can find his website here: (https://shspage.com/aijs/en/).</p>
-<p>Circle Fill script has many features and benefits that make it a must-have tool for any Illustrator user who wants to create circle-filled artworks. Here are some of them:</p>
-<ul>
-<li>It works with any outline shape, such as rectangles, ellipses, polygons, stars, text, etc.</li>
-<li>It automatically adjusts the size and number of circles to fit the shape.</li>
-<li>It allows you to specify the minimum and maximum radius of the circles.</li>
-<li>It allows you to specify the gap between the circles.</li>
-<li>It allows you to apply random colors to the circles from your swatches panel.</li>
-<li>It creates a separate group for each circle-filled shape, so you can easily edit or move them later.</li>
-</ul>
-<p>To use Circle Fill script, you need to download and install it first. Here are the steps:</p>
- <h3>How to Download and Install Circle Fill Script</h3>
-<ol>
-<li>Go to this link: (https://assets.adobe.com/public/7cf1d...).</li>
-<li>Right-click on the file name <em>CircleFill.jsx</em> and choose <em>Save Link As...</em>.</li>
-<li>Save the file to your hard drive, preferably in the Scripts folder of your Adobe Illustrator installation. For example, on Windows it could be <em>C:\Program Files\Adobe\Adobe Illustrator CC 2020\Presets\en_US\Scripts</em>.</li>
-<li>Restart Adobe Illustrator if it was already running.</li>
-</ol>
- <h2>How to Use Circle Fill Script in Adobe Illustrator</h2>
-<p>Now that you have installed Circle Fill script, you are ready to use it in Adobe Illustrator. Here are the steps:</p>
- <h3>How to Select an Object and Run the Script</h3>
-<ol>
-<li>Create or open an outline shape that you want to fill with circles. For example, you can use the <em>Type Tool</em> (T) to type some text and then go to <em>Type > Create Outlines</em>.</li>
-<li>Select the shape with the <em>Selection Tool</em> (V).</li>
-<li>Go to <em>File > Scripts > Other Scripts...</em>.</li>
-<li>Navigate to the folder where you saved the <em>CircleFill.jsx</em> file and select it.</li>
-<li>Click <em>Open</em> to run the script.</li>
-</ol>
- <h3>How to Adjust the Parameters of the Script</h3>
-<p>After you run the script, a dialog box will appear with some options that you can adjust to customize the circle filling effect. Here are the options and what they do:</p>
-<p>How to use circle fill script in Adobe Illustrator<br />
-Circle fill script for Adobe Illustrator tutorial<br />
-Adobe Illustrator circle fill script free download<br />
-Circle fill script Adobe Illustrator CC 2020<br />
-Best circle fill script for Adobe Illustrator<br />
-Circle fill script illustrator download link<br />
-Circle fill script illustrator alternative<br />
-Circle fill script illustrator review<br />
-Circle fill script illustrator demo<br />
-Circle fill script illustrator examples<br />
-Circle fill script illustrator tips and tricks<br />
-Circle fill script illustrator compatibility<br />
-Circle fill script illustrator installation guide<br />
-Circle fill script illustrator license<br />
-Circle fill script illustrator features<br />
-Circle fill script illustrator benefits<br />
-Circle fill script illustrator drawbacks<br />
-Circle fill script illustrator troubleshooting<br />
-Circle fill script illustrator support<br />
-Circle fill script illustrator feedback<br />
-Circle fill script illustrator update<br />
-Circle fill script illustrator vs other scripts<br />
-Circle fill script illustrator use cases<br />
-Circle fill script illustrator testimonials<br />
-Circle fill script illustrator FAQs<br />
-How to create circle patterns with circle fill script illustrator<br />
-How to color circles randomly with circle fill script illustrator<br />
-How to adjust circle size and spacing with circle fill script illustrator<br />
-How to apply circle fill script to any shape in illustrator<br />
-How to make circle logos with circle fill script illustrator<br />
-How to design circle icons with circle fill script illustrator<br />
-How to create circle infographics with circle fill script illustrator<br />
-How to make circle backgrounds with circle fill script illustrator<br />
-How to create circle art with circle fill script illustrator<br />
-How to make circle mandalas with circle fill script illustrator<br />
-How to create circle typography with circle fill script illustrator<br />
-How to make circle stickers with circle fill script illustrator<br />
-How to create circle animations with circle fill script illustrator<br />
-How to make circle posters with circle fill script illustrator<br />
-How to create circle flyers with circle fill script illustrator<br />
-How to make circle badges with circle fill script illustrator<br />
-How to create circle banners with circle fill script illustrator<br />
-How to make circle labels with circle fill script illustrator<br />
-How to create circle stamps with circle fill script illustrator<br />
-How to make circle buttons with circle fill script illustrator<br />
-How to create circle stickers with circle fill script illustrator</p>
-<ul>
-<li><strong>Min Radius</strong>: This is the minimum radius of the circles in pixels. You can enter any value between 0 and 100. The smaller the value, the more circles will be generated.</li>
-<li><strong>Max Radius</strong>: This is the maximum radius of the circles in pixels. You can enter any value between 0 and 100. The larger the value, the fewer circles will be generated.</li>
-<li><strong>Gap</strong>: This is the gap between the circles in pixels. You can enter any value between 0 and 100. The larger the value, the more space will be left between the circles.</li>
-<li><strong>Random Color</strong>: This is a checkbox that allows you to apply random colors to the circles from your swatches panel. If you check this option, each circle will have a different color from your swatches. If you uncheck this option, all circles will have the same color as your fill color.</li>
-</ul>
-<p>You can experiment with different values and see how they affect the result. You can also click <em>Preview</em> to see a preview of the circle filling effect before applying it. When you are happy with the settings, click <em>OK</em> to apply them.</p>
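-<p>If you are curious what a script like this does under the hood, here is a minimal, hypothetical ExtendScript sketch of the basic idea. It only scatters randomly sized, randomly colored circles across the bounding box of the first selected path; the real Circle Fill script additionally tests that every circle lies inside the outline and packs them without overlaps, which this sketch omits. The <code>minR</code>, <code>maxR</code>, and <code>count</code> values are made-up parameters for illustration, not options of the actual script.</p>
-<pre><code>// Hypothetical sketch, not the real CircleFill.jsx:
-// scatter circles inside the bounding box of the selected path.
-var doc   = app.activeDocument;
-var shape = doc.selection[0];          // assumes one path is selected
-var gb    = shape.geometricBounds;     // [left, top, right, bottom]; y grows upward
-var minR  = 2, maxR = 10, count = 200; // radii and circle count, in points
-
-for (var i = 0; i &lt; count; i++) {
-    var r = minR + Math.random() * (maxR - minR);
-    var x = gb[0] + r + Math.random() * ((gb[2] - gb[0]) - 2 * r);
-    var y = gb[3] + r + Math.random() * ((gb[1] - gb[3]) - 2 * r);
-    // pathItems.ellipse(top, left, width, height) draws from the top-left corner
-    var c = doc.pathItems.ellipse(y + r, x - r, 2 * r, 2 * r);
-    c.stroked = false;
-    var col   = new RGBColor();        // give each circle a random RGB fill
-    col.red   = Math.floor(Math.random() * 256);
-    col.green = Math.floor(Math.random() * 256);
-    col.blue  = Math.floor(Math.random() * 256);
-    c.fillColor = col;
-}</code></pre>
-<p>You would run such a sketch the same way as Circle Fill itself, via <em>File > Scripts > Other Scripts...</em>, on an RGB document with one path selected. A full implementation would add the point-in-shape and overlap checks that turn this random scatter into a proper packed fill.</p>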
- <h3>How to Apply Random Colors to the Circles</h3>
-<p>If you want to apply random colors to the circles, you need to have some colors in your swatches panel first. You can use the default colors that come with Adobe Illustrator, or you can create your own custom colors and add them to your swatches panel. Here are some ways to do that:</p>
-<ul>
-<li>You can use the <em>Color Picker</em> tool (I) to pick a color from an image or another object on your artboard.</li>
-<li>You can use the <em>Color Panel</em> (Window > Color) to mix a color using sliders or numeric values.</li>
-<li>You can use the <em>Color Guide Panel</em> (Window > Color Guide) to find harmonious colors based on a base color or a color rule.</li>
-<li>You can use the <em>Swatch Libraries Menu</em> (Window > Swatch Libraries) to access various preset color libraries, such as Pantone, Web Safe, Gradients, Patterns, etc.</li>
-<li>You can use the <em>Edit Colors/Recolor Artwork</em> dialog box (Edit > Edit Colors > Recolor Artwork) to edit or recolor multiple colors at once.</li>
-</ul>
-<p>To add a color to your swatches panel, simply click on an empty swatch or drag and drop a color from another panel or object onto an empty swatch. You can also rename or delete your swatches by double-clicking on them or using the options menu in the swatches panel.</p>
- <h2>Tips and Tricks for Using Circle Fill Script</h2>
-<p>Circle Fill script is a versatile and fun tool that can help you create amazing circle-filled artworks in Adobe Illustrator. Here are some tips and tricks for using it effectively:</p>
- <h3>How to Create Different Shapes with Circles</h3>
-<p>You can use Circle Fill script to fill any outline shape with circles, but you can also use it to create different shapes with circles. For example, you can use it to create circles within circles, concentric circles, spiral circles, etc. Here are some steps:</p>
-<ol>
-<li>Create a circle with the <em>Ellipse Tool</em> (L).</li>
-<li>Select it and run Circle Fill script with your desired settings.</li>
-<li>Select one of the circles inside the original circle and run Circle Fill script again with different settings.</li>
-<li>Repeat this process until you get the desired effect.</li>
-</ol>
- <h3>How to Combine Circle Fill Script with Other Scripts</h3>
-<p>You can also combine Circle Fill script with other scripts to create more complex and interesting effects. For example, you can use it with scripts that transform, distort, rotate, duplicate, or animate objects. Here are some examples of scripts that you can use with Circle Fill script:</p>
-<ul>
-<li><strong>Roughen.jsx</strong>: This script applies a roughen effect to selected objects, making them look more organic and hand-drawn. You can find it here: (https://design.tutsplus.com/articles/20-free-and-useful-adobe-illustrator-scripts--vector-3849).</li>
-<li><strong>TransformEach.jsx</strong>: This script transforms each object in the selection individually, allowing you to scale, rotate, move, or shear them. You can find it here: (https://github.com/Silly-V/Adobe-Illustrator/blob/master/Transform%20Each/TransformEach.jsx).</li>
-<li><strong>Random Rotate.jsx</strong>: This script rotates each object in the selection randomly within a specified angle range. You can find it here: (https://github.com/Silly-V/Adobe-Illustrator/blob/master/Random%20Rotate/RandomRotate.jsx).</li>
-<li><strong>Random Swatches Fill.jsx</strong>: This script fills each object in the selection with a random swatch color. You can find it here: (https://github.com/Silly-V/Adobe-Illustrator/blob/master/Random%20Swatches%20Fill/RandomSwatchesFill.jsx).</li>
-<li><strong>Circle Animation.jsx</strong>: This script creates an animation of circles growing and shrinking. You can find it here: (https://github.com/Silly-V/Adobe-Illustrator/blob/master/Circle%20Animation/CircleAnimation.jsx).</li>
-</ul>
-<p>To use these scripts with Circle Fill script, you need to run them after you have filled your objects with circles. For example, you can use Roughen.jsx to make the circles look more natural, or TransformEach.jsx to scale or rotate them individually. You can also use Random Swatches Fill.jsx to change the colors of the circles, or Circle Animation.jsx to create a dynamic effect.</p>
- <h3>How to Save and Export Your Circle Filled Artwork</h3>
-<p>After you have created your circle filled artwork, you may want to save or export it for different purposes. For example, you may want to save it as an Illustrator file for further editing, or export it as a PNG or JPEG file for web or print use. Here are some steps:</p>
-<ol>
-<li>To save your artwork as an Illustrator file, go to <em>File > Save As...</em> and choose <em>Adobe Illustrator (*.AI)</em> as the format. You can also choose other options such as version compatibility, compression, and PDF compatibility.</li>
-<li>To export your artwork as a PNG or JPEG file, go to <em>File > Export > Export As...</em> and choose <em>PNG (*.PNG)</em> or <em>JPEG (*.JPG)</em> as the format. You can also choose other options such as resolution, quality, background color, and anti-aliasing.</li>
-</ol>
- <h2>Conclusion and FAQs</h2>
-<p>In this article, we have shown you how to fill objects with circles in Adobe Illustrator using Circle Fill script. We have also shown you how to download and install the script, how to adjust the parameters of the script, how to apply random colors to the circles, how to create different shapes with circles, how to combine Circle Fill script with other scripts, and how to save and export your circle filled artwork. We hope you have enjoyed this tutorial and learned something new and useful.</p>
-<p>If you have any questions or comments about Circle Fill script or this article, feel free to leave them below. We will try to answer them as soon as possible. Here are some FAQs that may help you:</p>
- <h4>Q: Where can I find more scripts for Adobe Illustrator?</h4>
-<p>A: There are many websites and blogs that offer free and useful scripts for Adobe Illustrator. Some of them are:</p>
-<ul>
-<li>[Envato Tuts+](https://design.tutsplus.com/categories/adobe-illustrator-scripts)</li>
-<li>[Hiroyuki Sato's website](https://shspage.com/aijs/en/)</li>
-<li>[Silly-V's GitHub repository](https://github.com/Silly-V/Adobe-Illustrator)</li>
-<li>[Vectorboom](http://vectorboom.com/load/freebies/freescripts/3-1-0-14)</li>
-<li>[CreativePro](https://creativepro.com/tag/scripts/)</li>
-</ul>
- <h4>Q: How can I create my own scripts for Adobe Illustrator?</h4>
-<p>A: If you have some programming skills and knowledge of JavaScript, AppleScript, or Visual Basic, you can create your own scripts for Adobe Illustrator using the Script Editor application that comes with your operating system. You can also use a text editor such as Notepad or Sublime Text to write your scripts. You can find more information and tutorials on how to create scripts for Adobe Illustrator here: (https://www.adobe.com/devnet/illustrator/scripting.html).</p>
- <h4>Q: How can I share my circle filled artwork with others?</h4>
-<p>A: There are many ways to share your circle filled artwork with others, such as:</p>
-<ul>
-<li>Posting it on social media platforms, such as Facebook, Twitter, Instagram, Pinterest, etc.</li>
-<li>Uploading it to online galleries or portfolios, such as Behance, Dribbble, DeviantArt, etc.</li>
-<li>Sending it via email or messaging apps, such as Gmail, WhatsApp, Telegram, etc.</li>
-<li>Printing it on paper or other materials, such as posters, stickers, cards, etc.</li>
-</ul>
-<p>However, before you share your circle filled artwork with others, make sure you have the permission and the rights to do so. If you have used any images, fonts, or other resources that are not your own or are not free to use, you may need to credit the original authors or obtain their consent. You may also need to respect the terms and conditions of the platforms or services that you use to share your artwork.</p>
- <h4>Q: How can I improve my skills in Adobe Illustrator?</h4>
-<p>A: Adobe Illustrator is a complex and powerful software that requires a lot of practice and learning to master. However, there are many resources and opportunities that can help you improve your skills in Adobe Illustrator, such as:</p>
-<ul>
-<li>Watching online tutorials and courses, such as [Lynda.com](https://www.lynda.com/Illustrator-training-tutorials/227-0.html), [Udemy](https://www.udemy.com/topic/adobe-illustrator/), [Skillshare](https://www.skillshare.com/browse/adobe-illustrator), etc.</li>
-<li>Reading books and magazines, such as [Adobe Illustrator Classroom in a Book](https://www.amazon.com/Adobe-Illustrator-Classroom-Book-release/dp/0136412671), [Illustrator WOW! Book](https://www.amazon.com/Illustrator-WOW-Book-Sharon-Steuer/dp/0134895641), [Computer Arts](https://www.creativebloq.com/computerarts), etc.</li>
-<li>Joining online communities and forums, such as [Adobe Forums](https://community.adobe.com/t5/illustrator/bd-p/illustrator?page=1&sort=latest_replies&filter=all), [Reddit](https://www.reddit.com/r/AdobeIllustrator/), [Stack Exchange](https://graphicdesign.stackexchange.com/questions/tagged/adobe-illustrator), etc.</li>
-<li>Participating in online challenges and contests, such as [Daily Logo Challenge](https://dailylogochallenge.com/), [LogoCore](https://logocore.com/challenge/), [99designs](https://99designs.com/contests), etc.</li>
-<li>Seeking feedback and critique from other designers and experts, such as [Behance Reviews](https://www.behance.net/reviews), [Dribbble Shots](https://dribbble.com/shots/popular/web-design), [LogoLounge](https://www.logolounge.com/), etc.</li>
-</ul>
- <h4>Q: How can I contact you if I have more questions or requests?</h4>
-<p>A: If you have more questions or requests about Circle Fill script or this article, you can contact me by leaving a comment below or sending me an email at <em>contentwriter@example.com</em>. I will be happy to hear from you and assist you with your needs.</p> 197e85843d<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Music Player APK - How to Play All Your Music Files and MVs on Android.md b/spaces/congsaPfin/Manga-OCR/logs/Music Player APK - How to Play All Your Music Files and MVs on Android.md
deleted file mode 100644
index 07f8c87d846822cb82c0d55e4f945d6d9d126208..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Music Player APK - How to Play All Your Music Files and MVs on Android.md	
+++ /dev/null
@@ -1,122 +0,0 @@
-
-<h1>How to Download Music Player APK for Android</h1>
- <p>Do you love listening to music on your Android device? Do you want to enjoy a powerful and beautiful music player app that can play all your music files and enhance your sound quality? If yes, then you should try Music Player APK, a free and easy-to-use app that can transform your music experience. In this article, we will show you what Music Player APK is, what features and benefits it offers, and how to download and install it on your Android device.</p>
-<h2>music player download apk</h2><br /><p><b><b>DOWNLOAD</b> &#128279; <a href="https://urlca.com/2uOdYx">https://urlca.com/2uOdYx</a></b></p><br /><br />
- <h2>What is Music Player APK?</h2>
- <p>Music Player APK is an audio player app that can play all your music and audio files on your Android device. It supports various formats such as MP3, WAV, OGG, FLAC, M4A, etc. It also has a powerful equalizer, bass boost, sound changer, and other sound effects that can improve your sound quality and make your music more enjoyable. You can also customize the background skin of the app according to your preference.</p>
- <h3>Features of Music Player APK</h3>
- <p>Some of the features of Music Player APK are:</p>
- <ul>
-<li>Quick search and browse all your music and audio files by albums, artists, genres, songs, playlists, folders, etc.</li>
-<li>Create and edit your own playlists and add your favorite songs to them.</li>
-<li>Supports various music formats such as MP3, WAV, OGG, FLAC, M4A, etc.</li>
-<li>Powerful equalizer with 10 presets and custom settings.</li>
-<li>Bass boost, virtualizer, reverb, and other sound effects to enhance your sound quality.</li>
-<li>Sound changer with different modes such as normal, classical, dance, flat, folk, heavy metal, hip hop, jazz, pop, rock, etc.</li>
-<li>Customize the background skin of the app with various themes and colors.</li>
-<li>Supports headset control and lock screen control.</li>
-<li>Supports widgets and notification bar control.</li>
-<li>Sleep timer and shake to change song.</li>
-</ul>
- <h3>Benefits of Music Player APK</h3>
- <p>Some of the benefits of Music Player APK are:</p>
- <ul>
-<li>It is free and easy to use.</li>
-<li>It can play all your music and audio files without any hassle.</li>
-<li>It can improve your sound quality and make your music more enjoyable.</li>
-<li>It can customize the appearance of the app according to your preference.</li>
-<li>It can save your battery life by using fewer resources.</li>
-</ul>
- <h2>How to Download and Install Music Player APK?</h2>
- <p>If you want to download and install Music Player APK on your Android device, you need to follow these steps:</p>
- <h3>Step 1: Find a Reliable Source</h3>
- <p>The first step is to find a reliable source from which to download the APK file of Music Player. You can use a trusted website such as [APKPure](https://apkpure.com), where you can find the latest version of the app. You can also scan the QR code on the website to download the app directly to your device.</p>
- <h3>Step 2: Enable Unknown Sources</h3>
- <p>The second step is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.</p>
- <h3>Step 3: Download the APK File</h3>
- <p>The third step is to download the APK file of Music Player from the website or scan the QR code on your device. The download process may take a few minutes depending on your internet speed and device performance. You can check the progress of the download on the notification bar or the download manager of your device.</p>
- <h3>Step 4: Install the APK File</h3>
- <p>The fourth and final step is to install the APK file of Music Player on your device. To do this, locate the downloaded file on your device and tap on it. You may need to grant some permissions to the app by tapping Install or Next. Once the installation is complete, you can tap Open or Done to launch the app or exit the installer.</p>
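- <p>If you prefer to install from a computer instead of tapping through the on-device installer, you can also sideload the APK over USB. Here is a rough Python sketch that shells out to <code>adb</code> (this assumes the Android platform tools are installed and USB debugging is enabled; the file name is only a placeholder):</p>
- <pre><code>import subprocess
-
-def install_apk(apk_path):
-    # "adb install -r" reinstalls the app while keeping its existing data
-    subprocess.run(["adb", "install", "-r", apk_path], check=True)
-
-install_apk("music-player.apk")
-</code></pre>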
- <h2>How to Use Music Player APK?</h2>
- <p>Now that you have downloaded and installed Music Player APK on your device, you can start using it to enjoy your music. Here are some tips on how to use the app:</p>
- <h3>How to Browse and Play Music</h3>
- <p>To browse and play music, you can use the tabs at the bottom of the app to access different categories such as Albums, Artists, Genres, Songs, Playlists, and Folders. You can also use the search icon at the top right corner of the app to find any music or audio file by name. To play a song, simply tap on it and it will start playing. You can use the controls at the bottom of the app to pause, resume, skip, or repeat the song. You can also swipe left or right on the album art to change the song.</p>
- <h3>How to Adjust the Equalizer and Sound Effects</h3>
- <p>To adjust the equalizer and sound effects, you can tap on the equalizer icon at the top right corner of the app. This will open a menu where you can choose from 10 presets such as Normal, Classical, Dance, Flat, Folk, Heavy Metal, Hip Hop, Jazz, Pop, and Rock. You can also customize your own settings by dragging the sliders for each frequency band. You can also enable or disable other sound effects such as Bass Boost, Virtualizer, and Reverb by tapping on their icons.</p>
- <h3>How to Customize the Background Skin</h3>
- <p>To customize the background skin of the app, you can tap on the menu icon at the top left corner of the app and select Settings. Then, you can tap on Background Skin and choose from various themes and colors. You can also select Custom Background and choose an image from your gallery or camera to use as your background skin.</p>
- <h2>Conclusion</h2>
- <p>Music Player APK is a great app for music lovers who want to enjoy a powerful and beautiful music player app that can play all their music and audio files and enhance their sound quality. It is free and easy to use and offers various features and benefits such as quick search and browse, powerful equalizer and sound effects, customizable background skin, headset and lock screen control, widgets and notification bar control, sleep timer and shake to change song. If you want to download Music Player APK for Android, you can follow these steps:</p>
- <ol>
-<li>Find a reliable source where you can download the APK file of Music Player.</li>
-<li>Enable unknown sources on your device.</li>
-<li>Download the APK file of Music Player from the website or scan the QR code.</li>
-<li>Install the APK file of Music Player on your device.</li>
-</ol>
- <p>Once you have installed Music Player APK on your device, you can start using it to enjoy your music. You can browse and play music by albums, artists, genres, songs, playlists, folders, etc. You can adjust the equalizer and sound effects according to your preference. You can customize the background skin of the app with various themes and colors or your own image. You can also use other features such as headset and lock screen control, widgets and notification bar control, sleep timer and shake to change song.</p>
- <p>We hope this article has helped you learn how to download Music Player APK for Android and how to use it. If you have any questions or feedback, please feel free to leave a comment below.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Music Player APK:</p>
- <ol>
-<li>Is Music Player APK safe to download and install?</li>
-<p>Yes, Music Player APK is safe to download and install as long as you use a trusted source such as [APKPure]. However, you should always be careful when downloading apps from unknown sources as they may contain malware or viruses that can harm your device.</p>
-<li>Does Music Player APK require any permissions?</li>
-<p>Yes, Music Player APK requires some permissions such as access to storage, microphone, phone calls, location, and internet. These permissions are necessary for the app to function properly and access your music and audio files, record audio, make phone calls, show your location, and connect to the internet. You can grant or deny these permissions when you install the app or later in the settings of your device.</p>
- <li>Does Music Player APK support online streaming?</li>
-<p>No, Music Player APK does not support online streaming. It can only play music and audio files that are stored on your device or external storage. If you want to stream music online, you can use other apps such as Spotify, YouTube Music, SoundCloud, etc.</p>
- <li>Does Music Player APK support lyrics display?</li>
-<p>No, Music Player APK does not support lyrics display. It can only show the album art, song title, artist name, and duration of the song. If you want to see the lyrics of the song, you can use other apps such as Musixmatch, Genius, Lyrics Mania, etc.</p>
- <li>Does Music Player APK have ads?</li>
-<p>No, Music Player APK does not have ads. It is a completely ad-free app that does not interrupt your music experience with annoying ads or pop-ups. You can enjoy your music without any distractions.</p>
-</ol>
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Download Percy Jackson Sea of Monsters Dual Audio Hindi 720p 156 The Best Quality for the Best Price.md b/spaces/contluForse/HuggingGPT/assets/Download Percy Jackson Sea of Monsters Dual Audio Hindi 720p 156 The Best Quality for the Best Price.md
deleted file mode 100644
index e37ee98b1f75874ab283fa7aac33fd1e3644237d..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Download Percy Jackson Sea of Monsters Dual Audio Hindi 720p 156 The Best Quality for the Best Price.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>percy jackson sea of monsters dual audio hindi 720p download 156</h2><br /><p><b><b>Download Zip</b> &#10027;&#10027;&#10027; <a href="https://ssurll.com/2uzvvZ">https://ssurll.com/2uzvvZ</a></b></p><br /><br />
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Drum Kit Builder Free Download Extra Quality.md b/spaces/contluForse/HuggingGPT/assets/Drum Kit Builder Free Download Extra Quality.md
deleted file mode 100644
index 2ae02f7f6d8a0ff7e78ca9d945c9b933914aa0b5..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Drum Kit Builder Free Download Extra Quality.md	
+++ /dev/null
@@ -1,22 +0,0 @@
-
-<h1>How to Build Your Own Custom Drum Kit for Free</h1>
-<p>If you are a drummer who wants to create your own unique sound, you might be interested in building your own custom drum kit. But how can you do that without spending a fortune on expensive drum parts and accessories?</p>
-<p>The answer is simple: use a drum kit builder software. A drum kit builder software is a program that lets you design and customize your own drum set using 3D graphics and realistic sounds. You can choose from different drum shells, heads, cymbals, hardware, and even upload your own images and logos. You can also adjust the size, position, angle, and tuning of each drum component to suit your preferences.</p>
-<h2>drum kit builder free download</h2><br /><p><b><b>Download File</b> &#10027;&#10027;&#10027; <a href="https://ssurll.com/2uzxkR">https://ssurll.com/2uzxkR</a></b></p><br /><br />
-<p>There are many drum kit builder software available online, but some of them are not free or have limited features. That's why we have compiled a list of the best free drum kit builder software that you can download and use right away. These are:</p>
-<ul>
-<li><strong>Drumstructor</strong>: This is a rising 3D drum configurator developed by drummers to build and design custom drum kits[^1^]. You can get creative with tons of options and tools, and save, load or share your dream drum sets. You can also support the developer on Patreon.</li>
-<li><strong>Masterworks Configurator</strong>: This is an official tool by Pearl Drums that lets you create your own Masterworks series drum kit[^2^]. You can choose from various shell materials, finishes, hardware colors, and sizes. You can also request a quote from Pearl dealers after you finish your design.</li>
-<li><strong>WFLIII Drums Custom Builder</strong>: This is a custom builder by WFLIII Drums, a company founded by William F. Ludwig III, the grandson of the founder of Ludwig Drums[^3^]. You can build your own snare drum, tom, or bass drum using their high-quality products and finishes. You can also contact them for help if you have any questions.</li>
-</ul>
-<p>These are just some of the free drum kit builder software that you can use to create your own custom drum kit. Of course, if you want to add more sounds and samples to your drum kit, you can also check out some of the free drum kits available online[^4^]. These include sample packs by Goldbaby Samples, 99 Drum Samples by 99Sounds, Analog Drums by Six Bit Deep, Reverb Drum Machines by Reverb, and many more.</p>
-<p>With these free tools and resources, you can unleash your creativity and build your own custom drum kit for free. Have fun and happy drumming!</p>
-
-<p>Now that you have built your own custom drum kit for free, you might be wondering how to play it. Well, there are two ways to do that: either use a MIDI controller or a virtual drum pad.</p>
-<p>A MIDI controller is a device that can send MIDI signals to your computer or other devices. You can use a MIDI controller to trigger the sounds of your drum kit using pads, keys, or pedals. Some examples of MIDI controllers are electronic drum kits, keyboards, drum machines, and foot controllers. To use a MIDI controller, you need to connect it to your computer via USB or MIDI cable, and then assign each pad or key to a corresponding sound in your drum kit builder software.</p>
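-<p>As a rough illustration, here is a minimal Python sketch using the <code>mido</code> library (an assumption on our part; any MIDI library would do) that sends the kind of note-on message a pad or key on a controller produces:</p>
-<pre><code>import mido
-
-# open the first available MIDI output port (assumes one is connected)
-port = mido.open_output()
-
-# note 38 is the acoustic snare in the General MIDI percussion map;
-# channel 9 is the zero-indexed GM percussion channel (channel 10)
-port.send(mido.Message('note_on', note=38, velocity=100, channel=9))
-port.send(mido.Message('note_off', note=38, channel=9))
-port.close()
-</code></pre>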
-<p>A virtual drum pad is a software that simulates a drum pad on your screen. You can use a virtual drum pad to play your drum kit using your mouse, keyboard, or touchscreen. Some examples of virtual drum pads are Drumbit, HTML5 Drum Machine, and Virtual Drumming. To use a virtual drum pad, you need to open it in your browser or download it as an app, and then load your drum kit sounds into it.</p>
-<p>Both methods have their advantages and disadvantages. A MIDI controller can give you more realistic and expressive playing, but it can also be more expensive and complicated to set up. A virtual drum pad can be more convenient and accessible, but it can also be less responsive and accurate. You can choose the method that suits your needs and preferences best.</p>
-<p>Once you have chosen your method of playing, you can start practicing and improving your drum skills. You can record your performances and share them with others, or explore other genres and styles of drumming with different drum kits and sounds. The possibilities are endless!</p>
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/dwpose/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/dwpose/__init__.py
deleted file mode 100644
index fcb3be19a83795ed13f7a022ad601ebdfd9a1e52..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/dwpose/__init__.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Openpose
-# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
-# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
-# 3rd Edited by ControlNet
-# 4th Edited by ControlNet (added face and correct hands)
-
-import os
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-import torch
-import numpy as np
-from . import util
-from .wholebody import Wholebody
-
-def draw_pose(pose, H, W):
-    bodies = pose['bodies']
-    faces = pose['faces']
-    hands = pose['hands']
-    candidate = bodies['candidate']
-    subset = bodies['subset']
-    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
-
-    canvas = util.draw_bodypose(canvas, candidate, subset)
-
-    canvas = util.draw_handpose(canvas, hands)
-
-    canvas = util.draw_facepose(canvas, faces)
-
-    return canvas
-
-
-class DWposeDetector:
-    def __init__(self):
-
-        self.pose_estimation = Wholebody()
-
-    def __call__(self, oriImg):
-        oriImg = oriImg.copy()
-        H, W, C = oriImg.shape
-        with torch.no_grad():
-            candidate, subset = self.pose_estimation(oriImg)
-            nums, keys, locs = candidate.shape
-            candidate[..., 0] /= float(W)
-            candidate[..., 1] /= float(H)
-            body = candidate[:,:18].copy()
-            body = body.reshape(nums*18, locs)
-            score = subset[:,:18]
-            for i in range(len(score)):
-                for j in range(len(score[i])):
-                    if score[i][j] > 0.3:
-                        score[i][j] = int(18*i+j)
-                    else:
-                        score[i][j] = -1
-
-            un_visible = subset<0.3
-            candidate[un_visible] = -1
-
-            foot = candidate[:,18:24]
-
-            faces = candidate[:,24:92]
-
-            hands = candidate[:,92:113]
-            hands = np.vstack([hands, candidate[:,113:]])
-            
-            bodies = dict(candidate=body, subset=score)
-            pose = dict(bodies=bodies, hands=hands, faces=faces)
-
-            return draw_pose(pose, H, W)
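-
-
-# A minimal usage sketch (not part of the original module): run the detector
-# on a single image. Assumes OpenCV is installed; the file paths are
-# hypothetical placeholders.
-if __name__ == "__main__":
-    import cv2
-
-    detector = DWposeDetector()
-    image = cv2.imread("person.jpg")   # HxWx3 uint8 array
-    canvas = detector(image)           # pose rendering at the input resolution
-    cv2.imwrite("pose.png", canvas)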
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/backbones/cgnet.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/backbones/cgnet.py
deleted file mode 100644
index 45c235e2e7fcef21e933ecb3ff88a37fa953abe6..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/backbones/cgnet.py
+++ /dev/null
@@ -1,367 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.utils.checkpoint as cp
-from annotator.mmpkg.mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
-                      constant_init, kaiming_init)
-from annotator.mmpkg.mmcv.runner import load_checkpoint
-from annotator.mmpkg.mmcv.utils.parrots_wrapper import _BatchNorm
-
-from annotator.mmpkg.mmseg.utils import get_root_logger
-from ..builder import BACKBONES
-
-
-class GlobalContextExtractor(nn.Module):
-    """Global Context Extractor for CGNet.
-
-    This class is employed to refine the joint feature of both local feature
-    and surrounding context.
-
-    Args:
-        channel (int): Number of input feature channels.
-        reduction (int): Reductions for global context extractor. Default: 16.
-        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-            memory while slowing down the training speed. Default: False.
-    """
-
-    def __init__(self, channel, reduction=16, with_cp=False):
-        super(GlobalContextExtractor, self).__init__()
-        self.channel = channel
-        self.reduction = reduction
-        assert reduction >= 1 and channel >= reduction
-        self.with_cp = with_cp
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.fc = nn.Sequential(
-            nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True),
-            nn.Linear(channel // reduction, channel), nn.Sigmoid())
-
-    def forward(self, x):
-
-        def _inner_forward(x):
-            num_batch, num_channel = x.size()[:2]
-            y = self.avg_pool(x).view(num_batch, num_channel)
-            y = self.fc(y).view(num_batch, num_channel, 1, 1)
-            return x * y
-
-        if self.with_cp and x.requires_grad:
-            out = cp.checkpoint(_inner_forward, x)
-        else:
-            out = _inner_forward(x)
-
-        return out
-
-
-class ContextGuidedBlock(nn.Module):
-    """Context Guided Block for CGNet.
-
-    This class consists of four components: local feature extractor,
-    surrounding feature extractor, joint feature extractor and global
-    context extractor.
-
-    Args:
-        in_channels (int): Number of input feature channels.
-        out_channels (int): Number of output feature channels.
-        dilation (int): Dilation rate for surrounding context extractor.
-            Default: 2.
-        reduction (int): Reduction for global context extractor. Default: 16.
-        skip_connect (bool): Add input to output or not. Default: True.
-        downsample (bool): Downsample the input to 1/2 or not. Default: False.
-        conv_cfg (dict): Config dict for convolution layer.
-            Default: None, which means using conv2d.
-        norm_cfg (dict): Config dict for normalization layer.
-            Default: dict(type='BN', requires_grad=True).
-        act_cfg (dict): Config dict for activation layer.
-            Default: dict(type='PReLU').
-        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-            memory while slowing down the training speed. Default: False.
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 dilation=2,
-                 reduction=16,
-                 skip_connect=True,
-                 downsample=False,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN', requires_grad=True),
-                 act_cfg=dict(type='PReLU'),
-                 with_cp=False):
-        super(ContextGuidedBlock, self).__init__()
-        self.with_cp = with_cp
-        self.downsample = downsample
-
-        channels = out_channels if downsample else out_channels // 2
-        if 'type' in act_cfg and act_cfg['type'] == 'PReLU':
-            act_cfg['num_parameters'] = channels
-        kernel_size = 3 if downsample else 1
-        stride = 2 if downsample else 1
-        padding = (kernel_size - 1) // 2
-
-        self.conv1x1 = ConvModule(
-            in_channels,
-            channels,
-            kernel_size,
-            stride,
-            padding,
-            conv_cfg=conv_cfg,
-            norm_cfg=norm_cfg,
-            act_cfg=act_cfg)
-
-        self.f_loc = build_conv_layer(
-            conv_cfg,
-            channels,
-            channels,
-            kernel_size=3,
-            padding=1,
-            groups=channels,
-            bias=False)
-        self.f_sur = build_conv_layer(
-            conv_cfg,
-            channels,
-            channels,
-            kernel_size=3,
-            padding=dilation,
-            groups=channels,
-            dilation=dilation,
-            bias=False)
-
-        self.bn = build_norm_layer(norm_cfg, 2 * channels)[1]
-        self.activate = nn.PReLU(2 * channels)
-
-        if downsample:
-            self.bottleneck = build_conv_layer(
-                conv_cfg,
-                2 * channels,
-                out_channels,
-                kernel_size=1,
-                bias=False)
-
-        self.skip_connect = skip_connect and not downsample
-        self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp)
-
-    def forward(self, x):
-
-        def _inner_forward(x):
-            out = self.conv1x1(x)
-            loc = self.f_loc(out)
-            sur = self.f_sur(out)
-
-            joi_feat = torch.cat([loc, sur], 1)  # the joint feature
-            joi_feat = self.bn(joi_feat)
-            joi_feat = self.activate(joi_feat)
-            if self.downsample:
-                joi_feat = self.bottleneck(joi_feat)  # channel = out_channels
-            # f_glo is employed to refine the joint feature
-            out = self.f_glo(joi_feat)
-
-            if self.skip_connect:
-                return x + out
-            else:
-                return out
-
-        if self.with_cp and x.requires_grad:
-            out = cp.checkpoint(_inner_forward, x)
-        else:
-            out = _inner_forward(x)
-
-        return out
-
-
-class InputInjection(nn.Module):
-    """Downsampling module for CGNet."""
-
-    def __init__(self, num_downsampling):
-        super(InputInjection, self).__init__()
-        self.pool = nn.ModuleList()
-        for i in range(num_downsampling):
-            self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
-
-    def forward(self, x):
-        for pool in self.pool:
-            x = pool(x)
-        return x
-
-
-@BACKBONES.register_module()
-class CGNet(nn.Module):
-    """CGNet backbone.
-
-    A Light-weight Context Guided Network for Semantic Segmentation
-    arXiv: https://arxiv.org/abs/1811.08201
-
-    Args:
-        in_channels (int): Number of input image channels. Normally 3.
-        num_channels (tuple[int]): Numbers of feature channels at each stages.
-            Default: (32, 64, 128).
-        num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2.
-            Default: (3, 21).
-        dilations (tuple[int]): Dilation rate for surrounding context
-            extractors at stage 1 and stage 2. Default: (2, 4).
-        reductions (tuple[int]): Reductions for global context extractors at
-            stage 1 and stage 2. Default: (8, 16).
-        conv_cfg (dict): Config dict for convolution layer.
-            Default: None, which means using conv2d.
-        norm_cfg (dict): Config dict for normalization layer.
-            Default: dict(type='BN', requires_grad=True).
-        act_cfg (dict): Config dict for activation layer.
-            Default: dict(type='PReLU').
-        norm_eval (bool): Whether to set norm layers to eval mode, namely,
-            freeze running stats (mean and var). Note: Effect on Batch Norm
-            and its variants only. Default: False.
-        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-            memory while slowing down the training speed. Default: False.
-    """
-
-    def __init__(self,
-                 in_channels=3,
-                 num_channels=(32, 64, 128),
-                 num_blocks=(3, 21),
-                 dilations=(2, 4),
-                 reductions=(8, 16),
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN', requires_grad=True),
-                 act_cfg=dict(type='PReLU'),
-                 norm_eval=False,
-                 with_cp=False):
-
-        super(CGNet, self).__init__()
-        self.in_channels = in_channels
-        self.num_channels = num_channels
-        assert isinstance(self.num_channels, tuple) and len(
-            self.num_channels) == 3
-        self.num_blocks = num_blocks
-        assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2
-        self.dilations = dilations
-        assert isinstance(self.dilations, tuple) and len(self.dilations) == 2
-        self.reductions = reductions
-        assert isinstance(self.reductions, tuple) and len(self.reductions) == 2
-        self.conv_cfg = conv_cfg
-        self.norm_cfg = norm_cfg
-        self.act_cfg = act_cfg
-        if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU':
-            self.act_cfg['num_parameters'] = num_channels[0]
-        self.norm_eval = norm_eval
-        self.with_cp = with_cp
-
-        cur_channels = in_channels
-        self.stem = nn.ModuleList()
-        for i in range(3):
-            self.stem.append(
-                ConvModule(
-                    cur_channels,
-                    num_channels[0],
-                    3,
-                    2 if i == 0 else 1,
-                    padding=1,
-                    conv_cfg=conv_cfg,
-                    norm_cfg=norm_cfg,
-                    act_cfg=act_cfg))
-            cur_channels = num_channels[0]
-
-        self.inject_2x = InputInjection(1)  # down-sample for Input, factor=2
-        self.inject_4x = InputInjection(2)  # down-sample for Input, factor=4
-
-        cur_channels += in_channels
-        self.norm_prelu_0 = nn.Sequential(
-            build_norm_layer(norm_cfg, cur_channels)[1],
-            nn.PReLU(cur_channels))
-
-        # stage 1
-        self.level1 = nn.ModuleList()
-        for i in range(num_blocks[0]):
-            self.level1.append(
-                ContextGuidedBlock(
-                    cur_channels if i == 0 else num_channels[1],
-                    num_channels[1],
-                    dilations[0],
-                    reductions[0],
-                    downsample=(i == 0),
-                    conv_cfg=conv_cfg,
-                    norm_cfg=norm_cfg,
-                    act_cfg=act_cfg,
-                    with_cp=with_cp))  # CG block
-
-        cur_channels = 2 * num_channels[1] + in_channels
-        self.norm_prelu_1 = nn.Sequential(
-            build_norm_layer(norm_cfg, cur_channels)[1],
-            nn.PReLU(cur_channels))
-
-        # stage 2
-        self.level2 = nn.ModuleList()
-        for i in range(num_blocks[1]):
-            self.level2.append(
-                ContextGuidedBlock(
-                    cur_channels if i == 0 else num_channels[2],
-                    num_channels[2],
-                    dilations[1],
-                    reductions[1],
-                    downsample=(i == 0),
-                    conv_cfg=conv_cfg,
-                    norm_cfg=norm_cfg,
-                    act_cfg=act_cfg,
-                    with_cp=with_cp))  # CG block
-
-        cur_channels = 2 * num_channels[2]
-        self.norm_prelu_2 = nn.Sequential(
-            build_norm_layer(norm_cfg, cur_channels)[1],
-            nn.PReLU(cur_channels))
-
-    def forward(self, x):
-        output = []
-
-        # stage 0
-        inp_2x = self.inject_2x(x)
-        inp_4x = self.inject_4x(x)
-        for layer in self.stem:
-            x = layer(x)
-        x = self.norm_prelu_0(torch.cat([x, inp_2x], 1))
-        output.append(x)
-
-        # stage 1
-        for i, layer in enumerate(self.level1):
-            x = layer(x)
-            if i == 0:
-                down1 = x
-        x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1))
-        output.append(x)
-
-        # stage 2
-        for i, layer in enumerate(self.level2):
-            x = layer(x)
-            if i == 0:
-                down2 = x
-        x = self.norm_prelu_2(torch.cat([down2, x], 1))
-        output.append(x)
-
-        return output
-
-    def init_weights(self, pretrained=None):
-        """Initialize the weights in backbone.
-
-        Args:
-            pretrained (str, optional): Path to pre-trained weights.
-                Defaults to None.
-        """
-        if isinstance(pretrained, str):
-            logger = get_root_logger()
-            load_checkpoint(self, pretrained, strict=False, logger=logger)
-        elif pretrained is None:
-            for m in self.modules():
-                if isinstance(m, (nn.Conv2d, nn.Linear)):
-                    kaiming_init(m)
-                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
-                    constant_init(m, 1)
-                elif isinstance(m, nn.PReLU):
-                    constant_init(m, 0)
-        else:
-            raise TypeError('pretrained must be a str or None')
-
-    def train(self, mode=True):
-        """Convert the model into training mode will keeping the normalization
-        layer freezed."""
-        super(CGNet, self).train(mode)
-        if mode and self.norm_eval:
-            for m in self.modules():
-                # trick: eval have effect on BatchNorm only
-                if isinstance(m, _BatchNorm):
-                    m.eval()
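-
-
-# A minimal smoke-test sketch (not part of the original file): build the
-# backbone with its defaults and inspect the three output stages. With a
-# 3x224x224 input, the defaults give stage 0 at 35 channels and 1/2
-# resolution, stage 1 at 131 channels and 1/4, and stage 2 at 256 channels
-# and 1/8.
-if __name__ == "__main__":
-    model = CGNet()
-    model.init_weights()
-    x = torch.randn(1, 3, 224, 224)
-    for i, out in enumerate(model(x)):
-        print(f"stage {i}: {tuple(out.shape)}")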
diff --git a/spaces/csuhan/opendet2/opendet2/modeling/losses/unknown_probability_loss.py b/spaces/csuhan/opendet2/opendet2/modeling/losses/unknown_probability_loss.py
deleted file mode 100644
index c5adbae644957e7b79fbac5bbdd54d714f94f789..0000000000000000000000000000000000000000
--- a/spaces/csuhan/opendet2/opendet2/modeling/losses/unknown_probability_loss.py
+++ /dev/null
@@ -1,93 +0,0 @@
-
-import torch
-import torch.distributions as dists
-import torch.nn as nn
-import torch.nn.functional as F
-from torch import Tensor
-
-
-class UPLoss(nn.Module):
-    """Unknown Probability Loss
-    """
-
-    def __init__(self,
-                 num_classes: int,
-                 sampling_metric: str = "min_score",
-                 topk: int = 3,
-                 alpha: float = 1.0):
-        super().__init__()
-        self.num_classes = num_classes
-        assert sampling_metric in ["min_score", "max_entropy", "random"]
-        self.sampling_metric = sampling_metric
-        # if topk==-1, sample len(fg)*2 examples
-        self.topk = topk
-        self.alpha = alpha
-
-    def _soft_cross_entropy(self, input: Tensor, target: Tensor):
-        logprobs = F.log_softmax(input, dim=1)
-        return -(target * logprobs).sum() / input.shape[0]
-
-    def _sampling(self, scores: Tensor, labels: Tensor):
-        fg_inds = labels != self.num_classes
-        fg_scores, fg_labels = scores[fg_inds], labels[fg_inds]
-        bg_scores, bg_labels = scores[~fg_inds], labels[~fg_inds]
-
-        # remove unknown classes
-        _fg_scores = torch.cat(
-            [fg_scores[:, :self.num_classes-1], fg_scores[:, -1:]], dim=1)
-        _bg_scores = torch.cat(
-            [bg_scores[:, :self.num_classes-1], bg_scores[:, -1:]], dim=1)
-
-        num_fg = fg_scores.size(0)
-        topk = num_fg if (self.topk == -1) or (num_fg <
-                                               self.topk) else self.topk
-        # use maximum entropy as a metric for uncertainty
-        # we select topk proposals with maximum entropy
-        if self.sampling_metric == "max_entropy":
-            pos_metric = dists.Categorical(
-                _fg_scores.softmax(dim=1)).entropy()
-            neg_metric = dists.Categorical(
-                _bg_scores.softmax(dim=1)).entropy()
-        # use minimum score as a metric for uncertainty
-        # we select topk proposals with minimum max-score
-        elif self.sampling_metric == "min_score":
-            pos_metric = -_fg_scores.max(dim=1)[0]
-            neg_metric = -_bg_scores.max(dim=1)[0]
-        # we randomly select topk proposals
-        elif self.sampling_metric == "random":
-            pos_metric = torch.rand(_fg_scores.size(0),).to(scores.device)
-            neg_metric = torch.rand(_bg_scores.size(0),).to(scores.device)
-
-        _, pos_inds = pos_metric.topk(topk)
-        _, neg_inds = neg_metric.topk(topk)
-        fg_scores, fg_labels = fg_scores[pos_inds], fg_labels[pos_inds]
-        bg_scores, bg_labels = bg_scores[neg_inds], bg_labels[neg_inds]
-
-        return fg_scores, bg_scores, fg_labels, bg_labels
-
-    def forward(self, scores: Tensor, labels: Tensor):
-        fg_scores, bg_scores, fg_labels, bg_labels = self._sampling(
-            scores, labels)
-        # sample both fg and bg
-        scores = torch.cat([fg_scores, bg_scores])
-        labels = torch.cat([fg_labels, bg_labels])
-
-        num_sample, num_classes = scores.shape
-        mask = torch.arange(num_classes).repeat(
-            num_sample, 1).to(scores.device)
-        inds = mask != labels[:, None].repeat(1, num_classes)
-        mask = mask[inds].reshape(num_sample, num_classes-1)
-
-        gt_scores = torch.gather(
-            F.softmax(scores, dim=1), 1, labels[:, None]).squeeze(1)
-        mask_scores = torch.gather(scores, 1, mask)
-
-        gt_scores[gt_scores < 0] = 0.0
-        targets = torch.zeros_like(mask_scores)
-        num_fg = fg_scores.size(0)
-        targets[:num_fg, self.num_classes-2] = gt_scores[:num_fg] * \
-            (1-gt_scores[:num_fg]).pow(self.alpha)
-        targets[num_fg:, self.num_classes-1] = gt_scores[num_fg:] * \
-            (1-gt_scores[num_fg:]).pow(self.alpha)
-
-        return self._soft_cross_entropy(mask_scores, targets.detach())
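-
-
-# A minimal usage sketch (not part of the original file). Shapes follow the
-# conventions above: `scores` has num_classes + 1 logit columns and labels use
-# num_classes as the background index; the values are random placeholders.
-if __name__ == "__main__":
-    num_classes = 21
-    criterion = UPLoss(num_classes=num_classes, sampling_metric="min_score", topk=3)
-    scores = torch.randn(32, num_classes + 1)
-    # 24 foreground proposals with known-class labels, 8 background proposals
-    labels = torch.cat([torch.randint(0, num_classes, (24,)),
-                        torch.full((8,), num_classes, dtype=torch.long)])
-    print(criterion(scores, labels).item())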
diff --git a/spaces/danielcwang-optum/6-TreemapAndSunburst/app.py b/spaces/danielcwang-optum/6-TreemapAndSunburst/app.py
deleted file mode 100644
index 7e82b33b6fcf4e043710475b5be4d99624c99459..0000000000000000000000000000000000000000
--- a/spaces/danielcwang-optum/6-TreemapAndSunburst/app.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import io
-
-import streamlit as st
-import numpy as np
-import plotly.express as px
-import pandas as pd
-import plotly.graph_objects as go
-from pandas.api.types import is_numeric_dtype
-
-st.set_page_config(page_title="Plotly Graphing Libraries",layout='wide')
-
-uploaded_files = st.file_uploader("Choose a CSV file", accept_multiple_files=True)
-for uploaded_file in uploaded_files:
-    bytes_data = uploaded_file.read()
-    st.write("filename:", uploaded_file.name)
-    st.write(bytes_data)
-    
-    if st.checkbox("FileDetails"):
-
-        filevalue = uploaded_file.getvalue()
-        st.write(filevalue)
-        st.write(uploaded_file.name)
-        st.write(uploaded_file.type)
-        st.write(uploaded_file.size)
-        #st.write(uploaded_file.last_modified)
-        #st.write(uploaded_file.charset)
-        buffer = uploaded_file.getbuffer()
-        st.write(buffer)
-        st.write(buffer.nbytes)
-        st.write(buffer.tobytes())
-        # report each memoryview attribute once instead of repeating the calls
-        for attr in ("itemsize", "ndim", "shape", "strides", "suboffsets",
-                     "readonly", "c_contiguous", "f_contiguous", "contiguous"):
-            st.write(attr, getattr(buffer, attr))
-        # parse the upload as a CSV table rather than a flat list of byte values
-        myDF = pd.read_csv(io.BytesIO(bytes_data))
-        
-
-        st.markdown("# Treemaps from upload data file: https://plotly.com/python/treemaps/")
-        #df = myDF.query("year == 2007")
-        df = myDF
-        # assumes the uploaded CSV has categorical 'message' and 'name' columns
-        # and a numeric 'content' column; the gapminder-specific color options
-        # ('lifeExp', 'iso_alpha') from the template would fail on this data
-        fig = px.treemap(df, path=[px.Constant("time"), 'message', 'name'], values='content')
-        fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-        #fig.show()
-        st.plotly_chart(fig, use_container_width=True)
-        
-        
-    
-   
-# show the replace form
-    if st.checkbox("replace"):
-        mydf = st.dataframe(df)
-        columns = st.selectbox("Select column", df.columns)
-        old_values = st.multiselect("Current Values", list(df[columns].unique()), list(df[columns].unique()))
-        with st.form(key='my_form'):
-            col1, col2 = st.columns(2)  # st.beta_columns was removed in newer Streamlit
-            st_input = st.number_input if is_numeric_dtype(df[columns]) else st.text_input
-            with col1:
-                old_val = st_input("old value")
-            with col2:
-                new_val = st_input("new value")
-            if st.form_submit_button("Replace"):
-                df[columns] = df[columns].replace(old_val, new_val)
-                st.success("{} replaced with {} successfully".format(old_val, new_val))
-                mydf.add_rows(df)  # update the displayed table in place
-
-st.markdown("WebGL Rendering with 1,000,000 Points")
-N = 1000000
-fig = go.Figure()
-fig.add_trace(
-    go.Scattergl(
-        x = np.random.randn(N),
-        y = np.random.randn(N),
-        mode = 'markers',
-        marker = dict(
-            line = dict(
-                width = 1,
-                color = 'DarkSlateGrey')
-        )
-    )
-)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-
-st.markdown("# WebGL Graph - ScatterGL")
-fig = go.Figure()
-trace_num = 10
-point_num = 5000
-for i in range(trace_num):
-    fig.add_trace(
-        go.Scattergl(
-                x = np.linspace(0, 1, point_num),
-                y = np.random.randn(point_num)+(i*5)
-        )
-    )
-fig.update_layout(showlegend=False)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Treemaps: https://plotly.com/python/treemaps/")
-df = px.data.gapminder().query("year == 2007")
-fig = px.treemap(df, path=[px.Constant("world"), 'continent', 'country'], values='pop',
-                  color='lifeExp', hover_data=['iso_alpha'],
-                  color_continuous_scale='RdBu',
-                  color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Sunburst: https://plotly.com/python/sunburst-charts/")
-
-
-st.markdown("# Life Expectancy Sunburst")
-df = px.data.gapminder().query("year == 2007")
-fig = px.sunburst(df, path=['continent', 'country'], values='pop',
-                  color='lifeExp', hover_data=['iso_alpha'],
-                  color_continuous_scale='RdBu',
-                  color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
-st.plotly_chart(fig, use_container_width=True)
-
-
-st.markdown("# Coffee Aromas and Tastes Sunburst")
-df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv')
-df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv')
-fig = go.Figure()
-fig.add_trace(go.Sunburst(
-    ids=df1.ids,
-    labels=df1.labels,
-    parents=df1.parents,
-    domain=dict(column=0)
-))
-fig.add_trace(go.Sunburst(
-    ids=df2.ids,
-    labels=df2.labels,
-    parents=df2.parents,
-    domain=dict(column=1),
-    maxdepth=2
-))
-fig.update_layout(
-    grid= dict(columns=2, rows=1),
-    margin = dict(t=0, l=0, r=0, b=0)
-)
-st.plotly_chart(fig, use_container_width=True)
-
-
-
-
-
-# Sunburst
-#data = dict(
-#    character=["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"],
-#    parent=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve" ],
-#    value=[10, 14, 12, 10, 2, 6, 6, 4, 4])
-#fig = px.sunburst(
-#    data,
-#    names='character',
-#    parents='parent',
-#    values='value',
-#)
-#fig.show()
-#st.plotly_chart(fig, use_container_width=True)
-
-
-df = px.data.tips()
-fig = px.treemap(df, path=[px.Constant("all"), 'sex', 'day', 'time'], 
-                 values='total_bill', color='time',
-                  color_discrete_map={'(?)':'lightgrey', 'Lunch':'gold', 'Dinner':'darkblue'})
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-#fig.show()
-fig.update_traces(marker=dict(cornerradius=5))
-
-st.plotly_chart(fig, use_container_width=True)
-
-
-df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/96c0bd/sunburst-coffee-flavors-complete.csv')
-fig = go.Figure(go.Treemap(
-    ids = df.ids,
-    labels = df.labels,
-    parents = df.parents,
-    pathbar_textfont_size=15,
-    root_color="lightgrey"
-))
-fig.update_layout(
-    uniformtext=dict(minsize=10, mode='hide'),
-    margin = dict(t=50, l=25, r=25, b=25)
-)
-#fig.show()
-st.plotly_chart(fig, use_container_width=True)
-
-
-df = pd.read_pickle('bloom_dataset.pkl')
-fig = px.treemap(df, path=[px.Constant("ROOTS"), 'Macroarea', 'Family', 'Genus', 'Language', 'dataset_name'],
-                 values='num_bytes', maxdepth=4)
-fig.update_traces(root_color="pink")
-fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
-
-st.plotly_chart(fig, use_container_width=True)
\ No newline at end of file
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py
deleted file mode 100644
index 94efff3415679a5bf5b7038f9a1da15ebc6d04ca..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# Windows Cursor support for PIL
-#
-# notes:
-#       uses BmpImagePlugin.py to read the bitmap data.
-#
-# history:
-#       96-05-27 fl     Created
-#
-# Copyright (c) Secret Labs AB 1997.
-# Copyright (c) Fredrik Lundh 1996.
-#
-# See the README file for information on usage and redistribution.
-#
-from . import BmpImagePlugin, Image
-from ._binary import i16le as i16
-from ._binary import i32le as i32
-
-#
-# --------------------------------------------------------------------
-
-
-def _accept(prefix):
-    return prefix[:4] == b"\0\0\2\0"
-
-
-##
-# Image plugin for Windows Cursor files.
-
-
-class CurImageFile(BmpImagePlugin.BmpImageFile):
-    format = "CUR"
-    format_description = "Windows Cursor"
-
-    def _open(self):
-        offset = self.fp.tell()
-
-        # check magic
-        s = self.fp.read(6)
-        if not _accept(s):
-            msg = "not a CUR file"
-            raise SyntaxError(msg)
-
-        # pick the largest cursor in the file
-        m = b""
-        for i in range(i16(s, 4)):
-            s = self.fp.read(16)
-            if not m:
-                m = s
-            elif s[0] > m[0] and s[1] > m[1]:
-                m = s
-        if not m:
-            msg = "No cursors were found"
-            raise TypeError(msg)
-
-        # load as bitmap
-        self._bitmap(i32(m, 12) + offset)
-
-        # patch up the bitmap height
-        self._size = self.size[0], self.size[1] // 2
-        d, e, o, a = self.tile[0]
-        self.tile[0] = d, (0, 0) + self.size, o, a
-
-        return
-
-
-#
-# --------------------------------------------------------------------
-
-Image.register_open(CurImageFile.format, CurImageFile, _accept)
-
-Image.register_extension(CurImageFile.format, ".cur")
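-
-
-# A minimal usage sketch (not part of the original plugin): once this module
-# is imported, .cur files open through the normal PIL entry point. The file
-# names are hypothetical placeholders.
-if __name__ == "__main__":
-    im = Image.open("pointer.cur")
-    print(im.format, im.size)  # "CUR" and the size of the largest cursor
-    im.convert("RGBA").save("pointer.png")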
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py
deleted file mode 100644
index 704b44a2dda9e21997acf52c268e414d01bd2eb5..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_subprocesses.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-from abc import abstractmethod
-from signal import Signals
-
-from ._resources import AsyncResource
-from ._streams import ByteReceiveStream, ByteSendStream
-
-
-class Process(AsyncResource):
-    """An asynchronous version of :class:`subprocess.Popen`."""
-
-    @abstractmethod
-    async def wait(self) -> int:
-        """
-        Wait until the process exits.
-
-        :return: the exit code of the process
-        """
-
-    @abstractmethod
-    def terminate(self) -> None:
-        """
-        Terminates the process, gracefully if possible.
-
-        On Windows, this calls ``TerminateProcess()``.
-        On POSIX systems, this sends ``SIGTERM`` to the process.
-
-        .. seealso:: :meth:`subprocess.Popen.terminate`
-        """
-
-    @abstractmethod
-    def kill(self) -> None:
-        """
-        Kills the process.
-
-        On Windows, this calls ``TerminateProcess()``.
-        On POSIX systems, this sends ``SIGKILL`` to the process.
-
-        .. seealso:: :meth:`subprocess.Popen.kill`
-        """
-
-    @abstractmethod
-    def send_signal(self, signal: Signals) -> None:
-        """
-        Send a signal to the subprocess.
-
-        .. seealso:: :meth:`subprocess.Popen.send_signal`
-
-        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
-        """
-
-    @property
-    @abstractmethod
-    def pid(self) -> int:
-        """The process ID of the process."""
-
-    @property
-    @abstractmethod
-    def returncode(self) -> int | None:
-        """
-        The return code of the process. If the process has not yet terminated, this will be
-        ``None``.
-        """
-
-    @property
-    @abstractmethod
-    def stdin(self) -> ByteSendStream | None:
-        """The stream for the standard input of the process."""
-
-    @property
-    @abstractmethod
-    def stdout(self) -> ByteReceiveStream | None:
-        """The stream for the standard output of the process."""
-
-    @property
-    @abstractmethod
-    def stderr(self) -> ByteReceiveStream | None:
-        """The stream for the standard error output of the process."""
diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
deleted file mode 100644
index 16287d64d154872f50b49b822daec79641f11f11..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-
-from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
-from diffusers.utils.testing_utils import (
-    is_onnx_available,
-    load_image,
-    nightly,
-    require_onnxruntime,
-    require_torch_gpu,
-)
-
-from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
-
-
-if is_onnx_available():
-    import onnxruntime as ort
-
-
-class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
-    # FIXME: add fast tests
-    pass
-
-
-@nightly
-@require_onnxruntime
-@require_torch_gpu
-class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
-    @property
-    def gpu_provider(self):
-        return (
-            "CUDAExecutionProvider",
-            {
-                "gpu_mem_limit": "15000000000",  # 15GB
-                "arena_extend_strategy": "kSameAsRequested",
-            },
-        )
-
-    @property
-    def gpu_options(self):
-        options = ort.SessionOptions()
-        options.enable_mem_pattern = False
-        return options
-
-    def test_inference_default_pndm(self):
-        init_image = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/in_paint/overture-creations-5sI6fQgYIuo.png"
-        )
-        mask_image = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
-        )
-        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-inpainting",
-            revision="onnx",
-            safety_checker=None,
-            feature_extractor=None,
-            provider=self.gpu_provider,
-            sess_options=self.gpu_options,
-        )
-        pipe.set_progress_bar_config(disable=None)
-
-        prompt = "A red cat sitting on a park bench"
-
-        generator = np.random.RandomState(0)
-        output = pipe(
-            prompt=prompt,
-            image=init_image,
-            mask_image=mask_image,
-            guidance_scale=7.5,
-            num_inference_steps=10,
-            generator=generator,
-            output_type="np",
-        )
-        images = output.images
-        image_slice = images[0, 255:258, 255:258, -1]
-
-        assert images.shape == (1, 512, 512, 3)
-        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
-
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
-
-    def test_inference_k_lms(self):
-        init_image = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/in_paint/overture-creations-5sI6fQgYIuo.png"
-        )
-        mask_image = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
-        )
-        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
-            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
-        )
-        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-inpainting",
-            revision="onnx",
-            scheduler=lms_scheduler,
-            safety_checker=None,
-            feature_extractor=None,
-            provider=self.gpu_provider,
-            sess_options=self.gpu_options,
-        )
-        pipe.set_progress_bar_config(disable=None)
-
-        prompt = "A red cat sitting on a park bench"
-
-        generator = np.random.RandomState(0)
-        output = pipe(
-            prompt=prompt,
-            image=init_image,
-            mask_image=mask_image,
-            guidance_scale=7.5,
-            num_inference_steps=20,
-            generator=generator,
-            output_type="np",
-        )
-        images = output.images
-        image_slice = images[0, 255:258, 255:258, -1]
-
-        assert images.shape == (1, 512, 512, 3)
-        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
-
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
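-
-
-# Note (not part of the original file): the decorators above gate these
-# integration tests, so they are skipped unless onnxruntime and a CUDA GPU are
-# available and the nightly flag is set. A typical invocation, assuming the
-# usual diffusers test conventions, would be:
-#   RUN_NIGHTLY=1 pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py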
diff --git a/spaces/deepset/should-i-follow/utils/ui.py b/spaces/deepset/should-i-follow/utils/ui.py
deleted file mode 100644
index a64946f73522ec023ba5724e61a40b6aabdb9fd5..0000000000000000000000000000000000000000
--- a/spaces/deepset/should-i-follow/utils/ui.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import streamlit as st
-from PIL import Image
-
-def set_state_if_absent(key, value):
-    if key not in st.session_state:
-        st.session_state[key] = value
-
-def set_initial_state():
-    set_state_if_absent("username", "Provide a Mastodon username (e.g. xyz@mastodon.social)")
-    set_state_if_absent("result", None)
-    set_state_if_absent("haystack_started", False)
-
-def reset_results(*args):
-    st.session_state.result = None
-
-def set_openai_api_key(api_key: str):
-    st.session_state["OPENAI_API_KEY"] = api_key
-
-def sidebar():
-    with st.sidebar:
-        image = Image.open('logo/haystack-logo-colored.png')
-        st.markdown("Thanks for coming to this 🤗 Space.\n\n"
-        "This is a project for fun, and is not a final product."
-        " There's a lot that can be improved to make this app better.\n\n"
-        "**Take results with a grain of** 🧂\n\n"
-        "For more on how this was built, instructions to run locally and to contribute: [visit GitHub](https://github.com/TuanaCelik/should-i-follow#readme)")
-
-        st.markdown(
-            "## How to use\n"
-            "1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) below\n"
-            "2. Enter a Mastodon username in the searchbar\n"
-            "3. Enjoy 🤗\n"
-        )
-
-        api_key_input = st.text_input(
-            "OpenAI API Key",
-            type="password",
-            placeholder="Paste your OpenAI API key here (sk-...)",
-            help="You can get your API key from https://platform.openai.com/account/api-keys.",
-            value=st.session_state.get("OPENAI_API_KEY", ""),
-        )
-
-        if api_key_input:
-            set_openai_api_key(api_key_input)
-
-        st.markdown("---")
-        st.markdown(
-            "## How this works\n"
-            "This app was built with [Haystack](https://haystack.deepset.ai) using the"
-            " [`PromptNode`](https://docs.haystack.deepset.ai/docs/prompt_node) and custom [`PromptTemplate`](https://docs.haystack.deepset.ai/docs/prompt_node#templates).\n\n"
-            " The source code is also on [GitHub](https://github.com/TuanaCelik/should-i-follow)"
-            " with instructions to run locally.\n"
-            "You can see how the `PromptNode` was set up [here](https://github.com/TuanaCelik/should-i-follow/blob/main/utils/haystack.py)")
-        st.markdown("---")
-        st.markdown("Made by [tuanacelik](https://twitter.com/tuanacelik)")
-        st.markdown("---")
-        st.markdown("""Thanks to [mmz_001](https://twitter.com/mm_sasmitha) 
-                        for open sourcing [KnowledgeGPT](https://knowledgegpt.streamlit.app/) which helped me with this sidebar 🙏🏽""")
-        st.image(image, width=250)
\ No newline at end of file
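
The `set_state_if_absent` helper above is the standard init-once pattern for Streamlit's rerun model: defaults are written only on the first run, so user edits survive subsequent reruns. A self-contained sketch under that assumption (the `query` key is hypothetical; run with `streamlit run app.py`):

```python
import streamlit as st

def set_state_if_absent(key, value):
    # Only write the default on the first run; reruns keep the user's value.
    if key not in st.session_state:
        st.session_state[key] = value

set_state_if_absent("query", "")
set_state_if_absent("result", None)

st.text_input("Query", key="query")   # widget bound to the same session key
if st.button("Reset"):
    st.session_state.result = None    # mirrors reset_results() above
```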
diff --git a/spaces/diacanFperku/AutoGPT/CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus].md b/spaces/diacanFperku/AutoGPT/CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus].md
deleted file mode 100644
index d7a3de1727c258dd85c0946b78eeaa909e32106b..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus].md	
+++ /dev/null
@@ -1,12 +0,0 @@
-
-<p>Microsoft Office 2010 activation key [free-products-key-for-office-2010. ] Microsoft Office 2013 Crack Key Generator Lotto crack: Office 2013 Office 365 Office 2013 key generator". Simcard Tracker 2.2 Crack Serial Key :2.2 Mathew Lane DrMS V4.0 VST RTAS X32 X64 DYNAMiCS [deepstatus] -modports. </p>
-<h2>CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus]</h2><br /><p><b><b>Download</b> &#10026; <a href="https://gohhs.com/2uFUfs">https://gohhs.com/2uFUfs</a></b></p><br /><br />
-<p>Official Serial Number for the Windows 10 license server release 13. Windows 10 serial: Is this a copy of the Windows-10-desktop-free-upgrade-l. Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus]. </p>
-<p>01/01/2019 connect Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. You will receive the newest link crack Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. </p>
-<p>INSTALL!! Default Thumbnail Mathew Lane DrMS V4.0 VST RTAS X32 X64 DYNAMiCS [deepstatus] Serial Key Default. Results 1 - 10 of 15000. Extra-Quality-Rondo-Music-Agile-Guitars-Serial-Number. Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. </p>
-<p>But the most important thing to remember is that each time you log in to the site, the player thinks she is getting a new. Torrent Deep Status Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. </p>
-<p></p>
-<p>mathew-lane-drms-v4-0-vst-rtas-x32-x64-dynamics-deepstatus-serial-key. And you will have no permission to watch, record, download and crack. CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus]. </p>
-<p>Download CRACK Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. But the most important thing to remember is that each time you log in to the site, the player thinks she is getting a new. Torrent Deep Status Mathew Lane DrMS V4.0 VST RTAS X32 X64 - DYNAMiCS [deepstatus] Serial Key. </p> 899543212b<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Physical Chemistry By Pc Rakshit In Pdf.md b/spaces/diacanFperku/AutoGPT/Physical Chemistry By Pc Rakshit In Pdf.md
deleted file mode 100644
index ceeac3e33263a6e385dc5cd6020d9e4e08981001..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Physical Chemistry By Pc Rakshit In Pdf.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Physical Chemistry By Pc Rakshit In Pdf</h2><br /><p><b><b>Download File</b> &#10003;&#10003;&#10003; <a href="https://gohhs.com/2uFUzv">https://gohhs.com/2uFUzv</a></b></p><br /><br />
-
-Rakshit, P.C. Physical Chemistry, 5th Edition, Sadhana Press, Calcutta (1988). 67. Raphael, D. Levine, Molecular Reaction Dynamics, Cambridge University ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/diacanFperku/AutoGPT/Quest Software Secure Copy 7.3.0.335 Crack _TOP_ [Full].md b/spaces/diacanFperku/AutoGPT/Quest Software Secure Copy 7.3.0.335 Crack _TOP_ [Full].md
deleted file mode 100644
index a9dfee9fefc7298e51535bae2e3e509fc6de6186..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Quest Software Secure Copy 7.3.0.335 Crack _TOP_ [Full].md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Quest Software Secure Copy 7.3.0.335 Crack [Full]</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://gohhs.com/2uFSXx">https://gohhs.com/2uFSXx</a></b></p><br /><br />
-<br />
-Quest Support Product Release Notification - Secure Copy 7.3 more. Subscribe to product RSS feed and receive the latest Knowledge Articles, New Release ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/text/cleaner.py b/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
-    'ZH': chinese
-}
-
-
-def clean_text(text, language):
-    language_module = language_module_map[language]
-    norm_text = language_module.text_normalize(text)
-    phones, tones, word2ph = language_module.g2p(norm_text)
-    return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
-    language_module = language_module_map[language]
-    norm_text = language_module.text_normalize(text)
-    phones, tones, word2ph = language_module.g2p(norm_text)
-    bert = language_module.get_bert_feature(norm_text, word2ph)
-    return phones, tones, bert
-
-def text_to_sequence(text, language):
-    norm_text, phones, tones, word2ph = clean_text(text, language)
-    return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
-    pass
diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/app.py b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/app.py
deleted file mode 100644
index 4fe60035f007126110b071794da844944364cce9..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/app.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import sys, os 
-
-if sys.platform == "darwin":
-    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-
-import logging
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-logging.getLogger("markdown_it").setLevel(logging.WARNING)
-logging.getLogger("urllib3").setLevel(logging.WARNING)
-logging.getLogger("matplotlib").setLevel(logging.WARNING)
-
-logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s")
-
-logger = logging.getLogger(__name__)
-
-import torch
-import argparse
-import commons
-import utils
-from models import SynthesizerTrn
-from text.symbols import symbols
-from text import cleaned_text_to_sequence, get_bert
-from text.cleaner import clean_text
-import gradio as gr
-import webbrowser
-
-
-net_g = None
-
-
-def get_text(text, language_str, hps):
-    norm_text, phone, tone, word2ph = clean_text(text, language_str)
-    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-    if hps.data.add_blank:
-        phone = commons.intersperse(phone, 0)
-        tone = commons.intersperse(tone, 0)
-        language = commons.intersperse(language, 0)
-        for i in range(len(word2ph)):
-            word2ph[i] = word2ph[i] * 2
-        word2ph[0] += 1
-    bert = get_bert(norm_text, word2ph, language_str)
-    del word2ph
-
-    assert bert.shape[-1] == len(phone)
-
-    phone = torch.LongTensor(phone)
-    tone = torch.LongTensor(tone)
-    language = torch.LongTensor(language)
-
-    return bert, phone, tone, language
-import soundfile as sf
-def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
-    global net_g
-    bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
-    with torch.no_grad():
-        x_tst=phones.to(device).unsqueeze(0)
-        tones=tones.to(device).unsqueeze(0)
-        lang_ids=lang_ids.to(device).unsqueeze(0)
-        bert = bert.to(device).unsqueeze(0)
-        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
-        del phones
-        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
-        audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio
-                           , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy()
-        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
-        sf.write("tmp.wav", audio, 44100)
-        return audio
-def convert_wav_to_ogg(wav_file):
-    os.makedirs('out', exist_ok=True)
-    filename = os.path.splitext(os.path.basename(wav_file.name))[0]
-    output_path_ogg = os.path.join('out', "out.ogg")
-
-    renamed_input_path = os.path.join('in', "in.wav")
-    os.makedirs('in', exist_ok=True)
-    os.rename(wav_file.name, renamed_input_path)
-    command = ["ffmpeg", "-i", renamed_input_path, "-acodec", "libopus", "-y", output_path_ogg]
-    os.system(" ".join(command))
-    return output_path_ogg
-def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
-    with torch.no_grad():
-        audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker)
-    with open('tmp.wav', 'rb') as wav_file:
-        newogg = convert_wav_to_ogg(wav_file)    
-    return "Success", (hps.data.sampling_rate, audio),newogg
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model_dir", default="./logs/bfy/bfy_b.pth", help="path of your model")
-    parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file")
-    parser.add_argument("--share", default=False, help="make link public")
-    parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log")
-
-    args = parser.parse_args()
-    if args.debug:
-        logger.info("Enable DEBUG-LEVEL log")
-        logging.basicConfig(level=logging.DEBUG)
-    hps = utils.get_hparams_from_file(args.config_dir)
-    device = "cuda:0" if torch.cuda.is_available() else "cpu"
-    '''
-    device = (
-        "cuda:0"
-        if torch.cuda.is_available()
-        else (
-            "mps"
-            if sys.platform == "darwin" and torch.backends.mps.is_available()
-            else "cpu"
-        )
-    )
-    '''
-    net_g = SynthesizerTrn(
-        len(symbols),
-        hps.data.filter_length // 2 + 1,
-        hps.train.segment_size // hps.data.hop_length,
-        n_speakers=hps.data.n_speakers,
-        **hps.model).to(device)
-    _ = net_g.eval()
-
-    _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True)
-
-    speaker_ids = hps.data.spk2id
-    speakers = list(speaker_ids.keys())
-    with gr.Blocks() as app:
-        with gr.Row():
-            with gr.Column():
-
-
-                gr.Markdown(value="""
-                步非烟 (Bufeiyan) Ver.b Bert-VITS2 online speech generation\n
-                1. Model author: Digital Xingtong Project https://t.me/xingtong25680 \n
-                2. Original project: https://github.com/Stardust-minus/Bert-VITS2\n
-                3. Please credit derivative works made with this model as AI-generated and link the original project\n
-                4. The training material comes from a prose-reading contest. Using this project for any purpose that violates the Constitution, Criminal Law, Public Security Administration Punishments Law, or Civil Code of the People's Republic of China is strictly forbidden, as is any politics-related use. \n
-                """)
-                text = gr.TextArea(label="Text", placeholder="Input Text Here",
-                                      value="这里是数字星瞳企画,请在电报搜索星瞳全拼加二五六八零,获取最新更新进展。")
-                speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker')
-                sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label='Intonation variation (SDP ratio)')
-                noise_scale = gr.Slider(minimum=0.1, maximum=1.5, value=0.6, step=0.01, label='Emotion variation (noise scale)')
-                noise_scale_w = gr.Slider(minimum=0.1, maximum=1.4, value=0.8, step=0.01, label='Syllable duration variation')
-                length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='Speech rate')
-                btn = gr.Button("Start your AI voice journey!", variant="primary")
-            with gr.Column():
-                text_output = gr.Textbox(label="Message")
-                audio_output = gr.Audio(label="Output Audio")
-                ogg_output = gr.File(label="Converted OGG file")
-                gr.Markdown(value="""
-                Model collection:\n
-                Xingtong all-in-one https://huggingface.co/spaces/digitalxingtong/Xingtong-All-in-One\n
-                Bufeiyan Ver.a https://huggingface.co/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2 \n
-                Bufeiyan Ver.b https://huggingface.co/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2 \n
-                Bufeiyan Ver.c https://huggingface.co/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2 \n
-                Male-voice reading https://huggingface.co/spaces/digitalxingtong/Kanghui-Read-Bert-VITS2 \n
-                Male-voice reading (long text) https://huggingface.co/spaces/digitalxingtong/Kanghui-Longread-Bert-VITS2\n
-                IGN China https://huggingface.co/spaces/digitalxingtong/Ign-Read-Bert-VITS2 \n
-                IGN China (long text) https://huggingface.co/spaces/digitalxingtong/Ign-Longread-Bert-VITS2 \n
-                """)
-        btn.click(tts_fn,
-                inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale],
-                outputs=[text_output, audio_output,ogg_output])
-    
-        
-    app.launch(show_error=True)
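
`convert_wav_to_ogg` above shells out with `os.system(" ".join(command))`, which breaks on paths containing spaces and silently discards failures. A sketch of a safer variant using `subprocess` (assumes `ffmpeg` is on PATH; function and argument names are illustrative):

```python
import subprocess

def wav_to_ogg(wav_path: str, ogg_path: str) -> str:
    # Passing a list avoids shell quoting issues; check=True surfaces errors.
    cmd = ["ffmpeg", "-i", wav_path, "-acodec", "libopus", "-y", ogg_path]
    subprocess.run(cmd, check=True)   # raises CalledProcessError on failure
    return ogg_path
```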
diff --git a/spaces/digitalxingtong/Jiaran-Bert-VITS2/README_zh.md b/spaces/digitalxingtong/Jiaran-Bert-VITS2/README_zh.md
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiaran-Bert-VITS2/README_zh.md
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/digitalxingtong/Miiu-Bert-Vits2/README_zh.md b/spaces/digitalxingtong/Miiu-Bert-Vits2/README_zh.md
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Miiu-Bert-Vits2/README_zh.md
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/dolceschokolade/chatbot-mini/CONTRIBUTING.md b/spaces/dolceschokolade/chatbot-mini/CONTRIBUTING.md
deleted file mode 100644
index 2fc863718e9eaa6d9d1a2f4f35c1319bd57366f9..0000000000000000000000000000000000000000
--- a/spaces/dolceschokolade/chatbot-mini/CONTRIBUTING.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Contributing Guidelines
-
-**Welcome to Chatbot UI!**
-
-We appreciate your interest in contributing to our project.
-
-Before you get started, please read our guidelines for contributing.
-
-## Types of Contributions
-
-We welcome the following types of contributions:
-
-- Bug fixes
-- New features
-- Documentation improvements
-- Code optimizations
-- Translations
-- Tests
-
-## Getting Started
-
-To get started, fork the project on GitHub and clone it locally on your machine. Then, create a new branch to work on your changes.
-
-```
-git clone https://github.com/mckaywrigley/chatbot-ui.git
-cd chatbot-ui
-git checkout -b my-branch-name
-
-```
-
-Before submitting your pull request, please make sure your changes pass our automated tests and adhere to our code style guidelines.
-
-## Pull Request Process
-
-1. Fork the project on GitHub.
-2. Clone your forked repository locally on your machine.
-3. Create a new branch from the main branch.
-4. Make your changes on the new branch.
-5. Ensure that your changes adhere to our code style guidelines and pass our automated tests.
-6. Commit your changes and push them to your forked repository.
-7. Submit a pull request to the main branch of the main repository.
-
-## Contact
-
-If you have any questions or need help getting started, feel free to reach out to me on [Twitter](https://twitter.com/mckaywrigley).
diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/modules/llama_attn_hijack.py b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/modules/llama_attn_hijack.py
deleted file mode 100644
index e953f523d6c54581af1a30deb8b922f85b3e557a..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/modules/llama_attn_hijack.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import logging
-import math
-import sys
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import transformers.models.llama.modeling_llama
-
-import modules.shared as shared
-
-if shared.args.xformers:
-    try:
-        import xformers.ops
-    except Exception:
-        logging.error("xformers not found! Please install it before trying to use it.", file=sys.stderr)
-
-
-def hijack_llama_attention():
-    if shared.args.xformers:
-        transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward
-        logging.info("Replaced attention with xformers_attention")
-    elif shared.args.sdp_attention:
-        transformers.models.llama.modeling_llama.LlamaAttention.forward = sdp_attention_forward
-        logging.info("Replaced attention with sdp_attention")
-
-
-def xformers_forward(
-    self,
-    hidden_states: torch.Tensor,
-    attention_mask: Optional[torch.Tensor] = None,
-    position_ids: Optional[torch.LongTensor] = None,
-    past_key_value: Optional[Tuple[torch.Tensor]] = None,
-    output_attentions: bool = False,
-    use_cache: bool = False,
-) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-    bsz, q_len, _ = hidden_states.size()
-
-    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
-    kv_seq_len = key_states.shape[-2]
-    if past_key_value is not None:
-        kv_seq_len += past_key_value[0].shape[-2]
-    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-    query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-    # [bsz, nh, t, hd]
-
-    if past_key_value is not None:
-        # reuse k, v, self_attention
-        key_states = torch.cat([past_key_value[0], key_states], dim=2)
-        value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
-    past_key_value = (key_states, value_states) if use_cache else None
-
-    # We only apply xformers optimizations if we don't need to output the whole attention matrix
-    if not output_attentions:
-        query_states = query_states.transpose(1, 2)
-        key_states = key_states.transpose(1, 2)
-        value_states = value_states.transpose(1, 2)
-
-        # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros.
-        # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros.
-        if attention_mask is None or attention_mask[0, 0, 0, 1] == 0:
-            # input and output should be of form (bsz, q_len, num_heads, head_dim)
-            attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=None)
-        else:
-            # input and output should be of form (bsz, q_len, num_heads, head_dim)
-            attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=xformers.ops.LowerTriangularMask())
-        attn_weights = None
-    else:
-        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
-        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
-            raise ValueError(
-                f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
-                f" {attn_weights.size()}"
-            )
-
-        if attention_mask is not None:
-            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
-                raise ValueError(
-                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
-                )
-            attn_weights = attn_weights + attention_mask
-            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
-        # upcast attention to fp32
-        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
-        attn_output = torch.matmul(attn_weights, value_states)
-
-        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
-            raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
-                f" {attn_output.size()}"
-            )
-
-        attn_output = attn_output.transpose(1, 2)
-
-    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-    attn_output = self.o_proj(attn_output)
-    return attn_output, attn_weights, past_key_value
-
-
-def sdp_attention_forward(
-    self,
-    hidden_states: torch.Tensor,
-    attention_mask: Optional[torch.Tensor] = None,
-    position_ids: Optional[torch.LongTensor] = None,
-    past_key_value: Optional[Tuple[torch.Tensor]] = None,
-    output_attentions: bool = False,
-    use_cache: bool = False,
-) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-    bsz, q_len, _ = hidden_states.size()
-
-    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
-    kv_seq_len = key_states.shape[-2]
-    if past_key_value is not None:
-        kv_seq_len += past_key_value[0].shape[-2]
-    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-    query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-    # [bsz, nh, t, hd]
-
-    if past_key_value is not None:
-        # reuse k, v, self_attention
-        key_states = torch.cat([past_key_value[0], key_states], dim=2)
-        value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
-    past_key_value = (key_states, value_states) if use_cache else None
-
-    # We only apply sdp attention if we don't need to output the whole attention matrix
-    if not output_attentions:
-        attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, is_causal=False)
-        attn_weights = None
-    else:
-        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
-        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
-            raise ValueError(
-                f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
-                f" {attn_weights.size()}"
-            )
-
-        if attention_mask is not None:
-            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
-                raise ValueError(
-                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
-                )
-            attn_weights = attn_weights + attention_mask
-            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
-        # upcast attention to fp32
-        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
-        attn_output = torch.matmul(attn_weights, value_states)
-
-        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
-            raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
-                f" {attn_output.size()}"
-            )
-
-    attn_output = attn_output.transpose(1, 2)
-    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
-    attn_output = self.o_proj(attn_output)
-
-    return attn_output, attn_weights, past_key_value
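
The hijack above works by rebinding `LlamaAttention.forward` at the class level, so every existing and future instance picks up the replacement. A self-contained sketch of that monkey-patching technique (toy class, illustrative only):

```python
class Attention:
    def forward(self, x):
        return x          # original behaviour

def fast_forward(self, x):
    return x * 2          # replacement with the same signature

Attention.forward = fast_forward   # rebinding at the class level
assert Attention().forward(3) == 6  # all instances now use the new path
```

The same constraint applies as in the module above: the replacement must match the original signature and return shape, or every caller of the patched class breaks.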
diff --git a/spaces/edisonlee55/hysts-anime-face-detector/anime_face_detector/configs/mmdet/faster-rcnn.py b/spaces/edisonlee55/hysts-anime-face-detector/anime_face_detector/configs/mmdet/faster-rcnn.py
deleted file mode 100644
index 00fd519ea808d3bd504f888ebb3e80db999990f9..0000000000000000000000000000000000000000
--- a/spaces/edisonlee55/hysts-anime-face-detector/anime_face_detector/configs/mmdet/faster-rcnn.py
+++ /dev/null
@@ -1,66 +0,0 @@
-model = dict(type='FasterRCNN',
-             backbone=dict(type='ResNet',
-                           depth=50,
-                           num_stages=4,
-                           out_indices=(0, 1, 2, 3),
-                           frozen_stages=1,
-                           norm_cfg=dict(type='BN', requires_grad=True),
-                           norm_eval=True,
-                           style='pytorch'),
-             neck=dict(type='FPN',
-                       in_channels=[256, 512, 1024, 2048],
-                       out_channels=256,
-                       num_outs=5),
-             rpn_head=dict(type='RPNHead',
-                           in_channels=256,
-                           feat_channels=256,
-                           anchor_generator=dict(type='AnchorGenerator',
-                                                 scales=[8],
-                                                 ratios=[0.5, 1.0, 2.0],
-                                                 strides=[4, 8, 16, 32, 64]),
-                           bbox_coder=dict(type='DeltaXYWHBBoxCoder',
-                                           target_means=[0.0, 0.0, 0.0, 0.0],
-                                           target_stds=[1.0, 1.0, 1.0, 1.0])),
-             roi_head=dict(
-                 type='StandardRoIHead',
-                 bbox_roi_extractor=dict(type='SingleRoIExtractor',
-                                         roi_layer=dict(type='RoIAlign',
-                                                        output_size=7,
-                                                        sampling_ratio=0),
-                                         out_channels=256,
-                                         featmap_strides=[4, 8, 16, 32]),
-                 bbox_head=dict(type='Shared2FCBBoxHead',
-                                in_channels=256,
-                                fc_out_channels=1024,
-                                roi_feat_size=7,
-                                num_classes=1,
-                                bbox_coder=dict(
-                                    type='DeltaXYWHBBoxCoder',
-                                    target_means=[0.0, 0.0, 0.0, 0.0],
-                                    target_stds=[0.1, 0.1, 0.2, 0.2]),
-                                reg_class_agnostic=False)),
-             test_cfg=dict(rpn=dict(nms_pre=1000,
-                                    max_per_img=1000,
-                                    nms=dict(type='nms', iou_threshold=0.7),
-                                    min_bbox_size=0),
-                           rcnn=dict(score_thr=0.05,
-                                     nms=dict(type='nms', iou_threshold=0.5),
-                                     max_per_img=100)))
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize',
-                  mean=[123.675, 116.28, 103.53],
-                  std=[58.395, 57.12, 57.375],
-                  to_rgb=True),
-             dict(type='Pad', size_divisor=32),
-             dict(type='DefaultFormatBundle'),
-             dict(type='Collect', keys=['img'])
-         ])
-]
-data = dict(test=dict(pipeline=test_pipeline))
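
Configs like the one above are plain Python files that mmdetection evaluates into nested dicts. A sketch of inspecting one with mmcv's `Config` helper (assumes an older mmcv install where `mmcv.Config` exists, and that the file is saved locally as `faster-rcnn.py`):

```python
from mmcv import Config

cfg = Config.fromfile("faster-rcnn.py")
print(cfg.model.backbone.depth)                   # -> 50 (ResNet-50)
print(cfg.model.roi_head.bbox_head.num_classes)   # -> 1 (faces only)
```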
diff --git a/spaces/eghth/wdferg/README.md b/spaces/eghth/wdferg/README.md
deleted file mode 100644
index b158855afb45d32cc197620b8de836528b8aa25a..0000000000000000000000000000000000000000
--- a/spaces/eghth/wdferg/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Wdferg
-emoji: 🚀
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ehristoforu/Hubsd/app.py b/spaces/ehristoforu/Hubsd/app.py
deleted file mode 100644
index 07e1a79ce21a1b85553498e024393b5e1fa4f62e..0000000000000000000000000000000000000000
--- a/spaces/ehristoforu/Hubsd/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-with gr.Blocks(css=".gradio-container {background-color: grey}") as demo:
-    with gr.Tab(label = "SD 1.5"):
-         gr.load("models/runwayml/stable-diffusion-v1-5")
-    with gr.Tab(label = "SD 2.1"):
-         gr.load("models/runwayml/stabilityai/stable-diffusion-2-1")
-demo.launch()
\ No newline at end of file
diff --git a/spaces/elkraken/Video-Object-Detection/utils/torch_utils.py b/spaces/elkraken/Video-Object-Detection/utils/torch_utils.py
deleted file mode 100644
index bee0ad57517a334748afe7db19f6e45bd657afe6..0000000000000000000000000000000000000000
--- a/spaces/elkraken/Video-Object-Detection/utils/torch_utils.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# YOLOR PyTorch utils
-
-import datetime
-import logging
-import math
-import os
-import platform
-import subprocess
-import time
-from contextlib import contextmanager
-from copy import deepcopy
-from pathlib import Path
-
-import torch
-import torch.backends.cudnn as cudnn
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-
-try:
-    import thop  # for FLOPS computation
-except ImportError:
-    thop = None
-logger = logging.getLogger(__name__)
-
-
-@contextmanager
-def torch_distributed_zero_first(local_rank: int):
-    """
-    Decorator to make all processes in distributed training wait for each local_master to do something.
-    """
-    if local_rank not in [-1, 0]:
-        torch.distributed.barrier()
-    yield
-    if local_rank == 0:
-        torch.distributed.barrier()
-
-
-def init_torch_seeds(seed=0):
-    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
-    torch.manual_seed(seed)
-    if seed == 0:  # slower, more reproducible
-        cudnn.benchmark, cudnn.deterministic = False, True
-    else:  # faster, less reproducible
-        cudnn.benchmark, cudnn.deterministic = True, False
-
-
-def date_modified(path=__file__):
-    # return human-readable file modification date, i.e. '2021-3-26'
-    t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
-    return f'{t.year}-{t.month}-{t.day}'
-
-
-def git_describe(path=Path(__file__).parent):  # path must be a directory
-    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
-    s = f'git -C {path} describe --tags --long --always'
-    try:
-        return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
-    except subprocess.CalledProcessError:
-        return ''  # not a git repository
-
-
-def select_device(device='', batch_size=None):
-    # device = 'cpu' or '0' or '0,1,2,3'
-    s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
-    cpu = device.lower() == 'cpu'
-    if cpu:
-        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
-    elif device:  # non-cpu device requested
-        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
-        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
-
-    cuda = not cpu and torch.cuda.is_available()
-    if cuda:
-        n = torch.cuda.device_count()
-        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
-            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
-        space = ' ' * len(s)
-        for i, d in enumerate(device.split(',') if device else range(n)):
-            p = torch.cuda.get_device_properties(i)
-            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
-    else:
-        s += 'CPU\n'
-
-    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
-    return torch.device('cuda:0' if cuda else 'cpu')
-
-
-def time_synchronized():
-    # pytorch-accurate time
-    if torch.cuda.is_available():
-        torch.cuda.synchronize()
-    return time.time()
-
-
-def profile(x, ops, n=100, device=None):
-    # profile a pytorch module or list of modules. Example usage:
-    #     x = torch.randn(16, 3, 640, 640)  # input
-    #     m1 = lambda x: x * torch.sigmoid(x)
-    #     m2 = nn.SiLU()
-    #     profile(x, [m1, m2], n=100)  # profile speed over 100 iterations
-
-    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-    x = x.to(device)
-    x.requires_grad = True
-    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
-    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
-    for m in ops if isinstance(ops, list) else [ops]:
-        m = m.to(device) if hasattr(m, 'to') else m  # device
-        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
-        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
-        try:
-            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
-        except Exception:  # thop missing or profiling failed
-            flops = 0
-
-        for _ in range(n):
-            t[0] = time_synchronized()
-            y = m(x)
-            t[1] = time_synchronized()
-            try:
-                _ = y.sum().backward()
-                t[2] = time_synchronized()
-            except Exception:  # no backward method
-                t[2] = float('nan')
-            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
-            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
-
-        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
-        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
-        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
-        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
-
-
-def is_parallel(model):
-    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
-
-
-def intersect_dicts(da, db, exclude=()):
-    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
-    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
-
-
-def initialize_weights(model):
-    for m in model.modules():
-        t = type(m)
-        if t is nn.Conv2d:
-            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-        elif t is nn.BatchNorm2d:
-            m.eps = 1e-3
-            m.momentum = 0.03
-        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
-            m.inplace = True
-
-
-def find_modules(model, mclass=nn.Conv2d):
-    # Finds layer indices matching module class 'mclass'
-    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
-
-
-def sparsity(model):
-    # Return global model sparsity
-    a, b = 0., 0.
-    for p in model.parameters():
-        a += p.numel()
-        b += (p == 0).sum()
-    return b / a
-
-
-def prune(model, amount=0.3):
-    # Prune model to requested global sparsity
-    import torch.nn.utils.prune as prune
-    print('Pruning model... ', end='')
-    for name, m in model.named_modules():
-        if isinstance(m, nn.Conv2d):
-            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
-            prune.remove(m, 'weight')  # make permanent
-    print(' %.3g global sparsity' % sparsity(model))
-
-
-def fuse_conv_and_bn(conv, bn):
-    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
-    fusedconv = nn.Conv2d(conv.in_channels,
-                          conv.out_channels,
-                          kernel_size=conv.kernel_size,
-                          stride=conv.stride,
-                          padding=conv.padding,
-                          groups=conv.groups,
-                          bias=True).requires_grad_(False).to(conv.weight.device)
-
-    # prepare filters
-    w_conv = conv.weight.clone().view(conv.out_channels, -1)
-    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
-    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
-
-    # prepare spatial bias
-    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
-    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
-    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
-
-    return fusedconv
-
-
-def model_info(model, verbose=False, img_size=640):
-    # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
-    n_p = sum(x.numel() for x in model.parameters())  # number parameters
-    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
-    if verbose:
-        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
-        for i, (name, p) in enumerate(model.named_parameters()):
-            name = name.replace('module_list.', '')
-            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
-                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
-
-    try:  # FLOPS
-        from thop import profile
-        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
-        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
-        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
-        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
-        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
-    except Exception:  # thop missing or model incompatible with profiling
-        fs = ''
-
-    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
-
-
-def load_classifier(name='resnet101', n=2):
-    # Loads a pretrained model reshaped to n-class output
-    model = torchvision.models.__dict__[name](pretrained=True)
-
-    # ResNet model properties
-    # input_size = [3, 224, 224]
-    # input_space = 'RGB'
-    # input_range = [0, 1]
-    # mean = [0.485, 0.456, 0.406]
-    # std = [0.229, 0.224, 0.225]
-
-    # Reshape output to n classes
-    filters = model.fc.weight.shape[1]
-    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
-    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
-    model.fc.out_features = n
-    return model
-
-
-def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
-    # scales img(bs,3,y,x) by ratio constrained to gs-multiple
-    if ratio == 1.0:
-        return img
-    else:
-        h, w = img.shape[2:]
-        s = (int(h * ratio), int(w * ratio))  # new size
-        img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
-        if not same_shape:  # pad/crop img
-            h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
-        return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
-
-
-def copy_attr(a, b, include=(), exclude=()):
-    # Copy attributes from b to a, options to only include [...] and to exclude [...]
-    for k, v in b.__dict__.items():
-        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
-            continue
-        else:
-            setattr(a, k, v)
-
-
-class ModelEMA:
-    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
-    Keep a moving average of everything in the model state_dict (parameters and buffers).
-    This is intended to allow functionality like
-    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
-    A smoothed version of the weights is necessary for some training schemes to perform well.
-    This class is sensitive where it is initialized in the sequence of model init,
-    GPU assignment and distributed training wrappers.
-    """
-
-    def __init__(self, model, decay=0.9999, updates=0):
-        # Create EMA
-        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
-        # if next(model.parameters()).device.type != 'cpu':
-        #     self.ema.half()  # FP16 EMA
-        self.updates = updates  # number of EMA updates
-        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
-        for p in self.ema.parameters():
-            p.requires_grad_(False)
-
-    def update(self, model):
-        # Update EMA parameters
-        with torch.no_grad():
-            self.updates += 1
-            d = self.decay(self.updates)
-
-            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
-            for k, v in self.ema.state_dict().items():
-                if v.dtype.is_floating_point:
-                    v *= d
-                    v += (1. - d) * msd[k].detach()
-
-    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
-        # Update EMA attributes
-        copy_attr(self.ema, model, include, exclude)
-
-
-class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
-    def _check_input_dim(self, input):
-        # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
-        # is this method that is overwritten by the sub-class
-        # The original goal of this method was tensor sanity checks
-        # If you're ok bypassing those sanity checks (e.g. if you trust your inference
-        # to provide the right dimensional inputs), then you can just use this method
-        # for easy conversion from SyncBatchNorm
-        # (unfortunately, SyncBatchNorm does not store the original class - if it did
-        #  we could return the one that was originally created)
-        return
-
-def revert_sync_batchnorm(module):
-    # this is very similar to the function that it is trying to revert:
-    # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679
-    module_output = module
-    if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm):
-        new_cls = BatchNormXd
-        module_output = BatchNormXd(module.num_features,
-                                               module.eps, module.momentum,
-                                               module.affine,
-                                               module.track_running_stats)
-        if module.affine:
-            with torch.no_grad():
-                module_output.weight = module.weight
-                module_output.bias = module.bias
-        module_output.running_mean = module.running_mean
-        module_output.running_var = module.running_var
-        module_output.num_batches_tracked = module.num_batches_tracked
-        if hasattr(module, "qconfig"):
-            module_output.qconfig = module.qconfig
-    for name, child in module.named_children():
-        module_output.add_module(name, revert_sync_batchnorm(child))
-    del module
-    return module_output
-
-
-class TracedModel(nn.Module):
-
-    def __init__(self, model=None, device=None, img_size=(640,640)): 
-        super(TracedModel, self).__init__()
-        
-        print(" Convert model to Traced-model... ") 
-        self.stride = model.stride
-        self.names = model.names
-        self.model = model
-
-        self.model = revert_sync_batchnorm(self.model)
-        self.model.to('cpu')
-        self.model.eval()
-
-        self.detect_layer = self.model.model[-1]
-        self.model.traced = True
-        
-        rand_example = torch.rand(1, 3, *img_size) if isinstance(img_size, (tuple, list)) else torch.rand(1, 3, img_size, img_size)  # img_size may be (h, w) or int
-        
-        traced_script_module = torch.jit.trace(self.model, rand_example, strict=False)
-        #traced_script_module = torch.jit.script(self.model)
-        traced_script_module.save("traced_model.pt")
-        print(" traced_script_module saved! ")
-        self.model = traced_script_module
-        self.model.to(device)
-        self.detect_layer.to(device)
-        print(" model is traced! \n") 
-
-    def forward(self, x, augment=False, profile=False):
-        out = self.model(x)
-        out = self.detect_layer(out)
-        return out
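
A quick sanity check of the idea behind `fuse_conv_and_bn` above: in eval mode, folding the BatchNorm statistics into the convolution's weight and bias should reproduce the conv→bn output up to float error. This sketch uses PyTorch's built-in fusion helper rather than the function above, so it runs standalone:

```python
import torch
import torch.nn as nn
from torch.nn.utils.fusion import fuse_conv_bn_eval

conv = nn.Conv2d(3, 8, 3, padding=1)
bn = nn.BatchNorm2d(8)
conv.eval(); bn.eval()   # fusion is only valid with frozen running stats

x = torch.randn(1, 3, 32, 32)
fused = fuse_conv_bn_eval(conv, bn)   # single conv replacing conv+bn
assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)
```

The payoff is inference speed: one layer instead of two, with no change in output.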
diff --git a/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/README.md b/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/README.md
deleted file mode 100644
index 4864c957986d300bf083b2f23e5e552626bb19bc..0000000000000000000000000000000000000000
--- a/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: ELYZA-japanese-Llama-2-7b-instruct-demo
-emoji: ✨
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.41.0
-app_file: app.py
-pinned: true
-suggested_hardware: a10g-small
-duplicated_from: elyza/ELYZA-japanese-Llama-2-7b-instruct-demo
----
-
-# ELYZA-japanese-Llama-2-7b-instruct-demo
-## Overview
-- [ELYZA-japanese-Llama-2-7b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b) is a model pre-trained by [ELYZA, Inc.](https://elyza.ai/) (hereinafter "the Company") on top of [Llama2](https://ai.meta.com/llama/) to extend its Japanese-language capability.
-- [ELYZA-japanese-Llama-2-7b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b-instruct) is [ELYZA-japanese-Llama-2-7b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b) post-trained on the Company's own instruction-tuning dataset.
-    - This demo uses that model.
-- [ELYZA-japanese-Llama-2-7b-fast-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct) is [ELYZA-japanese-Llama-2-7b-fast](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b-fast), a variant of [ELYZA-japanese-Llama-2-7b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b) with added Japanese vocabulary, post-trained on the same in-house instruction-tuning dataset.
-    - A demo of that model is available [here](https://huggingface.co/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo).
-- See the [blog post](https://note.com/elyza/n/na405acaca130) for details.
-- This demo is based on the [Llama-2 7B Chat](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat) demo.
-
-## License
-- Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved.
-
-## Disclaimer
-- The Company makes no warranty, express or implied, that this demo fits any particular purpose of the user, that it has the functionality, accuracy, or usefulness the user expects, that its output is complete, accurate, or useful, that the user's use of it complies with the laws applicable to the user, that it will remain continuously available, or that it is free of defects.
-- The Company accepts no liability for any damage a user incurs in connection with this demo, and users consent to this in advance.
-- The Company does not intend to collect personal information of users or third parties through this demo; users must not enter names or any other information that could identify a specific individual.
-- Users grant the Company permission to use their inputs to improve this demo or the algorithms it uses.
-
-## Recording and use of data entered into and output by this demo
-- Data entered into and output by this demo may be recorded by the Company and used to improve this demo or the algorithms it uses.
-
-## We are hiring!
-- Interested in ELYZA, Inc.? Let's talk!
-- Machine learning engineer / intern openings: https://open.talentio.com/r/1/c/elyza/homes/2507
-- Casual interviews: https://chillout.elyza.ai/elyza-japanese-llama2-7b
diff --git a/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/USE_POLICY.md b/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/USE_POLICY.md
deleted file mode 100644
index abbcc199b2d1e4feb5d7e40c0bd67e1b0ce29e97..0000000000000000000000000000000000000000
--- a/spaces/elyza/ELYZA-japanese-Llama-2-7b-instruct-demo/USE_POLICY.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Llama 2 Acceptable Use Policy
-
-Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
-
-## Prohibited Uses
-We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 
-
-1. Violate the law or others’ rights, including to:
-    1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 
-        1. Violence or terrorism 
-        2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
-        3. Human trafficking, exploitation, and sexual violence
-        4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
-        5. Sexual solicitation
-        6. Any other criminal activity
-    2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
-    3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
-    4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 
-    5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
-    6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
-    7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 
-
-
-
-2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
-    1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
-    2. Guns and illegal weapons (including weapon development)
-    3. Illegal drugs and regulated/controlled substances
-    4. Operation of critical infrastructure, transportation technologies, or heavy machinery
-    5. Self-harm or harm to others, including suicide, cutting, and eating disorders
-    6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
-
-
-3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
-    1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
-    2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
-    3. Generating, promoting, or further distributing spam
-    4. Impersonating another individual without consent, authorization, or legal right
-    5. Representing that the use of Llama 2 or outputs are human-generated
-    6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 
-4. Fail to appropriately disclose to end users any known dangers of your AI system 
-
-Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
-
-* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
-* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
-* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
-* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com)
-
diff --git a/spaces/enzostvs/stable-diffusion-tpu/next.config.js b/spaces/enzostvs/stable-diffusion-tpu/next.config.js
deleted file mode 100644
index 4ef48a6a9031e3d4ee1a08339493e1bded88b670..0000000000000000000000000000000000000000
--- a/spaces/enzostvs/stable-diffusion-tpu/next.config.js
+++ /dev/null
@@ -1,21 +0,0 @@
-/** @type {import('next').NextConfig} */
-const nextConfig = {
-  experimental: {
-    serverActions: true,
-    serverComponentsExternalPackages: ['sharp', 'onnxruntime-node'],
-  },
-  images: {
-    remotePatterns: [
-      {
-        protocol: "https",
-        hostname: "huggingface.co",
-      },
-      {
-        protocol: "https",
-        hostname: "aeiljuispo.cloudimg.io",
-      },
-    ],
-  },
-}
-
-module.exports = nextConfig
diff --git a/spaces/epexVfeibi/Imagedeblurr/A Beautiful Mind Dual Audio HOT.md b/spaces/epexVfeibi/Imagedeblurr/A Beautiful Mind Dual Audio HOT.md
deleted file mode 100644
index 03decf5fe0adbb28d2dfab5617fc022daa177a40..0000000000000000000000000000000000000000
--- a/spaces/epexVfeibi/Imagedeblurr/A Beautiful Mind Dual Audio HOT.md	
+++ /dev/null
@@ -1,13 +0,0 @@
-<br />
-<h1>A Beautiful Mind Dual Audio: A Biographical Drama Film You Must Watch</h1>
-<p>If you are looking for a movie that will inspire you with the story of a mathematical genius who overcame his mental illness and won the Nobel Prize, then you should watch <em>A Beautiful Mind</em> in dual audio. This 2001 film is based on the life of John Nash, a brilliant but troubled mathematician who made groundbreaking contributions to game theory, cryptography, and economics. The film was directed by Ron Howard and starred Russell Crowe as Nash, Jennifer Connelly as his wife Alicia, and Ed Harris as his mysterious handler.</p>
-<h2>A beautiful mind dual audio</h2><br /><p><b><b>DOWNLOAD</b> &#8230; <a href="https://jinyurl.com/2uEobP">https://jinyurl.com/2uEobP</a></b></p><br /><br />
-<p><em>A Beautiful Mind</em> was a critical and commercial success, winning four Academy Awards, including Best Picture, Best Director, Best Adapted Screenplay, and Best Supporting Actress. It also received positive reviews from critics and audiences alike, who praised the performances, the direction, the screenplay, and the depiction of Nash's struggle with schizophrenia. The film was also praised for its accuracy and sensitivity in portraying Nash's life and achievements.</p>
-<p>If you want to watch <em>A Beautiful Mind</em> in dual audio, you can download it from KatMovieHD.la, a website that offers free download or watch online of movies and TV shows in Hindi dubbed and English. You can choose from different quality options, such as 480p, 720p, 1080p, or Blu-Ray. You can also enjoy the original Hindi (ORG) DD 5.1 and English audio tracks for a better viewing experience.</p>
-<p><em>A Beautiful Mind</em> is a movie that will make you think, feel, and appreciate the power of the human mind and spirit. It is a movie that will show you how a beautiful mind can overcome any obstacle and achieve greatness. It is a movie that you must watch in dual audio.</p>
-
-<p><em>A Beautiful Mind</em> also explores the personal and professional life of John Nash, who was born on June 13, 1928, in Bluefield, West Virginia[^1^] [^2^] [^3^]. His father was an electrical engineer and his mother was a schoolteacher before she married[^1^] [^2^] [^3^]. Nash showed an early interest in mathematics and science and was encouraged by his parents to pursue his education[^2^]. He graduated from Carnegie Institute of Technology (now Carnegie Mellon University) in 1948 with bachelor's and master's degrees in mathematics[^1^] [^2^]. He then went to Princeton University, where he completed his Ph.D. in mathematics in 1950 at the age of 22[^1^] [^2^]. His doctoral dissertation, titled "Non-Cooperative Games", introduced the Nash equilibrium, a fundamental solution concept in game theory[^1^] [^2^].</p>
-<p>Nash's academic career took him to the Massachusetts Institute of Technology (MIT), where he became a professor of mathematics in 1951[^1^] [^2^]. He also worked as a consultant for the RAND Corporation, a think tank that conducted research for the U.S. military[^1^] [^2^]. During this time, he met and married Alicia Larde, a physics student at MIT, and they had a son, John Charles Martin Nash[^1^] [^2^]. However, Nash also suffered from paranoid schizophrenia, a mental disorder that caused him to have delusions and hallucinations[^1^] [^2^]. He was hospitalized several times and underwent various treatments, including electroconvulsive therapy[^1^] [^2^]. His condition affected his family life and his professional work, and he eventually left MIT in 1959[^1^] [^2^].</p>
\ No newline at end of file
diff --git a/spaces/fastx/customer-support-chatbot/app.py b/spaces/fastx/customer-support-chatbot/app.py
deleted file mode 100644
index f58b179caf678dc68932ef0408eca3d4c3f0499d..0000000000000000000000000000000000000000
--- a/spaces/fastx/customer-support-chatbot/app.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Import packages
-import openai
-from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
-from langchain.chat_models import ChatOpenAI
-import gradio as gr
-import os
-
-#os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
-# OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
-'''
-def construct_index(directory_path):
-    max_input_size = 4096
-    num_outputs = 512
-    max_chunk_overlap = 20
-    chunk_size_limit = 600
-    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
-    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
-    documents = SimpleDirectoryReader(directory_path).load_data()
-    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
-    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
-    index.save_to_disk('index.json')
-    return index
-'''
-
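-# Answer a question against a prebuilt llama_index vector index.
-# Assumes 'index.json' already exists in the Space (e.g. built once with the
-# commented-out construct_index() above); the user's API key is set per request.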
-def chatbot(input_text, openai_api_key):
-    os.environ["OPENAI_API_KEY"] = openai_api_key
-    index = GPTSimpleVectorIndex.load_from_disk('index.json')
-    response = index.query(input_text, response_mode="compact")
-    return response.response
-
-# chat = gr.Interface(fn=chatbot,
-#                     inputs=gr.components.Textbox(lines=7, label="Ask your question to ChatGPT"),
-#                     outputs="text",
-#                     title="Custom-trained AI Chatbot for employee tax assessment 2022")
-
-# Documentation on how to make Gradio interfaces: https://gradio.app/quickstart/
-
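-# Minimal Blocks layout: an API-key field and a question box feed chatbot(),
-# whose answer lands in the response box when the button is clicked.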
-with gr.Blocks() as chat:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("""## Trained with custom data""",
-                    elem_id="header")
-
-    with gr.Column():
-        gr.Markdown("Enter your OpenAI API Key.")
-        openai_api_key = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", label="Enter OpenAI API Key")
-    
-    text_input = gr.Textbox(lines=7, label="Enter your question")
-    output = gr.Textbox(label="Response")
-    greet_btn = gr.Button("Generate Response")
-    greet_btn.click(fn=chatbot, inputs=[text_input, openai_api_key], outputs=output)
-
-chat.launch()
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Angry Birds Classic 2012 APK The Original and Best Bird-Slinging Game.md b/spaces/fatiXbelha/sd/Angry Birds Classic 2012 APK The Original and Best Bird-Slinging Game.md
deleted file mode 100644
index f317c0c3ee176b990923ed195ce75467cdcf5ecf..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Angry Birds Classic 2012 APK The Original and Best Bird-Slinging Game.md	
+++ /dev/null
@@ -1,135 +0,0 @@
-
-<h1>Angry Birds Classic 2012 APK: A Blast from the Past</h1>
- <p>Do you remember the time when you used to fling angry birds at green pigs using a slingshot? Do you miss the fun and addictive gameplay that made you spend hours on your phone or tablet? If you do, then you might want to download Angry Birds Classic 2012 APK, a recreation of the original game that started it all.</p>
-<h2>angry birds classic 2012 apk</h2><br /><p><b><b>DOWNLOAD</b> &#10026; <a href="https://urllie.com/2uNEBX">https://urllie.com/2uNEBX</a></b></p><br /><br />
- <h2>What is Angry Birds Classic 2012 APK?</h2>
- <p>Angry Birds Classic 2012 APK is a paid app that lets you play the classic version of Angry Birds on your Android device. It is not available on the Google Play Store, but you can download it from other sources online. It contains all the levels, episodes, birds, and features that made Angry Birds one of the most popular mobile games ever.</p>
- <h3>The origin and popularity of Angry Birds</h3>
- <p>Angry Birds was first released in December 2009 by Rovio Entertainment, a Finnish video game developer. It was inspired by a sketch of stylized wingless birds and a game called Crush the Castle. The game was praised for its successful combination of fun gameplay, comical style, and low price. It became a huge hit on iOS devices and later expanded to other platforms such as Android, Windows Phone, PC, Mac, and more.</p>
 <p>By January 2014, there had been over 2 billion downloads across all platforms, making it the most downloaded freemium game series of all time. It also spawned many spin-offs, merchandise, animated series, and movies. However, in early 2019, Rovio discontinued and removed most of the Angry Birds games from the app stores, except for Angry Birds Friends. Many fans were disappointed by this decision and wanted to play the classic games again.</p>
- <h3>The features and gameplay of Angry Birds</h3>
- <p>The gameplay of Angry Birds is simple but challenging. You have to use a slingshot to launch different types of birds at structures made of wood, ice, stone, or other materials that are occupied by green pigs. Your goal is to destroy all the pigs on each level using as few birds as possible. You can also collect stars, golden eggs, and other items along the way.</p>
- <p>The game has many episodes, each with a different theme and setting. Some of the most famous ones are Poached Eggs, Mighty Hoax, Danger Above, The Big Setup, Ham 'Em High, Mine and Dine, Birdday Party, Bad Piggies, Surf and Turf, and Red's Mighty Feathers. Each episode has several levels that vary in difficulty and complexity.</p>
-<p>angry birds classic 2012 apk download<br />
-angry birds classic 2012 apk free<br />
-angry birds classic 2012 apk mod<br />
-angry birds classic 2012 apk old version<br />
-angry birds classic 2012 apk uptodown<br />
-angry birds classic 2012 apk android<br />
-angry birds classic 2012 apk full<br />
-angry birds classic 2012 apk offline<br />
-angry birds classic 2012 apk latest<br />
-angry birds classic 2012 apk unlimited<br />
-angry birds classic 2012 apk hack<br />
-angry birds classic 2012 apk original<br />
-angry birds classic 2012 apk file<br />
-angry birds classic 2012 apk mirror<br />
-angry birds classic 2012 apk update<br />
-angry birds classic 2012 apk revdl<br />
-angry birds classic 2012 apk pure<br />
-angry birds classic 2012 apk obb<br />
-angry birds classic 2012 apk data<br />
-angry birds classic 2012 apk rexdl<br />
-angry birds classic 2012 apk cracked<br />
-angry birds classic 2012 apk premium<br />
-angry birds classic 2012 apk pro<br />
-angry birds classic 2012 apk unlocked<br />
-angry birds classic 2012 apk mega<br />
-angry birds classic 2012 apk mediafire<br />
-angry birds classic 2012 apk google drive<br />
-angry birds classic 2012 apk direct link<br />
-angry birds classic 2012 apk no ads<br />
-angry birds classic 2012 apk no root<br />
-angry birds classic 2012 apk for pc<br />
-angry birds classic 2012 apk for ios<br />
-angry birds classic 2012 apk for windows<br />
-angry birds classic 2012 apk for mac<br />
-angry birds classic 2012 apk for tablet<br />
-angry birds classic 2012 apk for firestick<br />
-angry birds classic 2012 apk for chromebook<br />
-angry birds classic 2012 apk for smart tv<br />
-angry birds classic 2012 apk for bluestacks<br />
-angry birds classic 2012 apk for emulator</p>
- <p>The game also has many types of birds, each with a unique ability and personality. Some of the most iconic ones are Red, the leader of the flock; Chuck, the yellow speedster; Bomb, the black explosive; Matilda, the white egg-layer; The Blues, the blue triplets; Hal, the green boomerang; Terence, the big brother; Stella, the pink bubbler; Bubbles, the orange inflator; and Mighty Eagle, the powerful savior. Each bird has a different strength and weakness that you have to consider when choosing which one to use.</p>
- <h3>The reasons to download Angry Birds Classic 2012 APK</h3>
- <p>There are many reasons why you might want to download Angry Birds Classic 2012 APK. Here are some of them:</p>
- <ul>
-<li>You want to relive the nostalgia of playing one of the best mobile games ever.</li>
-<li>You want to enjoy the original graphics, sounds, and music that made Angry Birds so charming and memorable.</li>
-<li>You want to challenge yourself with hundreds of levels that test your skills and creativity.</li>
-<li>You want to have fun with the different birds and their abilities that offer endless possibilities.</li>
-<li>You want to support Rovio and show them that you still love Angry Birds and want them to bring back the classic games.</li>
-</ul>
- <h2>How to Download and Install Angry Birds Classic 2012 APK?</h2>
- <p>If you are convinced that you want to download Angry Birds Classic 2012 APK, then you need to follow these steps:</p>
- <h3>The steps to download Angry Birds Classic 2012 APK</h3>
- <ol>
-<li>Go to a reliable website that offers Angry Birds Classic 2012 APK for download. You can search for it on Google or Bing, but make sure that you check the reviews and ratings of the website before downloading anything.</li>
-<li>Click on the download button and wait for the file to be downloaded on your device. The file size is about 40 MB, so it should not take too long.</li>
-<li>Once the file is downloaded, go to your device's settings and enable the installation of apps from unknown sources. This is necessary because Angry Birds Classic 2012 APK is not from the Google Play Store.</li>
-<li>Locate the file on your device's storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the app to be installed.</li>
-<li>After the installation is complete, you can launch the app and start playing Angry Birds Classic 2012 APK.</li>
-</ol>
- <h3>The tips and tricks to play Angry Birds Classic 2012 APK</h3>
- <p>If you want to play Angry Birds Classic 2012 APK like a pro, then you need to know these tips and tricks:</p>
- <ul>
-<li>Aim carefully and use the trajectory line as a guide. You can also zoom in or out to see more or less of the level.</li>
-<li>Use the right bird for the right situation. For example, use Chuck for breaking wood, Bomb for blowing up stone, Matilda for dropping eggs on pigs, etc.</li>
-<li>Try to hit multiple targets with one bird. You can do this by bouncing off walls, hitting TNT crates, triggering chain reactions, etc.</li>
-<li>Use the Mighty Eagle if you are stuck or want to get three stars on every level. You can use it once per hour for free or buy more feathers with real money.</li>
-<li>Have fun and experiment with different strategies. There is no one right way to play Angry Birds Classic 2012 APK.</li>
-</ul>
- <h3>The benefits and drawbacks of Angry Birds Classic 2012 APK</h3>
- <p>Like any other app, Angry Birds Classic 2012 APK has its benefits and drawbacks. Here are some of them:</p>
- <table>
-<tr><th>Benefits</th><th>Drawbacks</th></tr>
-<tr><td>It is fun and addictive.</td><td>It can be frustrating and repetitive.</td></tr>
-<tr><td>It is challenging and rewarding.</td><td>It can be hard and unfair.</td></tr>
-<tr><td>It is nostalgic and memorable.</td><td>It can be outdated and boring.</td></tr>
-<tr><td>It is cheap and accessible.</td><td>It can be risky and illegal.</td></tr>
-<tr><td>It is supported and updated.</td><td>It can be buggy and incompatible.</td></tr>
-</table>
- <h2>How to Enjoy Angry Birds Classic 2012 APK?</h2>
- <p>Now that you have downloaded and installed Angry Birds Classic 2012 APK, you might be wondering how to enjoy it to the fullest. Here are some suggestions:</p>
- <h3>The best levels and episodes of Angry Birds Classic 2012 APK</h3>
- <p>Angry Birds Classic 2012 APK has over 500 levels across 15 episodes, so you will never run out of content to play. However, some levels and episodes are more fun and memorable than others. Here are some of the best ones:</p>
- <ul>
-<li>Poached Eggs: The first episode of the game, where you learn the basics and meet the original birds and pigs.</li>
-<li>Mighty Hoax: The second episode of the game, where you face the King Pig and his cardboard cutouts.</li>
-<li>Danger Above: The fourth episode of the game, where you fly high in the sky and encounter new birds and obstacles.</li>
-<li>The Big Setup: The ninth episode of the game, where you switch roles with the pigs and use their contraptions to defeat them.</li>
-<li>Birdday Party: The thirteenth episode of the game, where you celebrate the birthday of Angry Birds with cake and candles.</li>
-</ul>
- <h3>The best birds and abilities of Angry Birds Classic 2012 APK</h3>
- <p>Angry Birds Classic 2012 APK has 10 different types of birds, each with a unique ability and personality. However, some birds and abilities are more useful and fun than others. Here are some of the best ones:</p>
- <ul>
-<li>Red: The leader of the flock, who has no special ability but is reliable and versatile.</li>
-<li>Bomb: The black explosive, who can blow up anything in his vicinity with a tap or on impact.</li>
-<li>Stella: The pink bubbler, who can create a bubble that lifts up anything inside it with a tap or on impact.</li>
-<li>Bubbles: The orange inflator, who can inflate himself to a huge size with a tap or on impact, pushing away anything around him.</li>
-<li>Mighty Eagle: The powerful savior, who can destroy everything on the screen with a can of sardines.</li>
-</ul>
- <h3>The best reviews and ratings of Angry Birds Classic 2012 APK</h3>
- <p>Angry Birds Classic 2012 APK has received many positive reviews and ratings from users and critics alike. Here are some of them:</p>
- <blockquote>"Angry Birds Classic 2012 APK is a must-have for any fan of the original game. It brings back all the fun and nostalgia of flinging birds at pigs with a slingshot. It is well worth the price and the download." - User review on ApkPure.com </blockquote>
- <blockquote>"Angry Birds Classic 2012 APK is a great way to relive the glory days of one of the most popular mobile games ever. It has all the levels, episodes, birds, and features that made Angry Birds so addictive and enjoyable. It is a blast from the past that you don't want to miss." - Critic review on AndroidAuthority.com </blockquote>
- <blockquote>"Angry Birds Classic 2012 APK is a perfect recreation of the classic version of Angry Birds that started it all. It has everything that made Angry Birds a phenomenon: fun gameplay, comical style, low price, and endless content. It is a timeless masterpiece that you should download right now." - Critic review on TechRadar.com </blockquote>
- <h2>Conclusion</h2>
- <p>In conclusion, Angry Birds Classic 2012 APK is an app that lets you play the classic version of Angry Birds on your Android device. It is not available on the Google Play Store, but you can download it from other sources online. It contains all the levels, episodes, birds, and features that made Angry Birds one of the most popular mobile games ever. It is fun, challenging, nostalgic, cheap, accessible, supported, and updated. It is also risky, illegal, hard, unfair, outdated, boring, buggy, and incompatible. It depends on your perspective and preference. However, if you love Angry Birds and want to experience it again in its original form, then you should definitely give Angry Birds Classic 2012 APK a try. You won't regret it.</p>
- <p>Here are some FAQs that you might have about Angry Birds Classic 2012 APK:</p>
- <h3>FAQs</h3>
- <ol>
-<li>Is Angry Birds Classic 2012 APK safe to download and install?</li>
-<p>Angry Birds Classic 2012 APK is not officially endorsed or supported by Rovio Entertainment, the developer of Angry Birds. Therefore, it may contain viruses, malware, or other harmful elements that could damage your device or compromise your privacy. You should download and install Angry Birds Classic 2012 APK at your own risk and discretion. You should also scan the file with a reputable antivirus software before opening it.</p>
- <li>Is Angry Birds Classic 2012 APK legal to download and install?</li>
-<p>Angry Birds Classic 2012 APK is not authorized or licensed by Rovio Entertainment, the owner of Angry Birds. Therefore, it may violate the intellectual property rights and terms of service of Rovio Entertainment. You should download and install Angry Birds Classic 2012 APK at your own risk and responsibility. You should also respect the rights and wishes of Rovio Entertainment and delete Angry Birds Classic 2012 APK if they ask you to do so.</p>
- <li>Is Angry Birds Classic 2012 APK compatible with my device?</li>
-<p>Angry Birds Classic 2012 APK is designed to work on Android devices that have Android 4.1 or higher. However, it may not work on some devices due to different specifications, settings, or updates. You should check the compatibility of Angry Birds Classic 2012 APK with your device before downloading and installing it. You should also make sure that you have enough storage space and battery life to run Angry Birds Classic 2012 APK smoothly.</p>
- <li>Is Angry Birds Classic 2012 APK free to download and play?</li>
-<p>Angry Birds Classic 2012 APK is a paid app that costs $0.99 to download and play. However, some websites may offer it for free or for a lower price. You should be careful of these websites as they may be fraudulent or illegal. You should only download and play Angry Birds Classic 2012 APK from trusted and reputable sources that charge a fair and reasonable price.</p>
- <li>Is Angry Birds Classic 2012 APK worth downloading and playing?</li>
-<p>Angry Birds Classic 2012 APK is worth downloading and playing if you are a fan of the original game and want to relive the nostalgia of flinging birds at pigs with a slingshot. It is also worth downloading and playing if you are looking for a fun and addictive game that will keep you entertained for hours. However, you should also be aware of the risks and drawbacks of downloading and playing Angry Birds Classic 2012 APK, such as safety, legality, compatibility, cost, and quality issues.</p>
- <p>I hope this article has helped you learn more about Angry Birds Classic 2012 APK and how to download, install, and enjoy it. If you have any questions or comments, please feel free to leave them below. Thank you for reading!</p>
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Enjoy the Thrill of Driving in Srilanka with Driving Simulator Srilanka Old Version - Free Download.md b/spaces/fatiXbelha/sd/Enjoy the Thrill of Driving in Srilanka with Driving Simulator Srilanka Old Version - Free Download.md
deleted file mode 100644
index 2aca5f4fa1958751f3ccbe16a18ac350c852a22b..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Enjoy the Thrill of Driving in Srilanka with Driving Simulator Srilanka Old Version - Free Download.md	
+++ /dev/null
@@ -1,110 +0,0 @@
-
-<h1>Driving Simulator Srilanka: A Review of the Game and How to Download the Old Version</h1>
- <p>If you are a fan of driving simulator games and want to experience the realistic 3D environment and vehicles of Sri Lanka, then you might want to check out Driving Simulator Srilanka. This is a Sri Lankan based driving simulator game that offers a variety of gameplay modes, features, and customization options. In this article, we will review the game and show you how to download the old version of Driving Simulator Srilanka, which some players prefer over the latest version. We will also suggest some alternatives to Driving Simulator Srilanka that you might enjoy.</p>
- <h2>What is Driving Simulator Srilanka?</h2>
- <p>Driving Simulator Srilanka is a free Android game developed by P.G.Dhanushka Chathuranga and released in 2020. It is one of the most popular driving simulator games in Sri Lanka, with over 1 million downloads on Google Play Store. The game lets you drive various types of buses in different locations and scenarios, such as city driving, village driving, hill driving, and off-road driving. You can also customize your bus with different designs, liveries, horns, lights, spoilers, carpets, and reverse cams. The game aims to provide a realistic and immersive driving experience with high-end graphics, sounds, and physics.</p>
-<h2>driving simulator srilanka old version download</h2><br /><p><b><b>Download File</b> &#10038;&#10038;&#10038; <a href="https://urllie.com/2uNA0j">https://urllie.com/2uNA0j</a></b></p><br /><br />
- <h3>Features of the game</h3>
- <p>Some of the features that make Driving Simulator Srilanka stand out from other driving simulator games are:</p>
- <ul>
-<li><b>Realistic AI traffic system:</b> The game simulates the traffic behavior and rules of Sri Lanka, such as overtaking, honking, signaling, and obeying traffic lights. You have to be careful not to crash into other vehicles or pedestrians, or you will lose points and damage your bus.</li>
-<li><b>Realistic sky system:</b> The game features a dynamic sky system that changes according to the time of day and weather conditions. You can see the sun rise and set, the clouds move, and the rain fall. The sky system also affects the lighting and shadows of the environment.</li>
-<li><b>Realistic bus horns:</b> The game offers a variety of realistic bus horns that you can use to communicate with other drivers and pedestrians. You can also download free horns from the game's website or create your own horns using an audio editor.</li>
-<li><b>Realistic rotatable interiors:</b> The game allows you to rotate the camera inside the bus and see the details of the dashboard, steering wheel, pedals, mirrors, seats, windows, doors, and TV. You can also switch between different camera views, such as first-person view, third-person view, top view, and rear view.</li>
-<li><b>High-end graphics support for all mobile configurations:</b> The game supports a range of graphics settings that you can adjust according to your device's performance and battery life. You can choose between low, medium, high, and ultra graphics quality, as well as enable or disable anti-aliasing, shadows, reflections, motion blur, lens flare, bloom, and ambient occlusion.</li>
-</ul>
- <h3>How to play the game</h3>
- <p>To play Driving Simulator Srilanka, you need to download and install it from Google Play Store or from its official website. The game requires Android 4.4 or higher and at least 125 MB of free storage space. Once you launch the game, you can choose between two modes: Free Drive or Career Mode.</p>
- <ul>
-<li><b>Free Drive</b> mode lets you drive any bus you want in any location you want without any time limit or mission. You can explore the map, enjoy the scenery, and practice your driving skills. You can also change the weather, time of day, traffic density, and bus damage level from the settings menu.</li>
-<li><b>Career Mode</b> lets you complete various missions and earn money and experience points. You can use the money to buy new buses or upgrade your existing ones. You can also unlock new locations and scenarios as you progress through the game. The missions include transporting passengers, delivering goods, racing against other buses, and more.</li>
-</ul>
- <p>The game controls are simple and intuitive. You can use the on-screen buttons to steer, accelerate, brake, reverse, honk, signal, and open or close the doors. You can also use the tilt or swipe options to control the steering. You can see your speed, fuel level, damage level, and score on the top of the screen.</p>
- <h2>Why download the old version of Driving Simulator Srilanka?</h2>
- <p>Driving Simulator Srilanka is constantly updated with new features, improvements, and bug fixes. However, some players prefer to download and play the old version of the game for various reasons. Here are some of the advantages and disadvantages of downloading the old version of Driving Simulator Srilanka.</p>
- <h3>Advantages of the old version</h3>
- <p>Some of the advantages of downloading the old version of Driving Simulator Srilanka are:</p>
- <ul>
-<li><b>Better compatibility:</b> The old version of the game might run better on older or low-end devices that cannot handle the high-end graphics and physics of the latest version. The old version might also have fewer crashes and glitches than the new version.</li>
-<li><b>Nostalgia:</b> The old version of the game might bring back some fond memories for players who have been playing Driving Simulator Srilanka since its launch. They might enjoy the original features, designs, and sounds of the game that have changed over time.</li>
-<li><b>Challenge:</b> The old version of the game might offer a different or harder challenge for players who have mastered the new version. They might find it fun to try out the old missions, locations, and buses that are no longer available in the new version.</li>
-</ul>
- <h3>Disadvantages of the old version</h3>
- <p>Some of the disadvantages of downloading the old version of Driving Simulator Srilanka are:</p>
- <ul>
-<li><b>Missing features:</b> The old version of the game might not have some of the features that make Driving Simulator Srilanka more enjoyable and realistic. For example, the old version might not have the dynamic sky system, the realistic AI traffic system, or the customizable bus horns.</li>
-<li><b>Lack of support:</b> The old version of the game might not receive any updates or bug fixes from the developer. This means that any issues or errors that occur in the old version might not be resolved or addressed.</li>
-<li><b>Risk of malware:</b> The old version of the game might not be available on Google Play Store or on its official website. This means that players who want to download it might have to rely on third-party sources that are not verified or secure. These sources might contain malware or viruses that could harm their devices or compromise their privacy.</li>
-</ul>
- <h2>How to download the old version of Driving Simulator Srilanka?</h2>
- <p>If you still want to download and play the old version of Driving Simulator Srilanka despite its disadvantages, you will need to follow some steps to do so. Here is a step-by-step guide on how to download and install the old version of Driving Simulator Srilanka on your Android device.</p>
- <h3>Step-by-step guide</h3>
- <ol>
-<li><b>Uninstall the new version of Driving Simulator Srilanka:</b> If you have already installed the new version of Driving Simulator Srilanka from Google Play Store or from its official website, you will need to uninstall it first. To do this, go to your device's settings, tap on apps, find Driving Simulator Srilanka, and tap on uninstall. Alternatively, you can long-press on the game's icon on your home screen and drag it to the uninstall option.</li>
-<li><b>Find the old version of Driving Simulator Srilanka:</b> The next step is to find the old version of Driving Simulator Srilanka that you want to download. You can search for it on Google or on other websites that offer APK files for Android apps. Some of the websites that might have the old version of Driving Simulator Srilanka are APKPure, APKMirror, and Uptodown. However, be careful when downloading from these sources, as they might not be safe or reliable. Always check the reviews, ratings, and permissions of the app before downloading it.</li>
-<li><b>Enable unknown sources on your device:</b> Before you can install the old version of Driving Simulator Srilanka, you will need to enable unknown sources on your device. This will allow you to install apps from sources other than Google Play Store. To do this, go to your device's settings, tap on security, and toggle on the unknown sources option. You might also need to grant permission to your browser or file manager to install apps from unknown sources.</li>
-<li><b>Download and install the old version of Driving Simulator Srilanka:</b> Once you have enabled unknown sources on your device, you can download and install the old version of Driving Simulator Srilanka. To do this, go to the website where you found the old version of Driving Simulator Srilanka, tap on the download button, and wait for the APK file to be downloaded. Then, go to your device's file manager, find the downloaded APK file, and tap on it. You might see a warning message that says installing this app might harm your device. Ignore this message and tap on install anyway. Wait for the installation process to finish and then launch the game.</li>
-</ol>
- <h3>Tips and tricks for downloading and installing the old version</h3>
- <p>Here are some tips and tricks that might help you download and install the old version of Driving Simulator Srilanka more easily and safely.</p>
- <ul>
-<li><b>Back up your data:</b> Before you uninstall the new version of Driving Simulator Srilanka, you might want to back up your data, such as your progress, achievements, and customizations. You can do this by using a cloud service or a local backup app. This way, you can restore your data if you decide to switch back to the new version later.</li>
-<li><b>Check the compatibility:</b> Before you download and install the old version of Driving Simulator Srilanka, you might want to check if it is compatible with your device and Android version. You can do this by reading the description and requirements of the app on the website where you found it. You can also check the reviews and comments of other users who have downloaded it.</li>
-<li><b>Disable automatic updates:</b> After you have installed the old version of Driving Simulator Srilanka, you might want to disable automatic updates for it. This will prevent Google Play Store from updating it to the new version without your consent. To do this, go to Google Play Store, tap on menu, tap on settings, tap on auto-update apps, and choose don't auto-update apps.</li>
-</ul>
- <h2>Alternatives to Driving Simulator Srilanka</h2>
- <p>If you are looking for some alternatives to Driving Simulator Srilanka that offer similar or better gameplay experiences, here are some suggestions for you.</p>
-<p>* Driving Simulator Srilanka APK free download<br />
-* Driving Simulator Srilanka game with realistic 3D environment<br />
-* Driving Simulator Srilanka 2.5 latest version update<br />
-* Driving Simulator Srilanka bus customization and design<br />
-* Driving Simulator Srilanka city, village, hill and off-road driving<br />
-* Driving Simulator Srilanka game features and gameplay<br />
-* Driving Simulator Srilanka game review and rating<br />
-* Driving Simulator Srilanka how to install and play<br />
-* Driving Simulator Srilanka old update with Dam Rajina bus<br />
-* Driving Simulator Srilanka simulation game for Android<br />
-* Driving Simulator Srilanka video tutorial and walkthrough<br />
-* Driving Simulator Srilanka best driving simulator game in Sri Lanka<br />
-* Driving Simulator Srilanka download link and APK file<br />
-* Driving Simulator Srilanka realistic AI traffic and sky system<br />
-* Driving Simulator Srilanka developer P.G.Dhanushka Chathuranga<br />
-* Driving Simulator Srilanka game tips and tricks<br />
-* Driving Simulator Srilanka game comparison with other driving games<br />
-* Driving Simulator Srilanka game feedback and suggestions<br />
-* Driving Simulator Srilanka game problems and solutions<br />
-* Driving Simulator Srilanka game mod APK and cheats<br />
-* Driving Simulator Srilanka game support and contact information<br />
-* Driving Simulator Srilanka game news and updates<br />
-* Driving Simulator Srilanka game screenshots and videos<br />
-* Driving Simulator Srilanka game requirements and compatibility<br />
-* Driving Simulator Srilanka game download size and speed</p>
- <h3>Other driving simulator games for Android</h3>
- <p>Some of the other driving simulator games for Android that you might enjoy are:</p>
- <ul>
-<li><b>Bus Simulator Indonesia:</b> This is a bus driving simulator game that lets you drive various types of buses in Indonesia. You can customize your bus with different designs, stickers, horns, and accessories. You can also enjoy realistic traffic behavior, weather effects, day-night cycle, and online multiplayer mode.</li>
-<li><b>Euro Truck Driver 2018:</b> This is a truck driving simulator game that lets you drive various types of trucks across Europe. You can deliver cargo to different cities, explore different roads and landmarks, and upgrade your truck with different parts and features. You can also enjoy realistic physics, graphics, sounds, and weather conditions.</li>
-<li><b>Car Parking Multiplayer:</b> This is a car parking simulator game that lets you park various types of cars in different scenarios. You can also drive around in an open world map with other players online and chat with them. You can also customize your car with different colors, wheels, stickers, and accessories. You can also enjoy realistic graphics, sounds, and physics.</li>
-<li><b>Real Driving Sim:</b> This is a car driving simulator game that lets you drive various types of cars in a huge open world map. You can choose between different modes, such as free roam, career, racing, and challenges. You can also upgrade your car with different engines, transmissions, brakes, and suspensions. You can also enjoy realistic graphics, sounds, weather, and traffic.</li>
-</ul>
- <h3>Other Sri Lankan based games for Android</h3>
- <p>Some of the other Sri Lankan based games for Android that you might enjoy are:</p>
- <ul>
-<li><b>Sri Lanka Train Simulator 2019:</b> This is a train simulator game that lets you drive various types of trains in Sri Lanka. You can choose between different routes, such as Colombo to Kandy, Galle to Matara, and Jaffna to Anuradhapura. You can also enjoy realistic graphics, sounds, and scenery.</li>
-<li><b>Sri Lanka Cricket Premier League 2020:</b> This is a cricket game that lets you play the Sri Lanka Cricket Premier League 2020 tournament. You can choose between different teams, such as Colombo Kings, Dambulla Viiking, Galle Gladiators, and Jaffna Stallions. You can also enjoy realistic graphics, animations, and commentary.</li>
-<li><b>Sri Lanka Ravana Game:</b> This is an action-adventure game that lets you play as Ravana, the legendary king of Lanka. You can explore the ancient city of Lanka, fight against enemies, collect weapons and items, and solve puzzles. You can also enjoy realistic graphics, sounds, and music.</li>
-</ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Driving Simulator Srilanka is a fun and realistic driving simulator game that lets you drive various types of buses in different locations and scenarios in Sri Lanka. You can also customize your bus with different designs, horns, lights, and accessories. However, some players might prefer to download the old version of Driving Simulator Srilanka for various reasons, such as better compatibility, nostalgia, or challenge. In this article, we have reviewed the game and showed you how to download the old version of Driving Simulator Srilanka on your Android device. We have also suggested some alternatives to Driving Simulator Srilanka that you might enjoy.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Driving Simulator Srilanka and its old version.</p>
- <ol>
-<li><b>Is Driving Simulator Srilanka free?</b><br>
-Yes, Driving Simulator Srilanka is free to download and play on Android devices. However, it contains ads and in-app purchases that you can disable or buy with real money.</li>
-<li><b>What is the latest version of Driving Simulator Srilanka?</b><br>
-The latest version of Driving Simulator Srilanka is 1.1.2, which was released on June 16th 2021. It added new buses, locations, features, and improvements to the game.</li>
-<li><b>What is the oldest version of Driving Simulator Srilanka?</b><br>
-The oldest version of Driving Simulator Srilanka is 0.1, which was released on April 4th 2020. It was the first version of the game that introduced the basic gameplay mechanics and graphics.</li>
-<li><b>How can I contact the developer of Driving Simulator Srilanka?</b><br>
-You can contact the developer of Driving Simulator Srilanka by sending an email to pgdhanushkachathuranga@gmail.com or by visiting their Facebook page. You can also leave a review or a comment on Google Play Store or on their website.</li>
-<li><b>How can I support the development of Driving Simulator Srilanka?</b><br>
-You can support the development of Driving Simulator Srilanka by rating and reviewing the game on Google Play Store or on their website, by sharing the game with your friends and family, by reporting any bugs or issues that you encounter in the game, or by making a donation through their website.</li>
-</ol>
\ No newline at end of file
diff --git a/spaces/feng2022/styleganhuman_copy/dnnlib/tflib/ops/upfirdn_2d.py b/spaces/feng2022/styleganhuman_copy/dnnlib/tflib/ops/upfirdn_2d.py
deleted file mode 100644
index 22e4b14fd5436e42336d3dd82f6135876076c518..0000000000000000000000000000000000000000
--- a/spaces/feng2022/styleganhuman_copy/dnnlib/tflib/ops/upfirdn_2d.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-#
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-"""Custom TensorFlow ops for efficient resampling of 2D images."""
-
-import os
-import numpy as np
-import tensorflow as tf
-from .. import custom_ops
-
-def _get_plugin():
-    return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
-
-#----------------------------------------------------------------------------
-
-def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'):
-    r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
-
-    Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
-    and performs the following operations for each image, batched across
-    `majorDim` and `minorDim`:
-
-    1. Pad the image with zeros by the specified number of pixels on each side
-       (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
-       corresponds to cropping the image.
-
-    2. Upsample the image by inserting zeros after each pixel (`upx`, `upy`).
-
-    3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
-       image so that the footprint of all output pixels lies within the input image.
-
-    4. Downsample the image by throwing away pixels (`downx`, `downy`).
-
-    This sequence of operations bears close resemblance to scipy.signal.upfirdn().
-    The fused op is considerably more efficient than performing the same calculation
-    using standard TensorFlow ops. It supports gradients of arbitrary order.
-
-    Args:
-        x:      Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
-        k:      2D FIR filter of the shape `[firH, firW]`.
-        upx:    Integer upsampling factor along the X-axis (default: 1).
-        upy:    Integer upsampling factor along the Y-axis (default: 1).
-        downx:  Integer downsampling factor along the X-axis (default: 1).
-        downy:  Integer downsampling factor along the Y-axis (default: 1).
-        padx0:  Number of pixels to pad on the left side (default: 0).
-        padx1:  Number of pixels to pad on the right side (default: 0).
-        pady0:  Number of pixels to pad on the top side (default: 0).
-        pady1:  Number of pixels to pad on the bottom side (default: 0).
-        impl:   Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.
-    """
-
-    impl_dict = {
-        'ref':  _upfirdn_2d_ref,
-        'cuda': _upfirdn_2d_cuda,
-    }
-    return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)
-
-#----------------------------------------------------------------------------
-
-def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
-    """Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops."""
-
-    x = tf.convert_to_tensor(x)
-    k = np.asarray(k, dtype=np.float32)
-    assert x.shape.rank == 4
-    inH = x.shape[1].value
-    inW = x.shape[2].value
-    minorDim = _shape(x, 3)
-    kernelH, kernelW = k.shape
-    assert inW >= 1 and inH >= 1
-    assert kernelW >= 1 and kernelH >= 1
-    assert isinstance(upx, int) and isinstance(upy, int)
-    assert isinstance(downx, int) and isinstance(downy, int)
-    assert isinstance(padx0, int) and isinstance(padx1, int)
-    assert isinstance(pady0, int) and isinstance(pady1, int)
-
-    # Upsample (insert zeros).
-    x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
-    x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
-    x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])
-
-    # Pad (crop if negative).
-    x = tf.pad(x, [[0, 0], [max(pady0, 0), max(pady1, 0)], [max(padx0, 0), max(padx1, 0)], [0, 0]])
-    x = x[:, max(-pady0, 0) : x.shape[1].value - max(-pady1, 0), max(-padx0, 0) : x.shape[2].value - max(-padx1, 0), :]
-
-    # Convolve with filter.
-    x = tf.transpose(x, [0, 3, 1, 2])
-    x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
-    w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
-    x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
-    x = tf.reshape(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 + padx1 - kernelW + 1])
-    x = tf.transpose(x, [0, 2, 3, 1])
-
-    # Downsample (throw away pixels).
-    return x[:, ::downy, ::downx, :]
-
-#----------------------------------------------------------------------------
-
-def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
-    """Fast CUDA implementation of `upfirdn_2d()` using custom ops."""
-
-    x = tf.convert_to_tensor(x)
-    k = np.asarray(k, dtype=np.float32)
-    majorDim, inH, inW, minorDim = x.shape.as_list()
-    kernelH, kernelW = k.shape
-    assert inW >= 1 and inH >= 1
-    assert kernelW >= 1 and kernelH >= 1
-    assert isinstance(upx, int) and isinstance(upy, int)
-    assert isinstance(downx, int) and isinstance(downy, int)
-    assert isinstance(padx0, int) and isinstance(padx1, int)
-    assert isinstance(pady0, int) and isinstance(pady1, int)
-
-    outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1
-    outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1
-    assert outW >= 1 and outH >= 1
-
-    kc = tf.constant(k, dtype=x.dtype)
-    gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype)
-    gpadx0 = kernelW - padx0 - 1
-    gpady0 = kernelH - pady0 - 1
-    gpadx1 = inW * upx - outW * downx + padx0 - upx + 1
-    gpady1 = inH * upy - outH * downy + pady0 - upy + 1
-
-    @tf.custom_gradient
-    def func(x):
-        y = _get_plugin().up_fir_dn2d(x=x, k=kc, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)
-        y.set_shape([majorDim, outH, outW, minorDim])
-        @tf.custom_gradient
-        def grad(dy):
-            dx = _get_plugin().up_fir_dn2d(x=dy, k=gkc, upx=downx, upy=downy, downx=upx, downy=upy, padx0=gpadx0, padx1=gpadx1, pady0=gpady0, pady1=gpady1)
-            dx.set_shape([majorDim, inH, inW, minorDim])
-            return dx, func
-        return y, grad
-    return func(x)
-
-#----------------------------------------------------------------------------
-
-def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
-    r"""Filter a batch of 2D images with the given FIR filter.
-
-    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
-    and filters each image with the given filter. The filter is normalized so that
-    if the input pixels are constant, they will be scaled by the specified `gain`.
-    Pixels outside the image are assumed to be zero.
-
-    Args:
-        x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
-        k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
-        gain:         Scaling factor for signal magnitude (default: 1.0).
-        data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
-        impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the same shape and datatype as `x`.
-    """
-
-    k = _setup_kernel(k) * gain
-    p = k.shape[0] - 1
-    return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def upsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
-    r"""Upsample a batch of 2D images with the given filter.
-
-    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
-    and upsamples each image with the given filter. The filter is normalized so that
-    if the input pixels are constant, they will be scaled by the specified `gain`.
-    Pixels outside the image are assumed to be zero, and the filter is padded with
-    zeros so that its shape is a multiple of the upsampling factor.
-
-    Args:
-        x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
-        k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
-                      The default is `[1] * factor`, which corresponds to nearest-neighbor
-                      upsampling.
-        factor:       Integer upsampling factor (default: 2).
-        gain:         Scaling factor for signal magnitude (default: 1.0).
-        data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
-        impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the shape `[N, C, H * factor, W * factor]` or
-        `[N, H * factor, W * factor, C]`, and same datatype as `x`.
-    """
-
-    assert isinstance(factor, int) and factor >= 1
-    if k is None:
-        k = [1] * factor
-    k = _setup_kernel(k) * (gain * (factor ** 2))
-    p = k.shape[0] - factor
-    return _simple_upfirdn_2d(x, k, up=factor, pad0=(p+1)//2+factor-1, pad1=p//2, data_format=data_format, impl=impl)
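-
-# Note (illustrative): with the default filter, upsample_2d(x, factor=2) reduces
-# to nearest-neighbor upsampling: every input pixel becomes a 2x2 block.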
-
-#----------------------------------------------------------------------------
-
-def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
-    r"""Downsample a batch of 2D images with the given filter.
-
-    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
-    and downsamples each image with the given filter. The filter is normalized so that
-    if the input pixels are constant, they will be scaled by the specified `gain`.
-    Pixels outside the image are assumed to be zero, and the filter is padded with
-    zeros so that its shape is a multiple of the downsampling factor.
-
-    Args:
-        x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
-        k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
-                      The default is `[1] * factor`, which corresponds to average pooling.
-        factor:       Integer downsampling factor (default: 2).
-        gain:         Scaling factor for signal magnitude (default: 1.0).
-        data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
-        impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the shape `[N, C, H // factor, W // factor]` or
-        `[N, H // factor, W // factor, C]`, and same datatype as `x`.
-    """
-
-    assert isinstance(factor, int) and factor >= 1
-    if k is None:
-        k = [1] * factor
-    k = _setup_kernel(k) * gain
-    p = k.shape[0] - factor
-    return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
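-
-# Note (illustrative): with the default filter, downsample_2d(x, factor=2)
-# reduces to 2x2 average pooling, as the docstring above states.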
-
-#----------------------------------------------------------------------------
-
-def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
-    r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
-
-    Padding is performed only once at the beginning, not between the operations.
-    The fused op is considerably more efficient than performing the same calculation
-    using standard TensorFlow ops. It supports gradients of arbitrary order.
-
-    Args:
-        x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
-        w:            Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
-                      Grouped convolution can be performed by setting `inChannels`
-                      to the channel count of `x` divided by `numGroups`.
-        k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
-                      The default is `[1] * factor`, which corresponds to nearest-neighbor
-                      upsampling.
-        factor:       Integer upsampling factor (default: 2).
-        gain:         Scaling factor for signal magnitude (default: 1.0).
-        data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
-        impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the shape `[N, C, H * factor, W * factor]` or
-        `[N, H * factor, W * factor, C]`, and same datatype as `x`.
-    """
-
-    assert isinstance(factor, int) and factor >= 1
-
-    # Check weight shape.
-    w = tf.convert_to_tensor(w)
-    assert w.shape.rank == 4
-    convH = w.shape[0].value
-    convW = w.shape[1].value
-    inC = _shape(w, 2)
-    outC = _shape(w, 3)
-    assert convW == convH
-
-    # Setup filter kernel.
-    if k is None:
-        k = [1] * factor
-    k = _setup_kernel(k) * (gain * (factor ** 2))
-    p = (k.shape[0] - factor) - (convW - 1)
-
-    # Determine data dimensions.
-    if data_format == 'NCHW':
-        stride = [1, 1, factor, factor]
-        output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW]
-        num_groups = _shape(x, 1) // inC
-    else:
-        stride = [1, factor, factor, 1]
-        output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC]
-        num_groups = _shape(x, 3) // inC
-
-    # Transpose weights.
-    w = tf.reshape(w, [convH, convW, inC, num_groups, -1])
-    w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
-    w = tf.reshape(w, [convH, convW, -1, num_groups * inC])
-
-    # Execute.
-    x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)
-    return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
-    r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
-
-    Padding is performed only once at the beginning, not between the operations.
-    The fused op is considerably more efficient than performing the same calculation
-    using standard TensorFlow ops. It supports gradients of arbitrary order.
-
-    Args:
-        x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
-        w:            Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
-                      Grouped convolution can be performed by setting `inChannels`
-                      to the channel count of `x` divided by `numGroups`.
-        k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
-                      The default is `[1] * factor`, which corresponds to average pooling.
-        factor:       Integer downsampling factor (default: 2).
-        gain:         Scaling factor for signal magnitude (default: 1.0).
-        data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
-        impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
-    Returns:
-        Tensor of the shape `[N, C, H // factor, W // factor]` or
-        `[N, H // factor, W // factor, C]`, and same datatype as `x`.
-    """
-
-    assert isinstance(factor, int) and factor >= 1
-    w = tf.convert_to_tensor(w)
-    convH, convW, _inC, _outC = w.shape.as_list()
-    assert convW == convH
-    if k is None:
-        k = [1] * factor
-    k = _setup_kernel(k) * gain
-    p = (k.shape[0] - factor) + (convW - 1)
-    if data_format == 'NCHW':
-        s = [1, 1, factor, factor]
-    else:
-        s = [1, factor, factor, 1]
-    x = _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
-    return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format)
-
-#----------------------------------------------------------------------------
-# Internal helper funcs.
-
-def _shape(tf_expr, dim_idx):
-    if tf_expr.shape.rank is not None:
-        dim = tf_expr.shape[dim_idx].value
-        if dim is not None:
-            return dim
-    return tf.shape(tf_expr)[dim_idx]
-
-def _setup_kernel(k):
-    k = np.asarray(k, dtype=np.float32)
-    if k.ndim == 1:
-        k = np.outer(k, k)
-    k /= np.sum(k)
-    assert k.ndim == 2
-    assert k.shape[0] == k.shape[1]
-    return k
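-
-# e.g. _setup_kernel([1, 3, 3, 1]) returns the 4x4 kernel
-# outer([1,3,3,1], [1,3,3,1]) / 64, a common binomial filter for FIR resampling.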
-
-def _simple_upfirdn_2d(x, k, up=1, down=1, pad0=0, pad1=0, data_format='NCHW', impl='cuda'):
-    assert data_format in ['NCHW', 'NHWC']
-    assert x.shape.rank == 4
-    y = x
-    if data_format == 'NCHW':
-        y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1])
-    y = upfirdn_2d(y, k, upx=up, upy=up, downx=down, downy=down, padx0=pad0, padx1=pad1, pady0=pad0, pady1=pad1, impl=impl)
-    if data_format == 'NCHW':
-        y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)])
-    return y
-
-#----------------------------------------------------------------------------
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Clash Royale Nulls APK Indir Unlimited Gems Gold and Custom Cards.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Clash Royale Nulls APK Indir Unlimited Gems Gold and Custom Cards.md
deleted file mode 100644
index fd301e8d0dfeec52a61f9395c6f9a3ad3261888d..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Clash Royale Nulls APK Indir Unlimited Gems Gold and Custom Cards.md	
+++ /dev/null
@@ -1,107 +0,0 @@
-<br />
-<h1>Clash Royale Nulls APK Indir: How to Download and Play the Private Server with Unlimited Resources and Custom Cards</h1>
- <p>If you are a fan of Clash Royale, the popular real-time strategy game for mobile devices, you might have heard of Clash Royale Nulls APK Indir. This is a private server for the game that offers unlimited gems, gold, and cards, as well as some unique custom cards that are not available in the official game. In this article, we will tell you everything you need to know about Clash Royale Nulls APK Indir, including what it is, why you should download and play it, how to download and install it, how to play it, and what are the custom cards in it.</p>
-<h2>clash royale nulls apk indir</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt; <a href="https://gohhs.com/2uPrb2">https://gohhs.com/2uPrb2</a></b></p><br /><br />
- <h2>What is Clash Royale Nulls APK Indir?</h2>
- <p>Clash Royale Nulls APK Indir is a private server for the popular mobile game Clash Royale. A private server is a modified version of the game that runs on a separate server from the official one. This means that you can play the game with different features and settings than the original one.</p>
- <h3>Clash Royale Nulls APK Indir is a private server for the popular mobile game Clash Royale</h3>
- <p>Clash Royale is a real-time strategy game where you collect and upgrade cards that represent different characters, spells, and buildings. You use these cards to battle other players online in fast-paced duels. The goal is to destroy your opponent's towers while protecting your own. The game is free to play, but you can also buy gems and gold with real money to speed up your progress.</p>
- <h3>It offers unlimited gems, gold, and cards, as well as some unique custom cards that are not available in the official game</h3>
- <p>Clash Royale Nulls APK Indir gives you unlimited gems and gold that you can use to buy and upgrade any cards you want. You can also get any card you want from the shop, including the custom ones that are exclusive to this server. You can also unlock all the chests and get all the rewards instantly.</p>
- <h3>It also allows you to play with real players from around the world, without any restrictions or bans</h3>
- <p>Clash Royale Nulls APK Indir lets you play with other players who are using the same server. You can join or create clans, chat with other players, and participate in challenges and tournaments. You don't have to worry about getting banned or losing your account, as this server is completely separate from the official one.</p>
- <h2>Why Should You Download and Play Clash Royale Nulls APK Indir?</h2>
- <p>Clash Royale Nulls APK Indir is a great way to enjoy Clash Royale without any limitations or costs. Here are some of the reasons why you should download and play it:</p>
- <h3>Clash Royale Nulls APK Indir is fun, free, and easy to install</h3>
- <p>Clash Royale Nulls APK Indir is a fun and exciting game that offers unlimited possibilities and content. You can try out different strategies and decks, without worrying about losing trophies or resources. You can also explore some amazing custom cards that are not found in the original game. The best part is that it is completely free and easy to install. You don't need to root your device or do anything complicated to play it.</p>
- <h3>It lets you experiment with different strategies and decks, without worrying about losing trophies or resources</h3>
- <p>Clash Royale Nulls APK Indir gives you the freedom to play however you want. With unlimited gems and gold you can buy and upgrade any cards, including the custom ones, and mix and match them to build your own unique decks. Since resources never run out, there is nothing to lose, and you can reset your progress at any time.</p>
- <h3>It gives you access to some exclusive features and content that are not found in the original game</h3>
- <p>Clash Royale Nulls APK Indir has some exclusive features and content that are not available in the official game. For example, it has some custom cards that are based on characters from other games or movies, such as Mario, Sonic, Deadpool, and Thanos. It also has some custom cards that have unique abilities and effects, such as teleporting, cloning, freezing, and exploding. These cards can add a lot of variety and fun to the game.</p>
- <h2>How to Download and Install Clash Royale Nulls APK Indir?</h2>
- <p>Clash Royale Nulls APK Indir is compatible with Android devices only. You need to follow three simple steps to download and install it:</p>
- <h3>Clash Royale Nulls APK Indir is compatible with Android devices only</h3>
- <p>Clash Royale Nulls APK Indir is an APK file that can only be installed on Android devices. It is not compatible with iOS devices or PC emulators. You need to have an Android device that meets the minimum requirements of the game, such as having at least 1 GB of RAM and 100 MB of free storage space.</p>
- <h3>You need to follow three simple steps: download the APK file, allow unknown sources, and install the app</h3>
- <p>The first step is to download the APK file from the official website of Null's Royale. You can find the latest version of the file on the homepage of the website. You need to click on the download button and wait for the file to be downloaded on your device.</p>
- <p>The second step is to allow unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. To allow unknown sources, you need to go to your device settings, then security, then unknown sources, and then enable it.</p>
- <p>The third step is to install the app on your device. To do this, you need to locate the downloaded APK file on your device storage, then tap on it and follow the instructions on the screen. The app will be installed in a few seconds and you will be able to launch it from your app drawer.</p>
- <h2>How to Play Clash Royale Nulls APK Indir?</h2>
- <p>Clash Royale Nulls APK Indir works like the regular game, but with some differences. Here are some of the things you need to know about playing it:</p>
- <h3>Clash Royale Nulls APK Indir works like the regular game, but with some differences</h3>
- <p>Clash Royale Nulls APK Indir has the same gameplay mechanics as the original game. You still need to collect and upgrade cards, build your deck, battle other players online, destroy their towers, and win trophies. However, there are some differences that make the game more fun and exciting. For example, you have unlimited gems and gold that you can use to buy and upgrade any cards you want. You also have access to some custom cards that are not available in the official game. You can also play with real players from around the world, without any restrictions or bans.</p>
- <h3>You can use unlimited gems and gold to buy and upgrade any cards you want, including the custom ones</h3>
- <p>As noted above, gems and gold are unlimited, so you can buy and upgrade any card from the shop, including the server-exclusive custom ones, and open every chest for instant rewards. You never have to wait or spend real money to enjoy the game.</p>
- <h3>You can also join or create clans, chat with other players, and participate in challenges and tournaments</h3>
- <p>Clash Royale Nulls APK Indir lets you join or create clans, chat with other players, and participate in challenges and tournaments. You can also donate and request cards from your clan members, as well as share replays and tips. You can also compete with other clans in clan wars, clan chest, and clan trophies. You can also join or create custom tournaments, where you can set your own rules and prizes.</p>
- <h2>What are the Custom Cards in Clash Royale Nulls APK Indir?</h2>
- <p>Clash Royale Nulls APK Indir has some amazing custom cards that are not available in the official game. These cards are based on characters from other games or movies, or have unique abilities and effects. Here are some of the custom cards in Clash Royale Nulls APK Indir:</p>
- <h3>Clash Royale Nulls APK Indir has some amazing custom cards that are not available in the official game</h3>
- <p>These custom cards are created by the server's developers and add a lot of variety and fun to the game. They fall into two broad groups: crossover cards based on characters from other games or movies, and cards with entirely new abilities and effects.</p>
- <h3>Some of them are based on characters from other games or movies, such as Mario, Sonic, Deadpool, and Thanos</h3>
- <p>Some of the custom cards in Clash Royale Nulls APK Indir are based on characters from other games or movies. For example, you can find Mario, Sonic, Deadpool, and Thanos as legendary cards in the game. These cards have special abilities that reflect their personalities and powers. For example, Mario can throw fireballs, Sonic can dash forward, Deadpool can regenerate health, and Thanos can snap his fingers and wipe out half of the enemy's troops.</p>
- <h3>Some of them have unique abilities and effects, such as teleporting, cloning, freezing, and exploding</h3>
- <p>Some of the custom cards in Clash Royale Nulls APK Indir have unique abilities and effects that are not found in the original game. For example, you can find Teleporter, Cloner, Freezer, and Bomber as epic cards in the game. These cards have special abilities that can change the course of the battle. For example, Teleporter can teleport to any location on the arena, Cloner can clone any troop on the field, Freezer can freeze any enemy for a few seconds, and Bomber can explode and deal massive damage to nearby enemies.</p>
- <h2>Conclusion</h2>
- <p>Clash Royale Nulls APK Indir is a private server for Clash Royale that offers unlimited resources and custom cards. It is a great way to enjoy the game without any limitations or costs. It is easy to download and install, and it works on Android devices only. If you want to try out different strategies and decks, experiment with some exclusive features and content, and have fun with other players from around the world, you should definitely download and play Clash Royale Nulls APK Indir. It is one of the best private servers for Clash Royale that you can find.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Clash Royale Nulls APK Indir:</p>
- <h3>Is Clash Royale Nulls APK Indir safe to download and play?</h3>
- <p>Clash Royale Nulls APK Indir is generally considered safe to download and play, but no third-party APK can be guaranteed free of viruses or malware. To minimize the risk, always download the APK file from the official website of Null's Royale, and never from other sources.</p>
- <h3>Is Clash Royale Nulls APK Indir legal to use?</h3>
- <p>Clash Royale Nulls APK Indir is not endorsed by or affiliated with Supercell, the developer of Clash Royale. It is a fan-made project intended for entertainment and personal, non-commercial use only. Be aware that using a private server may go against the terms of service of the official game, and you risk losing your official account or facing further action if you misuse it.</p>
- <h3>Can I play Clash Royale Nulls APK Indir with my friends who are using the official game?</h3>
- <p>No, you cannot play Clash Royale Nulls APK Indir with your friends who are using the official game. Clash Royale Nulls APK Indir runs on a separate server from the official one, and they are not compatible with each other. You can only play with other players who are using the same server as you.</p>
- <h3>Can I switch between Clash Royale Nulls APK Indir and the official game?</h3>
- <p>Yes, you can switch between Clash Royale Nulls APK Indir and the official game. However, you need to have two different accounts for each game, as they do not share the same data or progress. You also need to uninstall one game before installing the other one, as they cannot coexist on the same device.</p>
- <h3>How can I update Clash Royale Nulls APK Indir?</h3>
- <p>To update Clash Royale Nulls APK Indir, you need to download and install the latest version of the APK file from the official website of Null's Royale. You do not need to uninstall the previous version of the game, as the new one will overwrite it automatically. However, you should always back up your data before updating, as you may lose some of your progress or settings.</p>
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/fffffu/bing/src/components/toaster.tsx b/spaces/fffffu/bing/src/components/toaster.tsx
deleted file mode 100644
index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000
--- a/spaces/fffffu/bing/src/components/toaster.tsx
+++ /dev/null
@@ -1,3 +0,0 @@
-'use client'
-
-export { Toaster } from 'react-hot-toast'
diff --git a/spaces/fffiloni/Image-Caption-2-Shap-E/examples/blank.md b/spaces/fffiloni/Image-Caption-2-Shap-E/examples/blank.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/call-bind/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/call-bind/README.md
deleted file mode 100644
index 53649eb4622446f00cafc3b657bf127e5c9e6889..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/call-bind/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# call-bind
-Robustly `.call.bind()` a function.
diff --git a/spaces/fffiloni/mmpose-estimation/app.py b/spaces/fffiloni/mmpose-estimation/app.py
deleted file mode 100644
index 1c6e072d95f81af9738d4632e4fd456a4508bfb6..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/mmpose-estimation/app.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import torch, torchvision
-import sys
-# sys.path.insert(0, 'test_mmpose/')
-try:
-    from mmcv.ops import get_compiling_cuda_version, get_compiler_version
-except ImportError:
-    import mim
-    mim.install('mmcv-full==1.5.0')
-    
-import mmpose
-import gradio as gr
-from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
-                         vis_pose_result, process_mmdet_results)
-from mmdet.apis import inference_detector, init_detector
-from PIL import Image
-import cv2
-import numpy as np
-
-pose_config = 'configs/topdown_heatmap_hrnet_w48_coco_256x192.py'
-pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
-det_config = 'configs/faster_rcnn_r50_fpn_1x_coco.py'
-det_checkpoint = 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
-
-# initialize pose model
-pose_model = init_pose_model(pose_config, pose_checkpoint, device='cuda')
-# initialize detector
-det_model = init_detector(det_config, det_checkpoint, device='cuda')
-
-def predict(img):
-    mmdet_results = inference_detector(det_model, img)
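-    # cat_id=1 keeps only the detector's COCO 'person' class for top-down pose estimation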
-    person_results = process_mmdet_results(mmdet_results, cat_id=1)
-
-    pose_results, returned_outputs = inference_top_down_pose_model(
-      pose_model,
-      img,
-      person_results,
-      bbox_thr=0.3,
-      format='xyxy',
-      dataset=pose_model.cfg.data.test.type)
-    
-    vis_result = vis_pose_result(
-      pose_model,
-      img,
-      pose_results,
-      dataset=pose_model.cfg.data.test.type,
-      show=False)
-
-    #original_image = Image.open(img)
-    height, width, channels = img.shape  # numpy images are (rows, cols, channels)
-    #vis_result = cv2.resize(vis_result, dsize=None, fx=0.5, fy=0.5)
-    print(f"POSE_RESULTS: {pose_results}")
-    
-    # define colors for each body part
-    body_part = {
-        "nose": 0,
-        "left_eye": 1,
-        "right_eye": 2,
-        "left_ear": 3,
-        "right_ear": 4,
-        "left_shoulder": 5,
-        "right_shoulder": 6,
-        "left_elbow": 7,
-        "right_elbow": 8,
-        "left_wrist": 9,
-        "right_wrist": 10,
-        "left_hip": 11,
-        "right_hip": 12,
-        "left_knee": 13,
-        "right_knee": 14,
-        "left_ankle": 15,
-        "right_ankle": 16
-    }
-    orange=(51,153,255)
-    blue=(255,128,0)
-    green=(0,255,0)
-    
-    # create a black image of the same size as the original image
-    black_img = np.zeros((height, width, 3), np.uint8)
-    
-    # iterate through each person in the POSE_RESULTS data
-    for person in pose_results:
-        # get the keypoints for this person
-        keypoints = person['keypoints']
-        
-        # draw lines between keypoints to form a skeleton
-        skeleton = [("right_eye", "left_eye", orange),("nose", "left_eye", orange), ("left_eye", "left_ear", orange), ("nose", "right_eye", orange), ("right_eye", "right_ear", orange),
-                    ("left_shoulder", "left_ear", orange),("right_shoulder", "right_ear", orange), ("left_shoulder", "right_shoulder", orange), ("left_shoulder", "left_elbow", green), ("right_shoulder", "right_elbow",blue),
-                    ("left_elbow", "left_wrist",green), ("right_elbow", "right_wrist",blue), ("left_shoulder", "left_hip",orange),
-                    ("right_shoulder", "right_hip", orange), ("left_hip", "right_hip", orange), ("left_hip", "left_knee",green),
-                    ("right_hip", "right_knee",blue), ("left_knee", "left_ankle",green), ("right_knee", "right_ankle",blue)]
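-        # each skeleton entry is (start joint, end joint, BGR color):
-        # left limbs green, right limbs blue, head/torso orange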
-        for start_part, end_part, color in skeleton:
-            start_idx = list(body_part.keys()).index(start_part)
-            end_idx = list(body_part.keys()).index(end_part)
-            if keypoints[start_idx][2] > 0.1 and keypoints[end_idx][2] > 0.1:
-                pt1 = (int(keypoints[start_idx][0]), int(keypoints[start_idx][1]))
-                pt2 = (int(keypoints[end_idx][0]), int(keypoints[end_idx][1]))
-                cv2.line(black_img, pt1, pt2, color, thickness=2, lineType=cv2.LINE_AA)
-    
-        # draw circles at each keypoint
-        #for i in range(keypoints.shape[0]):
-        #    pt = (int(keypoints[i][0]), int(keypoints[i][1]))
-        #    cv2.circle(black_img, pt, 3, (255, 255, 255), thickness=-1, lineType=cv2.LINE_AA)
-
-
-    
-    # write black_img to a jpg file (no GUI calls are needed in a headless app)
-    cv2.imwrite("output.jpg", black_img)
-    
-    return vis_result, "output.jpg"
-
-example_list = ['examples/demo2.png']
-title = "MMPose estimation"
-description = ""
-article = ""
-
-# Create the Gradio demo
-demo = gr.Interface(fn=predict,
-                    inputs=gr.Image(), 
-                    outputs=[gr.Image(label='Prediction'), gr.Image(label='Poses')], 
-                    examples=example_list, 
-                    title=title,
-                    description=description,
-                    article=article)
-
-# Launch the demo!
-demo.launch()
\ No newline at end of file
diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/envs/doorkey.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/envs/doorkey.py
deleted file mode 100644
index 3bcc74128ba7b1dae5bc3538f5f029dca0efd819..0000000000000000000000000000000000000000
--- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/envs/doorkey.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from gym_minigrid.minigrid import *
-from gym_minigrid.register import register
-
-class DoorKeyEnv(MiniGridEnv):
-    """
-    Environment with a door and key, sparse reward
-    """
-
-    def __init__(self, size=8):
-        super().__init__(
-            grid_size=size,
-            max_steps=10*size*size
-        )
-
-    def _gen_grid(self, width, height):
-        # Create an empty grid
-        self.grid = Grid(width, height)
-
-        # Generate the surrounding walls
-        self.grid.wall_rect(0, 0, width, height)
-
-        # Place a goal in the bottom-right corner
-        self.put_obj(Goal(), width - 2, height - 2)
-
-        # Create a vertical splitting wall
-        splitIdx = self._rand_int(2, width-2)
-        self.grid.vert_wall(splitIdx, 0)
-
-        # Place the agent at a random position and orientation
-        # on the left side of the splitting wall
-        self.place_agent(size=(splitIdx, height))
-
-        # Place a door in the wall
-        doorIdx = self._rand_int(1, height-2)
-        self.put_obj(Door('yellow', is_locked=True), splitIdx, doorIdx)
-
-        # Place a yellow key on the left side
-        self.place_obj(
-            obj=Key('yellow'),
-            top=(0, 0),
-            size=(splitIdx, height)
-        )
-
-        self.mission = "use the key to open the door and then get to the goal"
-
-class DoorKeyEnv5x5(DoorKeyEnv):
-    def __init__(self):
-        super().__init__(size=5)
-
-class DoorKeyEnv6x6(DoorKeyEnv):
-    def __init__(self):
-        super().__init__(size=6)
-
-class DoorKeyEnv16x16(DoorKeyEnv):
-    def __init__(self):
-        super().__init__(size=16)
-
-register(
-    id='MiniGrid-DoorKey-5x5-v0',
-    entry_point='gym_minigrid.envs:DoorKeyEnv5x5'
-)
-
-register(
-    id='MiniGrid-DoorKey-6x6-v0',
-    entry_point='gym_minigrid.envs:DoorKeyEnv6x6'
-)
-
-register(
-    id='MiniGrid-DoorKey-8x8-v0',
-    entry_point='gym_minigrid.envs:DoorKeyEnv'
-)
-
-register(
-    id='MiniGrid-DoorKey-16x16-v0',
-    entry_point='gym_minigrid.envs:DoorKeyEnv16x16'
-)
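-
-# A minimal usage sketch (assumes `gym` is installed and that importing
-# `gym_minigrid` has executed the register() calls above; old-style gym API):
-#
-#   import gym
-#   import gym_minigrid  # noqa: F401
-#   env = gym.make('MiniGrid-DoorKey-8x8-v0')
-#   obs = env.reset()
-#   obs, reward, done, info = env.step(env.action_space.sample())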
diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/informationseekingenv.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/informationseekingenv.py
deleted file mode 100644
index 26f719063efbc8920a7ea0c5c88127f3aecbc809..0000000000000000000000000000000000000000
--- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/informationseekingenv.py
+++ /dev/null
@@ -1,1274 +0,0 @@
-import time
-import random
-
-import numpy as np
-from gym_minigrid.social_ai_envs.socialaigrammar import SocialAIGrammar, SocialAIActions, SocialAIActionSpace
-from gym_minigrid.minigrid import *
-from gym_minigrid.register import register
-import time
-from collections import deque
-
-def next_to(posa, posb):
-    if type(posa) == tuple:
-        posa = np.array(posa)
-
-    if type(posb) == tuple:
-        posb = np.array(posb)
-
-    return abs(posa-posb).sum() == 1
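-
-# e.g. next_to((2, 3), (2, 4)) -> True (Manhattan distance 1),
-#      next_to((2, 3), (3, 4)) -> False (diagonal neighbours are distance 2)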
-
-
-class Caretaker(NPC):
-    """
-    A simple NPC that knows who is telling the truth
-    """
-    def __init__(self, color, name, env):
-        super().__init__(color)
-        self.name = name
-        self.env = env
-        self.npc_dir = 1  # NPC initially looks downward
-        self.npc_type = 0  # this will be put into the encoding
-
-        self.was_introduced_to = False
-        self.decoy_color_given = False
-
-        self.ate_an_apple = False
-        self.demo_over = False
-        self.demo_over_and_position_safe = False
-        self.apple_unlocked_for_agent = False
-
-        self.list_of_possible_utterances = [
-            *self.list_of_possible_utterances,
-            "Hot",
-            "Warm",
-            "Medium",
-            "Cold",
-            *COLOR_NAMES
-        ]
-
-        # target obj
-        assert self.env.problem == (self.env.parameters["Problem"] if self.env.parameters else "Apples")
-
-        if self.env.problem in ["Apples"]:
-            self.target_obj = self.env.apple
-            self.distractor_obj = None
-
-        elif self.env.problem == "Doors":
-            self.target_obj = self.env.door
-            self.distractor_obj = self.env.distractor_door
-
-        elif self.env.problem == "Levers":
-            self.target_obj = self.env.lever
-            self.distractor_obj = self.env.distractor_lever
-
-        elif self.env.problem == "Boxes":
-            self.target_obj = self.env.box
-            self.distractor_obj = self.env.distractor_box
-
-        elif self.env.problem == "Switches":
-            self.target_obj = self.env.switch
-            self.distractor_obj = self.env.distractor_switch
-
-        elif self.env.problem == "Generators":
-            self.target_obj = self.env.generator
-            self.distractor_obj = self.env.distractor_generator
-
-        elif self.env.problem in ["Marble", "Marbles"]:
-            self.target_obj = self.env.generator
-            self.distractor_obj = self.env.distractor_generator
-
-        if self.env.ja_recursive:
-            if int(self.env.parameters["N"]) == 1:
-                self.ja_decoy = self.env._rand_elem([self.target_obj])
-            else:
-                self.ja_decoy = self.env._rand_elem([self.target_obj, self.distractor_obj])
-
-            # the other object is a decoy distractor
-            self.ja_decoy_distractor = list({self.target_obj, self.distractor_obj} - {self.ja_decoy})[0]
-
-            self.decoy_point_from_loc = self.find_point_from_loc(
-                target_pos=self.ja_decoy.cur_pos,
-                distractor_pos=self.ja_decoy_distractor.cur_pos if self.ja_decoy_distractor else None
-            )
-
-        self.point_from_loc = self.find_point_from_loc()
-
-        assert self.env.grammar.contains_utterance(self.introduction_statement)
-
-    def step(self, utterance):
-        reply, info = super().step()
-
-        if self.env.hidden_npc:
-            return reply, info
-
-        scaffolding = self.env.parameters.get("Scaffolding", "N") == "Y"
-        language_color = False
-        language_feedback = False
-        pointing = False
-        emulation = False
-
-        if not scaffolding:
-            cue_type = self.env.parameters["Cue_type"]
-
-            if cue_type == "Language_Color":
-                language_color = True
-            elif cue_type == "Language_Feedback":
-                language_feedback = True
-            elif cue_type == "Pointing":
-                pointing = True
-            elif cue_type == "Emulation":
-                emulation = True
-            else:
-                raise ValueError(f"Cue_type ({cue_type}) not defined.")
-        else:
-            # there are no cues if scaffolding is used (the peer gives the apples to the agent)
-            assert "Cue_type" not in self.env.parameters
-
-            # there is no additional test for joint attention (no cues are given so this wouldn't make sense)
-            assert not self.env.ja_recursive
-
-        reply, action = None, None
-        if not self.was_introduced_to:
-            # check introduction, updates was_introduced_to if needed
-            reply, action = self.handle_introduction(utterance)
-
-            assert action is None
-
-            if self.env.ja_recursive:
-                # look at the center of the room (this makes the cue-giving inside and outside JA different)
-                action = self.look_at_action([self.env.current_width // 2, self.env.current_height // 2])
-            else:
-                # look at the agent
-                action = self.look_at_action(self.env.agent_pos)
-
-            if self.was_introduced_to:
-                # was introduced just now
-                if self.is_pointing():
-                    action = self.stop_point
-
-                if language_color:
-                    # only say the color once
-                    reply = self.target_obj.color
-
-            elif self.env.ja_recursive:
-                # was not introduced
-                if language_feedback:
-                    # random reply
-                    reply = self.env._rand_elem([
-                        "Hot",
-                        "Warm",
-                        "Medium",
-                        "Cold"
-                    ])
-
-                if language_color and not self.decoy_color_given:
-                    # color of a decoy (can be the correct one)
-                    reply = self.ja_decoy.color
-                    self.decoy_color_given = True
-
-                if pointing:
-                    # point to a decoy
-                    action = self.goto_point_action(
-                        point_from_loc=self.decoy_point_from_loc,
-                        target_pos=self.ja_decoy.cur_pos,
-                        distractor_pos=self.ja_decoy_distractor.cur_pos if self.ja_decoy_distractor else None
-                    )
-
-                    if self.is_pointing():
-                        # if it's already pointing, turn to look at the center (to avoid looking at the wall)
-                        action = self.look_at_action([self.env.current_width//2, self.env.current_height//2])
-
-
-        else:
-
-            if self.was_introduced_to and language_color:
-                # language only once at introduction
-                # reply = self.target_obj.color
-                action = self.look_at_action(self.env.agent_pos)
-
-            if self.was_introduced_to and language_feedback:
-                # closeness string
-                agent_distance_to_target = np.abs(self.target_obj.cur_pos - self.env.agent_pos).sum()
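-                # bands over the L1 (Manhattan) distance to the target:
-                # <=1 Hot, <=2 Warm, <=5 Medium, otherwise Cold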
-                if agent_distance_to_target <= 1:
-                    reply = "Hot"
-                elif agent_distance_to_target <= 2:
-                    reply = "Warm"
-                elif agent_distance_to_target <= 5:
-                    reply = "Medium"
-                else:
-                    reply = "Cold"
-
-                action = self.look_at_action(self.env.agent_pos)
-
-            # pointing
-            if self.was_introduced_to and pointing:
-                if self.env.parameters["N"] == "1":
-                    distractor_pos = None
-                else:
-                    distractor_pos = self.distractor_obj.cur_pos
-
-                action = self.goto_point_action(
-                    point_from_loc=self.point_from_loc,
-                    target_pos=self.target_obj.cur_pos,
-                    distractor_pos=distractor_pos,
-                )
-
-                if self.is_pointing():
-                    action = self.look_at_action(self.env.agent_pos)
-
-            # emulation or scaffolding
-            emulation_demo = self.was_introduced_to and emulation and not self.demo_over
-            scaffolding_help = self.was_introduced_to and scaffolding
-
-            # do the demonstration / unlock the apple
-            # in both of those two scenarios the NPC in essence solves the task
-            # in demonstration - it eats the apple, and reverts the env at the end
-            # in scaffolding - it doesn't eat the apple and looks at the agent
-            if emulation_demo or scaffolding_help:
-
-                if emulation_demo or (scaffolding_help and not self.apple_unlocked_for_agent):
-
-                    if self.is_pointing():
-                        # don't point during demonstration
-                        action = self.stop_point
-
-                    else:
-                        # if apple unlocked go pick it up
-                        if self.target_obj == self.env.switch and self.env.switch.is_on:
-                            assert self.env.parameters["Problem"] == "Switches"
-                            next_target_position = self.env.box.cur_pos
-
-                        elif self.target_obj == self.env.generator and self.env.generator.is_pressed:
-                            assert self.env.parameters["Problem"] in ["Generators", "Marbles", "Marble"]
-                            next_target_position = self.env.generator_platform.cur_pos
-
-                        elif self.target_obj == self.env.door and self.env.door.is_open:
-                            next_target_position = self.env.apple.cur_pos
-
-                        elif self.target_obj == self.env.lever and self.env.lever.is_on:
-                            next_target_position = self.env.apple.cur_pos
-
-                        else:
-                            next_target_position = self.target_obj.cur_pos
-
-                        if self.target_obj == self.env.generator and not self.env.generator.is_pressed:
-                            if not self.env.generator.marble_activation:
-                                # push generator
-                                action = self.path_to_pos(next_target_position)
-                            else:
-                                # find angle
-                                if self.env.marble.moving_dir is None:
-                                    distance = (self.env.marble.cur_pos - self.env.generator.cur_pos)
-                                    diff = np.sign(distance)
-
-                                    if sum(abs(diff)) == 1:
-                                        # if the agent pushed the ball during demo diff can be > 1, then it's unsolvable
-                                        push_pos = self.env.marble.cur_pos+diff
-                                        if all(self.cur_pos == push_pos):
-                                            next_target_position = self.env.marble.cur_pos
-                                        else:
-                                            next_target_position = push_pos
-
-                                        # go to loc in front of
-                                        # push
-                                        action = self.path_to_pos(next_target_position)
-
-                        else:
-                            # toggle all other objects
-                            action = self.path_to_toggle_pos(next_target_position)
-
-                        # for scaffolding check if trying to eat the apple
-                        # if so, stop - apple is unlocked
-                        if scaffolding_help:
-                            if (
-                                    self.env.get_cell(*self.front_pos) == self.env.apple and
-                                    action == self.toggle_action
-                            ):
-                                # don't eat the apple
-                                action = None
-                                self.apple_unlocked_for_agent = True
-
-                        # for emulation check if trying to toggle the eaten apple
-                        # if so, stop and revert the env - demo is over
-                        if emulation_demo:
-                            if (
-                                self.ate_an_apple and
-                                self.env.get_cell(*self.front_pos) == self.env.apple and
-                                action == self.toggle_action and
-                                self.env.apple.eaten
-                            ):
-                                # trying to toggle an apple it ate
-                                self.env.revert()
-                                self.demo_over = True
-                                action = None
-
-                # if scaffolding apple unlocked, look at the agent
-                if scaffolding_help and self.apple_unlocked_for_agent:
-                    if all(self.cur_pos == self.initial_pos):
-                        # if the apple is unlocked look at the agent
-                        wanted_dir = self.compute_wanted_dir(self.env.agent_pos)
-                        action = self.compute_turn_action(wanted_dir)
-                    else:
-                        # go to init pos, this removes problems in case the apple is unreachable now
-                        action = self.path_to_pos(self.initial_pos)
-
-            if self.was_introduced_to and emulation and self.demo_over and not self.demo_over_and_position_safe:
-                if self.env.is_in_marble_way(self.cur_pos):
-                    action = self.path_to_pos(self.find_point_from_loc())
-                else:
-                    self.demo_over_and_position_safe = True
-
-            if self.demo_over_and_position_safe:
-                assert emulation or scaffolding
-                # look at the agent after demo is done
-                action = self.look_at_action(self.env.agent_pos)
-
-            if self.was_introduced_to and self.env.parameters["Scaffolding"] == "Y":
-                if "Emulation" in self.env.parameters or "Pointing" in self.env.parameters or "Language_grounding" in self.env.parameters:
-                    raise ValueError(
-                        "Scaffolding cannot be used with information giving (Emulation, Pointing, Language_grounding)"
-                    )
-
-        eaten_before = self.env.apple.eaten
-
-        if action is not None:
-            action()
-
-        # check if the NPC ate the apple
-        eaten_after = self.env.apple.eaten
-        self.ate_an_apple = not eaten_before and eaten_after
-
-        info = self.create_info(
-            action=action,
-            utterance=reply,
-            was_introduced_to=self.was_introduced_to,
-        )
-
-        assert (reply or "no_op") in self.list_of_possible_utterances
-
-        return reply, info
-
-    def create_info(self, action, utterance, was_introduced_to):
-        info = {
-            "prim_action": action.__name__ if action is not None else "no_op",
-            "utterance": utterance or "no_op",
-            "was_introduced_to": was_introduced_to
-        }
-        return info
-
-    def is_point_from_loc(self, pos, target_pos=None, distractor_pos=None):
-
-        if target_pos is None:
-            target_pos = self.target_obj.cur_pos
-
-        if distractor_pos is None:
-            if self.distractor_obj is not None:
-                distractor_pos = self.distractor_obj.cur_pos
-            else:
-                distractor_pos = [None, None]
-
-        if self.env.is_in_marble_way(pos):
-            return False
-
-        if self.env.problem in ["Doors", "Levers"]:
-            # must not be in front of a door
-            if abs(self.env.door_current_pos - pos).sum() == 1:
-                return False
-
-            if self.env.problem in ["Doors"]:
-                if abs(self.env.distractor_current_pos - pos).sum() == 1:
-                    return False
-
-        if any(pos == target_pos):
-            same_ind = np.argmax(target_pos == pos)
-
-            #  is there an occlusion in the way
-            start = pos[1-same_ind]
-            end = target_pos[1-same_ind]
-            step = 1 if start <= end else -1
-            for i in np.arange(start, end, step):
-                p = pos.copy()
-                p[1-same_ind] = i
-                cell = self.env.grid.get(*p)
-
-                if cell is not None:
-                    if not cell.see_behind():
-                        return False
-
-            if pos[same_ind] != distractor_pos[same_ind]:
-                return True
-
-            if pos[same_ind] == distractor_pos[same_ind]:
-                # if in between
-                if distractor_pos[1-same_ind] < pos[1-same_ind] < target_pos[1-same_ind]:
-                    return True
-
-                if distractor_pos[1-same_ind] > pos[1-same_ind] > target_pos[1-same_ind]:
-                    return True
-        return False
-
-    def find_point_from_loc(self, target_pos=None, distractor_pos=None):
-        reject_fn = lambda env, p: not self.is_point_from_loc(p, target_pos=target_pos, distractor_pos=distractor_pos)
-
-        point = self.env.find_loc(size=(self.env.wall_x, self.env.wall_y), reject_fn=reject_fn, reject_agent_pos=False)
-
-        # assert all(point < np.array([self.env.wall_x, self.env.wall_y]))
-        # assert all(point > np.array([0, 0]))
-
-        return point
-
-    def goto_point_action(self, point_from_loc, target_pos, distractor_pos):
-        if self.is_point_from_loc(self.cur_pos, target_pos=target_pos, distractor_pos=distractor_pos):
-            # point to a direction
-            action = self.compute_wanted_point_action(target_pos)
-
-        else:
-            # do not point if not is_point_from_loc
-            if self.is_pointing():
-                # stop pointing
-                action = self.stop_point
-
-            else:
-                # move
-                action = self.path_to_pos(point_from_loc)
-
-        return action
-
-
-class InformationSeekingEnv(MultiModalMiniGridEnv):
-    """
-    Environment in which the agent is instructed to go to a given object
-    named using an English text string
-    """
-
-    def __init__(
-        self,
-        size=10,
-        diminished_reward=True,
-        step_penalty=False,
-        knowledgeable=False,
-        max_steps=80,
-        hidden_npc=False,
-        switch_no_light=True,
-        reward_diminish_factor=0.1,
-        see_through_walls=False,
-        n_colors=None,
-        egocentric_observation=True,
-    ):
-        assert size >= 5
-        self.empty_symbol = "NA \n"
-        self.diminished_reward = diminished_reward
-        self.step_penalty = step_penalty
-        self.knowledgeable = knowledgeable
-        self.hidden_npc = hidden_npc
-        self.hear_yourself = False
-        self.switch_no_light = switch_no_light
-
-        if n_colors is None:
-            self.n_colors = len(COLOR_NAMES)
-        else:
-            self.n_colors = n_colors
-
-        self.grammar = SocialAIGrammar()
-
-        self.init_done = False
-        # parameters - to be set in reset
-        self.parameters = None
-
-        self.add_npc_direction = True
-        self.add_npc_point_direction = True
-        self.add_npc_last_prim_action = True
-
-        self.reward_diminish_factor = reward_diminish_factor
-
-        self.egocentric_observation = egocentric_observation
-        self.encoding_size = 3 + 2*bool(not self.egocentric_observation) + bool(self.add_npc_direction) + bool(self.add_npc_point_direction) + bool(self.add_npc_last_prim_action)
-
-        super().__init__(
-            grid_size=size,
-            max_steps=max_steps,
-            # Set this to True for maximum speed
-            see_through_walls=see_through_walls,
-            actions=SocialAIActions,  # primitive actions
-            action_space=SocialAIActionSpace,
-            add_npc_direction=self.add_npc_direction,
-            add_npc_point_direction=self.add_npc_point_direction,
-            add_npc_last_prim_action=self.add_npc_last_prim_action,
-            reward_diminish_factor=self.reward_diminish_factor,
-        )
-        self.all_npc_utterance_actions = self.caretaker.list_of_possible_utterances
-        self.prim_actions_dict = SocialAINPCActionsDict
-
-    def revert(self):
-        self.grid.set(*self.caretaker.cur_pos, None)
-        self.place_npc()
-        self.put_objects_in_env(remove_objects=True)
-
-    def is_in_marble_way(self, pos):
-        target_pos = self.generator_current_pos
-
-        # generator distractor is in the same row / column as the marble and the generator
-        # if self.distractor_current_pos is not None:
-        #     distractor_pos = self.distractor_current_pos
-        # else:
-        #     distractor_pos = [None, None]
-
-        if self.problem in ["Marbles", "Marble"]:
-            # point can't be in the same row or column as both the marble and the generator
-            # all three: marble, generator, loc are in the same row or column
-            if any((pos == target_pos) * (pos == self.marble_current_pos)):
-                # all three: marble, generator, loc are in the same row or column -> is in its way
-                return True
-
-            if int(self.parameters["N"]) > 1:
-                # is it in the way for the distractor generator
-                if any((pos == self.distractor_current_pos) * (pos == self.marble_current_pos)):
-                    # all three: marble, distractor generator, loc are in the same row or column -> is in its way
-                    return True
-
-        # all good
-        return False
-
-
-    def _gen_grid(self, width_, height_):
-        # Create the grid
-        self.grid = Grid(width_, height_, nb_obj_dims=self.encoding_size)
-
-        # new
-        min_w = min(9, width_)
-        min_h = min(9, height_)
-        self.current_width = self._rand_int(min_w, width_+1)
-        self.current_height = self._rand_int(min_h, height_+1)
-
-        self.wall_x = self.current_width-1
-        self.wall_y = self.current_height-1
-
-        # problem: Apples/Boxes/Switches/Generators/Marbles
-        self.problem = self.parameters["Problem"] if self.parameters else "Apples"
-        num_of_colors = self.parameters.get("Num_of_colors", None) if self.parameters else None
-        if num_of_colors is None:
-            num_of_colors = self.n_colors
-
-        # additional test for recursiveness of joint attention -> cues are given outside of JA
-        self.ja_recursive = self.parameters.get("JA_recursive", False) if self.parameters else False
-
-        self.add_obstacles()
-        if self.obstacles != "No":
-            warnings.warn("InformationSeeking should not be using obstacles.")
-
-        # Generate the surrounding walls
-        self.grid.wall_rect(0, 0, self.current_width, self.current_height)
-
-        if self.problem in ["Doors", "Levers"]:
-            # Add a second wall: this is needed so that an apple cannot be seen diagonally between the wall and the door
-            self.grid.wall_rect(1, 1, self.wall_x-1, self.wall_y-1)
-
-        # apple
-        self.apple_pos = (self.current_width, self.current_height)
-
-        # box
-        locked = self.problem == "Switches"
-
-        if num_of_colors is None:
-            POSSIBLE_COLORS = COLOR_NAMES.copy()
-
-        else:
-            POSSIBLE_COLORS = COLOR_NAMES[:int(num_of_colors)].copy()
-
-        self.box_color = self._rand_elem(POSSIBLE_COLORS)
-
-        if self.problem in ["Doors", "Levers"]:
-            # door
-
-            # find the position on a wall
-            self.apple_current_pos = self.find_loc(
-                size=(self.current_width, self.current_height),
-                reject_taken_pos=False,  # we will create a gap in the wall
-                reject_agent_pos=True,
-                reject_fn=lambda _, pos:
-                not (pos[0] in [0, self.wall_x] or pos[1] in [0, self.wall_y]) or  # reject not on a wall
-                tuple(pos) in [
-                    (0, 0),
-                    (0, 1),
-                    (1, 0),
-
-                    (0, self.wall_y),
-                    (0, self.wall_y-1),
-                    (1, self.wall_y),
-
-                    (self.wall_x, self.wall_y),
-                    (self.wall_x-1, self.wall_y),
-                    (self.wall_x, self.wall_y-1),
-
-                    (self.wall_x, 0),
-                    (self.wall_x, 1),
-                    (self.wall_x-1, 0),
-                ]
-            )
-            self.grid.set(*self.apple_current_pos, None)  # hole in the wall
-
-            # door is in front of the apple
-            door_x = {
-                0: 1,
-                self.wall_x: self.wall_x - 1,
-            }.get(self.apple_current_pos[0], self.apple_current_pos[0])
-            door_y = {
-                0: 1,
-                self.wall_y: self.wall_y - 1,
-            }.get(self.apple_current_pos[1], self.apple_current_pos[1])
-
-            self.door_current_pos = np.array([door_x, door_y])
-            self.grid.set(*self.door_current_pos, None)  # hole in the wall
-
-
-            #  lever
-            if self.problem in ["Levers"]:
-                self.lever_current_pos = self.find_loc(
-                    top=(2, 2),
-                    size=(self.current_width-4, self.current_height-4),
-                    reject_agent_pos=True,
-                    reject_fn=lambda _, pos: next_to(pos, self.door_current_pos) # reject in front of the door
-                )
-
-        else:
-            # find the position for the apple/box/generator_platform
-            self.apple_current_pos = self.find_loc(size=self.apple_pos, reject_agent_pos=True)
-            assert all(self.apple_current_pos < np.array([self.current_width-1, self.current_height-1]))
-
-        # door
-        self.door_color = self._rand_elem(POSSIBLE_COLORS)
-
-        # lever
-        self.lever_color = self._rand_elem(POSSIBLE_COLORS)
-
-        # switch
-        self.switch_pos = (self.current_width, self.current_height)
-        self.switch_color = self._rand_elem(POSSIBLE_COLORS)
-        self.switch_current_pos = self.find_loc(
-            size=self.switch_pos,
-            reject_agent_pos=True,
-            reject_fn=lambda _, pos: tuple(pos) in map(tuple, [self.apple_current_pos]),
-        )
-
-        # generator
-        self.generator_pos = (self.current_width, self.current_height)
-        self.generator_color = self._rand_elem(POSSIBLE_COLORS)
-        self.generator_current_pos = self.find_loc(
-            size=self.generator_pos,
-            reject_agent_pos=True,
-            reject_fn=lambda _, pos: (
-                tuple(pos) in map(tuple, [self.apple_current_pos])
-                or
-                (self.problem in ["Marble"] and tuple(pos) in [
-                    # not in corners
-                    (1, 1),
-                    (self.current_width-2, 1),
-                    (1, self.current_height-2),
-                    (self.current_width-2, self.current_height-2),
-                ])
-                or
-                # not in the same row/column as the platform
-                (self.problem in ["Marble"] and any(pos == self.apple_current_pos))
-            ),
-        )
-
-        # generator platform
-        self.generator_platform_color = self._rand_elem(POSSIBLE_COLORS)
-
-        # marbles
-        self.marble_pos = (self.current_width, self.current_height)
-        self.marble_color = self._rand_elem(POSSIBLE_COLORS)
-        self.marble_current_pos = self.find_loc(
-            size=self.marble_pos,
-            reject_agent_pos=True,
-            reject_fn=lambda _, pos: self.problem in ["Marbles", "Marble"] and (
-                tuple(pos) in map(tuple, [self.apple_current_pos, self.generator_current_pos])
-                or
-                all(pos != self.generator_current_pos)  # reject if not in row or column as the generator
-                or
-                any(pos == 1)  # next to a wall
-                or
-                pos[1] == self.current_height-2
-                or
-                pos[0] == self.current_width-2
-            ),
-        )
-
-        # distractor
-        if self.problem == "Boxes":
-            assert not locked
-            POSSIBLE_COLORS.remove(self.box_color)
-
-        elif self.problem == "Doors":
-            POSSIBLE_COLORS.remove(self.door_color)
-
-        elif self.problem == "Levers":
-            POSSIBLE_COLORS.remove(self.lever_color)
-
-        elif self.problem == "Switches":
-            POSSIBLE_COLORS.remove(self.switch_color)
-
-        elif self.problem in ["Generators", "Marble"]:
-            POSSIBLE_COLORS.remove(self.generator_color)
-
-        self.distractor_color = self._rand_elem(POSSIBLE_COLORS)
-        self.distractor_pos = (self.current_width, self.current_height)
-
-        # distractor reject function
-        if self.problem in ["Apples", "Boxes"]:
-            distractor_reject_fn = lambda _, pos: tuple(pos) in map(tuple, [self.apple_current_pos])
-
-        elif self.problem in ["Switches"]:
-            distractor_reject_fn = lambda _, pos: tuple(pos) in map(tuple, [self.apple_current_pos, self.switch_current_pos])
-
-        elif self.problem in ["Generators"]:
-            distractor_reject_fn = lambda _, pos: tuple(pos) in map(tuple, [self.apple_current_pos, self.generator_current_pos])
-
-        elif self.problem in ["Marble"]:
-            # problem is marbles
-            if self.parameters["N"] == "1":
-                distractor_reject_fn = lambda _, pos: tuple(pos) in map(tuple, [self.apple_current_pos, self.generator_current_pos, self.marble_current_pos])
-            else:
-                same_dim = (self.generator_current_pos == self.marble_current_pos).argmax()
-                distractor_same_dim = 1 - same_dim
-                distractor_reject_fn = lambda _, pos: tuple(pos) in map(tuple, [
-                    self.apple_current_pos,
-                    self.generator_current_pos,
-                    self.marble_current_pos
-                ]) or pos[distractor_same_dim] != self.marble_current_pos[distractor_same_dim]
-
-        elif self.problem in ["Doors"]:
-            # reject not next to a wall
-            distractor_reject_fn = lambda _, pos: (
-                not (pos[0] in [1, self.wall_x-1] or pos[1] in [1, self.wall_y-1]) or  # reject not on a wall
-                tuple(pos) in [
-                    (1, 1),
-                    (self.wall_x-1, self.wall_y - 1),
-                    (1, self.wall_y-1),
-                    (self.wall_x-1, 1),
-                    tuple(self.door_current_pos)
-                ]
-            )
-
-        elif self.problem in ["Levers"]:
-            # not in front of the door
-            distractor_reject_fn = lambda _, pos: next_to(pos, self.door_current_pos) or tuple(pos) in list(map(tuple, [self.door_current_pos, self.lever_current_pos]))
-
-        else:
-            raise ValueError("Problem {} undefined.".format(self.problem))
-
-        if self.problem == "Doors":
-
-            self.distractor_current_pos = self.find_loc(
-                top=(1, 1),
-                size=(self.current_width-2, self.current_height-2),
-                reject_agent_pos=True,
-                reject_fn=distractor_reject_fn,
-                reject_taken_pos=False
-            )
-
-            if self.parameters["N"] != "1":
-                self.grid.set(*self.distractor_current_pos, None)  # hole in the wall
-        else:
-            self.distractor_current_pos = self.find_loc(
-                size=self.distractor_pos,
-                reject_agent_pos=True,
-                reject_fn=distractor_reject_fn
-            )
-
-        self.put_objects_in_env()
-
-
-        # NPC
-        put_peer = self.parameters["Peer"] if self.parameters else "N"
-        assert put_peer in ["Y", "N"]
-
-        color = self._rand_elem(COLOR_NAMES)
-        self.caretaker = Caretaker(color, "Caretaker", self)
-
-        if put_peer == "Y":
-            self.place_npc()
-
-
-        # Randomize the agent's start position and orientation
-        self.place_agent(size=(self.current_width, self.current_height))
-
-        # Generate the mission string
-        self.mission = 'lets collaborate'
-
-        # Dummy beginning string
-        # self.beginning_string = "This is what you hear. \n"
-        self.beginning_string = "Conversation: \n"
-        self.utterance = self.beginning_string
-
-        # utterance appended at the end of each step
-        self.utterance_history = ""
-
-        # used for rendering
-        self.full_conversation = self.utterance
-        self.outcome_info = None
-
-    def place_npc(self):
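-        # keep the NPC from spawning where it would block the task objects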
-        if self.problem in ["Doors"]:
-            self.place_obj(
-                self.caretaker,
-                size=(self.current_width, self.current_height),
-                reject_fn=lambda _, pos: next_to(pos, self.door_current_pos) or next_to(pos, self.distractor_current_pos)
-            )
-
-        elif self.problem in ["Levers"]:
-            self.place_obj(
-                self.caretaker,
-                size=(self.current_width, self.current_height),
-                reject_fn=lambda _, pos: next_to(pos, self.door_current_pos)
-            )
-
-        else:
-            self.place_obj(self.caretaker, size=(self.current_width, self.current_height), reject_fn=InformationSeekingEnv.is_in_marble_way)
-
-        self.caretaker.initial_pos = self.caretaker.cur_pos
-
-    def put_objects_in_env(self, remove_objects=False):
-
-        assert self.apple_current_pos is not None
-        assert self.switch_current_pos is not None
-
-        self.doors_block_set = []
-        self.levers_block_set = []
-        self.switches_block_set = []
-        self.boxes_block_set = []
-        self.generators_block_set = []
-
-        self.distractor_door = None
-        self.distractor_lever = None
-        self.distractor_box = None
-        self.distractor_switch = None
-        self.distractor_generator = None
-
-        # problem: Apples/Boxes/Doors/Levers/Switches/Generators/Marble
-        assert self.problem == (self.parameters["Problem"] if self.parameters else "Apples")
-
-        # move objects (used only in revert), not in gen_grid
-        if remove_objects:
-            # remove apple or box
-            # assert type(self.grid.get(*self.apple_current_pos)) in [Apple, LockableBox]
-            # self.grid.set(*self.apple_current_pos, None)
-
-            # remove apple (after demo it must be an apple)
-            assert type(self.grid.get(*self.apple_current_pos)) in [Apple]
-            self.grid.set(*self.apple_current_pos, None)
-
-            if self.problem in ["Doors"]:
-                # assert type(self.grid.get(*self.door_current_pos)) in [Door]
-                self.grid.set(*self.door.cur_pos, None)
-
-            elif self.problem in ["Levers"]:
-                # assert type(self.grid.get(*self.door_current_pos)) in [Door]
-                self.grid.set(*self.remote_door.cur_pos, None)
-                self.grid.set(*self.lever.cur_pos, None)
-
-            elif self.problem in ["Switches"]:
-                # remove switch
-                assert type(self.grid.get(*self.switch_current_pos)) in [Switch]
-                self.grid.set(*self.switch.cur_pos, None)
-
-            elif self.problem in ["Generators", "Marbles", "Marble"]:
-                # remove generator
-                assert type(self.grid.get(*self.generator.cur_pos)) in [AppleGenerator]
-                self.grid.set(*self.generator.cur_pos, None)
-
-                if self.problem in ["Marbles", "Marble"]:
-                    # remove generator
-                    assert type(self.grid.get(*self.marble.cur_pos)) in [Marble]
-                    self.grid.set(*self.marble.cur_pos, None)
-
-                    if self.marble.tee_uncovered:
-                        self.grid.set(*self.marble.tee.cur_pos, None)
-
-            elif self.problem in ["Apples", "Boxes"]:
-                pass
-
-            else:
-                raise ValueError("Undefined problem {}".format(self.problem))
-
-            # remove distractor
-            if self.problem in ["Boxes", "Switches", "Generators", "Marbles", "Marble", "Doors", "Levers"] and self.parameters["N"] != "1":
-                assert type(self.grid.get(*self.distractor_current_pos)) in [LockableBox, Switch, AppleGenerator, Door, Lever]
-                self.grid.set(*self.distractor_current_pos, None)
-
-        # apple
-        self.apple = Apple()
-
-        # Box
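-        # the box is locked only in the Switches problem; the switch created
-        # below (with locker_switch=True) is what unlocks it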
-        locked = self.problem == "Switches"
-
-        self.box = LockableBox(
-            self.box_color,
-            contains=self.apple,
-            is_locked=locked,
-            block_set=self.boxes_block_set
-        )
-        self.boxes_block_set.append(self.box)
-
-        # Doors
-        self.door = Door(
-            color=self.door_color,
-            is_locked=False,
-            block_set=self.doors_block_set,
-        )
-        self.doors_block_set.append(self.door)
-
-        # Levers
-        self.remote_door = RemoteDoor(
-            color=self.door_color,
-        )
-
-        self.lever = Lever(
-            color=self.lever_color,
-            object=self.remote_door,
-            active_steps=None,
-            block_set=self.levers_block_set,
-        )
-        self.levers_block_set.append(self.lever)
-
-        # Switch
-        self.switch = Switch(
-            color=self.switch_color,
-            lockable_object=self.box,
-            locker_switch=True,
-            no_turn_off=True,
-            no_light=self.switch_no_light,
-            block_set=self.switches_block_set,
-        )
-        self.switches_block_set.append(self.switch)
-
-        # Generator
-        self.generator = AppleGenerator(
-            self.generator_color,
-            block_set=self.generators_block_set,
-            # on_push=lambda: self.put_obj_np(self.apple, self.apple_current_pos)
-            on_push=lambda: self.grid.set(*self.apple_current_pos, self.apple),
-            marble_activation=self.problem in ["Marbles", "Marble"],
-        )
-        self.generators_block_set.append(self.generator)
-
-        self.generator_platform = GeneratorPlatform(self.generator_platform_color)
-
-        self.marble = Marble(self.marble_color, env=self)
-
-        if self.problem in ["Apples"]:
-            self.put_obj_np(self.apple, self.apple_current_pos)
-
-        elif self.problem in ["Doors"]:
-            self.put_obj_np(self.apple, self.apple_current_pos)
-            self.put_obj_np(self.door, self.door_current_pos)
-
-        elif self.problem in ["Levers"]:
-            self.put_obj_np(self.apple, self.apple_current_pos)
-            self.put_obj_np(self.remote_door, self.door_current_pos)
-            self.put_obj_np(self.lever, self.lever_current_pos)
-
-        elif self.problem in ["Boxes"]:
-            self.put_obj_np(self.box, self.apple_current_pos)
-
-        elif self.problem in ["Switches"]:
-            self.put_obj_np(self.box, self.apple_current_pos)
-            self.put_obj_np(self.switch, self.switch_current_pos)
-
-        elif self.problem in ["Generators", "Marbles", "Marble"]:
-            self.put_obj_np(self.generator, self.generator_current_pos)
-            self.put_obj_np(self.generator_platform, self.apple_current_pos)
-
-            if self.problem in ["Marbles", "Marble"]:
-                self.put_obj_np(self.marble, self.marble_current_pos)
-        else:
-            raise ValueError("Problem {} not defined. ".format(self.problem))
-
-        # Distractors
-        if self.problem not in ["Apples"]:
-
-            N = int(self.parameters["N"])
-            if N > 1:
-                assert N == 2
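-                # only one distractor is supported: N == 2 means main object + distractor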
-
-                if self.problem == "Boxes":
-                    assert not locked
-
-                    self.distractor_box = LockableBox(
-                        self.distractor_color,
-                        is_locked=locked,
-                        block_set=self.boxes_block_set,
-                    )
-                    self.boxes_block_set.append(self.distractor_box)
-
-                    self.put_obj_np(self.distractor_box, self.distractor_current_pos)
-
-                elif self.problem == "Doors":
-                    self.distractor_door = Door(
-                        color=self.distractor_color,
-                        is_locked=False,
-                        block_set=self.doors_block_set,
-                    )
-                    self.doors_block_set.append(self.distractor_door)
-
-                    self.put_obj_np(self.distractor_door, self.distractor_current_pos)
-
-                elif self.problem == "Levers":
-                    self.distractor_lever = Lever(
-                        color=self.distractor_color,
-                        active_steps=None,
-                        block_set=self.levers_block_set,
-                    )
-                    self.levers_block_set.append(self.distractor_lever)
-                    self.put_obj_np(self.distractor_lever, self.distractor_current_pos)
-
-                elif self.problem == "Switches":
-                    self.distractor_switch = Switch(
-                        color=self.distractor_color,
-                        locker_switch=True,
-                        no_turn_off=True,
-                        no_light=self.switch_no_light,
-                        block_set=self.switches_block_set,
-                    )
-                    self.switches_block_set.append(self.distractor_switch)
-
-                    self.put_obj_np(self.distractor_switch, self.distractor_current_pos)
-
-                elif self.problem in ["Generators", "Marbles", "Marble"]:
-                    self.distractor_generator = AppleGenerator(
-                        color=self.distractor_color,
-                        block_set=self.generators_block_set,
-                        marble_activation=self.problem in ["Marbles", "Marble"],
-                    )
-                    self.generators_block_set.append(self.distractor_generator)
-
-                    self.put_obj_np(self.distractor_generator, self.distractor_current_pos)
-
-                else:
-                    raise ValueError("Undefined N for problem {}".format(self.problem))
-
-    def reset(
-            self, *args, **kwargs
-    ):
-        # This env must be used inside the parametric env
-        if not kwargs:
-            # The only place where kwargs can be empty is during class construction;
-            # reset must be called again before using the env (the parametric env does this in its constructor)
-            assert self.parameters is None
-            assert not self.init_done
-            self.init_done = True
-
-            obs = super().reset()
-            return obs
-
-        else:
-            assert self.init_done
-
-        self.parameters = dict(kwargs)
-
-        assert self.parameters is not None
-        assert len(self.parameters) > 0
-
-        obs = super().reset()
-
-        self.agent_ate_the_apple = False
-        self.agent_opened_the_box = False
-        self.agent_opened_the_door = False
-        self.agent_pulled_the_lever = False
-        self.agent_turned_on_the_switch = False
-        self.agent_pressed_the_generator = False
-        self.agent_pushed_the_marble = False
-
-        return obs
-
-    def step(self, action):
-
-        success = False
-        p_action = action[0]
-        utterance_action = action[1:]
-
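-        # snapshot object states before the primitive action so that transitions
-        # caused by this step can be attributed to the agent below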
-        apple_had_been_eaten = self.apple.eaten
-        box_had_been_opened = self.box.is_open
-        door_had_been_opened = self.door.is_open
-        lever_had_been_pulled = self.lever.is_on
-        switch_had_been_turned_on = self.switch.is_on
-        generator_had_been_pressed = self.generator.is_pressed
-        marble_had_been_pushed = self.marble.was_pushed
-
-        # primitive actions
-        _, reward, done, info = super().step(p_action)
-
-        if self.problem in ["Marbles", "Marble"]:
-            # todo: create steppable objects which are stepped automatically?
-            self.marble.step()
-
-        # eaten just now by primitive actions of the agent
-        if not self.agent_ate_the_apple:
-            self.agent_ate_the_apple = self.apple.eaten and not apple_had_been_eaten
-
-        if not self.agent_opened_the_box:
-            self.agent_opened_the_box = self.box.is_open and not box_had_been_opened
-
-        if not self.agent_opened_the_door:
-            self.agent_opened_the_door = self.door.is_open and not door_had_been_opened
-
-        if not self.agent_pulled_the_lever:
-            self.agent_pulled_the_lever = self.lever.is_on and not lever_had_been_pulled
-
-        if not self.agent_turned_on_the_switch:
-            self.agent_turned_on_the_switch = self.switch.is_on and not switch_had_been_turned_on
-
-        if not self.agent_pressed_the_generator:
-            self.agent_pressed_the_generator = self.generator.is_pressed and not generator_had_been_pressed
-
-        if not self.agent_pushed_the_marble:
-            self.agent_pushed_the_marble = self.marble.was_pushed and not marble_had_been_pushed
-
-        # utterances
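-        # the utterance components are NaN when the agent stays silent;
-        # any non-NaN component counts as speech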
-        agent_spoke = not all(np.isnan(utterance_action))
-        if agent_spoke:
-            utterance = self.grammar.construct_utterance(utterance_action)
-
-            if self.hear_yourself:
-                self.utterance += "YOU: {} \n".format(utterance)
-            self.full_conversation += "YOU: {} \n".format(utterance)
-        else:
-            utterance = None
-
-        if self.parameters["Peer"] == "Y":
-            reply, npc_info = self.caretaker.step(utterance)
-        else:
-            reply = None
-            npc_info = self.caretaker.create_info(
-                action=None,
-                utterance=None,
-                was_introduced_to=False
-            )
-
-        if reply:
-            self.utterance += "{}: {} \n".format(self.caretaker.name, reply)
-            self.full_conversation += "{}: {} \n".format(self.caretaker.name, reply)
-
-        # aftermath
-        if p_action == self.actions.done:
-            done = True
-
-        elif self.agent_ate_the_apple:
-            # check that it is the agent who ate it
-            assert self.actions(p_action) == self.actions.toggle
-            assert self.get_cell(*self.front_pos) == self.apple
-
-            if self.parameters.get("Cue_type", "nan") == "Emulation":
-
-                # during emulation it can be the NPC who eats the apple, opens the box, and turns on the switch
-                if self.parameters["Scaffolding"] and self.caretaker.apple_unlocked_for_agent:
-                    # if the caretaker unlocked the apple the agent gets reward upon eating it
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Apples":
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Doors" and self.agent_opened_the_door:
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Levers" and self.agent_pulled_the_lever:
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Boxes" and self.agent_opened_the_box:
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Switches" and self.agent_opened_the_box and self.agent_turned_on_the_switch:
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem == "Generators" and self.agent_pressed_the_generator:
-                    reward = self._reward()
-                    success = True
-
-                elif self.problem in ["Marble"] and self.agent_pushed_the_marble:
-                    reward = self._reward()
-                    success = True
-
-            else:
-                reward = self._reward()
-                success = True
-
-            done = True
-
-        # constant per-step penalty (encourages finishing quickly)
-        if self.step_penalty:
-            reward = reward - 0.01
-
-        # update obs with NPC movement
-        obs = self.gen_obs(full_obs=self.full_obs)
-
-        # fill observation with text
-        self.append_existing_utterance_to_history()
-        obs = self.add_utterance_to_observation(obs)
-        self.reset_utterance()
-
-        if done:
-            if reward > 0:
-                self.outcome_info = "SUCCESS: agent got {} reward \n".format(np.round(reward, 1))
-            else:
-                self.outcome_info = "FAILURE: agent got {} reward \n".format(reward)
-
-        # is the npc seen by the agent
-        ag_view_npc = self.relative_coords(*self.caretaker.cur_pos)
-        if ag_view_npc is not None:
-            # in the agent's field of view
-            ag_view_npc_x, ag_view_npc_y = ag_view_npc
-
-            n_dims = obs['image'].shape[-1]
-            npc_encoding = self.caretaker.encode(n_dims)
-
-            # is it occluded
-            npc_observed = all(obs['image'][ag_view_npc_x, ag_view_npc_y] == npc_encoding)
-        else:
-            npc_observed = False
-
-        info = {**info, **{"NPC_"+k: v for k, v in npc_info.items()}}
-
-        info["NPC_observed"] = npc_observed
-        info["success"] = success
-
-        assert success == (reward > 0)
-
-        return obs, reward, done, info
-
-    def _reward(self):
-        if self.diminished_reward:
-            return super()._reward()
-        else:
-            return 1.0
-
-    def render(self, *args, **kwargs):
-        obs = super().render(*args, **kwargs)
-        if args and args[0] == 'human':
-            self.window.clear_text()  # erase previous text
-            self.window.set_caption(self.full_conversation)
-
-            # self.window.ax.set_title("correct color: {}".format(self.box.target_color), loc="left", fontsize=10)
-
-            if self.outcome_info:
-                color = None
-                if "SUCCESS" in self.outcome_info:
-                    color = "lime"
-                elif "FAILURE" in self.outcome_info:
-                    color = "red"
-                self.window.add_text(*(0.01, 0.85, self.outcome_info),
-                                     **{'fontsize': 15, 'color': color, 'weight': "bold"})
-
-            self.window.show_img(obs)  # re-draw image to add changes to window
-        return obs
-
-
-register(
-    id='SocialAI-InformationSeeking-v0',
-    entry_point='gym_minigrid.social_ai_envs:InformationSeekingEnv'
-)
\ No newline at end of file
diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/hello_login/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/hello_login/run.py
deleted file mode 100644
index 2483f95e316003acddb71c334ba57d5bb9c129bd..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/hello_login/run.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import gradio as gr
-
-user_db = {"admin": "admin", "foo": "bar"}
-
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-if __name__ == "__main__":
-    demo.launch(
-        auth=lambda u, p: user_db.get(u) == p,
-        auth_message="This is a welcome message")
diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/input-output/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/input-output/run.py
deleted file mode 100644
index a09c05fc514335919f2cfa7330b15cebf10bbe82..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/input-output/run.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import gradio as gr
-
-
-def image_mod(text):
-    return text[::-1]
-
-
-demo = gr.Blocks()
-
-with demo:
-    text = gr.Textbox(label="Input-Output")
-    btn = gr.Button("Run")
-    btn.click(image_mod, text, text)
-
-if __name__ == "__main__":
-    demo.launch()
diff --git a/spaces/frncscp/bullerengue/musika/22kHz/utils.py b/spaces/frncscp/bullerengue/musika/22kHz/utils.py
deleted file mode 100644
index 52ed398266cb813b581ef3125707b5bc10ce01cf..0000000000000000000000000000000000000000
--- a/spaces/frncscp/bullerengue/musika/22kHz/utils.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import os
-import time
-
-import librosa
-import matplotlib.pyplot as plt
-import numpy as np
-import tensorflow as tf
-from tensorflow.python.framework import random_seed
-import gradio as gr
-from scipy.io.wavfile import write as write_wav
-
-
-class Utils_functions:
-    def __init__(self, args):
-
-        self.args = args
-
-        melmat = tf.signal.linear_to_mel_weight_matrix(
-            num_mel_bins=args.mel_bins,
-            num_spectrogram_bins=(4 * args.hop) // 2 + 1,
-            sample_rate=args.sr,
-            lower_edge_hertz=0.0,
-            upper_edge_hertz=args.sr // 2,
-        )
-        mel_f = tf.convert_to_tensor(librosa.mel_frequencies(n_mels=args.mel_bins + 2, fmin=0.0, fmax=args.sr // 2))
-        enorm = tf.cast(
-            tf.expand_dims(
-                tf.constant(2.0 / (mel_f[2 : args.mel_bins + 2] - mel_f[: args.mel_bins])),
-                0,
-            ),
-            tf.float32,
-        )
-        melmat = tf.multiply(melmat, enorm)
-        melmat = tf.divide(melmat, tf.reduce_sum(melmat, axis=0))
-        self.melmat = tf.where(tf.math.is_nan(melmat), tf.zeros_like(melmat), melmat)
-
-        with np.errstate(divide="ignore", invalid="ignore"):
-            self.melmatinv = tf.constant(np.nan_to_num(np.divide(melmat.numpy().T, np.sum(melmat.numpy(), axis=1))).T)
-
-    def conc_tog_specphase(self, S, P):
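-        # S: normalized log-power spectrogram chunks, P: phase scaled to [-1, 1];
-        # denormalize to magnitude, rebuild the complex STFT, and invert it to audio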
-        S = tf.cast(S, tf.float32)
-        P = tf.cast(P, tf.float32)
-        S = self.denormalize(S, clip=False)
-        S = tf.math.sqrt(self.db2power(S) + 1e-7)
-        P = P * np.pi
-        Sls = tf.split(S, S.shape[0], 0)
-        S = tf.squeeze(tf.concat(Sls, 1), 0)
-        Pls = tf.split(P, P.shape[0], 0)
-        P = tf.squeeze(tf.concat(Pls, 1), 0)
-        SP = tf.cast(S, tf.complex64) * tf.math.exp(1j * tf.cast(P, tf.complex64))
-        wv = tf.signal.inverse_stft(
-            SP,
-            4 * self.args.hop,
-            self.args.hop,
-            fft_length=4 * self.args.hop,
-            window_fn=tf.signal.inverse_stft_window_fn(self.args.hop),
-        )
-        return np.squeeze(wv)
-
-    def _tf_log10(self, x):
-        numerator = tf.math.log(x)
-        denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
-        return numerator / denominator
-
-    def normalize(self, S, clip=False):
-        S = (S - self.args.mu_rescale) / self.args.sigma_rescale
-        if clip:
-            S = tf.clip_by_value(S, -1.0, 1.0)
-        return S
-
-    def normalize_rel(self, S):
-        S = S - tf.math.reduce_min(S + 1e-7)
-        S = (S / (tf.math.reduce_max(S + 1e-7) + 1e-7)) + 1e-7
-        return S
-
-    def denormalize(self, S, clip=False):
-        if clip:
-            S = tf.clip_by_value(S, -1.0, 1.0)
-        return (S * self.args.sigma_rescale) + self.args.mu_rescale
-
-    def amp2db(self, x):
-        return 20 * self._tf_log10(tf.clip_by_value(tf.abs(x), 1e-5, 1e100))
-
-    def db2amp(self, x):
-        return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
-
-    def power2db(self, power, ref_value=1.0, amin=1e-10, top_db=None, norm=False):
-        log_spec = 10.0 * self._tf_log10(tf.maximum(amin, power))
-        log_spec -= 10.0 * self._tf_log10(tf.maximum(amin, ref_value))
-        if top_db is not None:
-            log_spec = tf.maximum(log_spec, tf.reduce_max(log_spec) - top_db)
-        return log_spec
-
-    def power2db_batch(self, power, ref_value=1.0, amin=1e-10, top_db=None, norm=False):
-        log_spec = 10.0 * self._tf_log10(tf.maximum(amin, power))
-        log_spec -= 10.0 * self._tf_log10(tf.maximum(amin, ref_value))
-        if top_db is not None:
-            log_spec = tf.maximum(log_spec, tf.reduce_max(log_spec, [-2, -1], keepdims=True) - top_db)
-        return log_spec
-
-    def db2power(self, S_db, ref=1.0):
-        return ref * tf.math.pow(10.0, 0.1 * S_db)
-
-    def wv2mel(self, wv, topdb=80.0):
-        X = tf.signal.stft(
-            wv,
-            frame_length=4 * self.args.hop,
-            frame_step=self.args.hop,
-            fft_length=4 * self.args.hop,
-            window_fn=tf.signal.hann_window,
-            pad_end=False,
-        )
-        S = self.normalize(self.power2db(tf.abs(X) ** 2, top_db=topdb) - self.args.ref_level_db)
-        SM = tf.tensordot(S, self.melmat, 1)
-        return SM
-
-    def mel2spec(self, SM):
-        return tf.tensordot(SM, tf.transpose(self.melmatinv), 1)
-
-    def spec2mel(self, S):
-        return tf.tensordot(S, self.melmat, 1)
-
-    def wv2spec(self, wv, hop_size=256, fac=4):
-        X = tf.signal.stft(
-            wv,
-            frame_length=fac * hop_size,
-            frame_step=hop_size,
-            fft_length=fac * hop_size,
-            window_fn=tf.signal.hann_window,
-            pad_end=False,
-        )
-        return self.normalize(self.power2db(tf.abs(X) ** 2, top_db=None))
-
-    def wv2spec_hop(self, wv, topdb=80.0, hopsize=256):
-        X = tf.signal.stft(
-            wv,
-            frame_length=4 * hopsize,
-            frame_step=hopsize,
-            fft_length=4 * hopsize,
-            window_fn=tf.signal.hann_window,
-            pad_end=False,
-        )
-        S = self.normalize(self.power2db(tf.abs(X) ** 2, top_db=topdb))
-        return tf.tensordot(S, self.melmat, 1)
-
-    def distribute(self, x, model, bs=64, dual_out=False):
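-        # run the model over x in chunks of size bs and concatenate the outputs,
-        # keeping peak memory bounded; x may be a tensor or a list of tensors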
-        outls = []
-        if isinstance(x, list):
-            bdim = x[0].shape[0]
-            for i in range(((bdim - 2) // bs) + 1):
-                outls.append(model([el[i * bs : i * bs + bs] for el in x], training=False))
-        else:
-            bdim = x.shape[0]
-            for i in range(((bdim - 2) // bs) + 1):
-                outls.append(model(x[i * bs : i * bs + bs], training=False))
-
-        if dual_out:
-            return np.concatenate([outls[k][0] for k in range(len(outls))], 0), np.concatenate(
-                [outls[k][1] for k in range(len(outls))], 0
-            )
-        else:
-            return np.concatenate(outls, 0)
-
-    def distribute_enc(self, x, model, bs=64):
-        outls = []
-        if isinstance(x, list):
-            bdim = x[0].shape[0]
-            for i in range(((bdim - 2) // bs) + 1):
-                res = model([el[i * bs : i * bs + bs] for el in x], training=False)
-                resls = tf.split(res, self.args.shape // self.args.window, 0)
-                res = tf.concat(resls, -2)
-                outls.append(res)
-        else:
-            bdim = x.shape[0]
-            for i in range(((bdim - 2) // bs) + 1):
-                res = model(x[i * bs : i * bs + bs], training=False)
-                resls = tf.split(res, self.args.shape // self.args.window, 0)
-                res = tf.concat(resls, -2)
-                outls.append(res)
-
-        return np.concatenate(outls, 0)
-
-    def distribute_dec(self, x, model, bs=64):
-        outls = []
-        bdim = x.shape[0]
-        for i in range(((bdim - 2) // bs) + 1):
-            inp = x[i * bs : i * bs + bs]
-            inpls = tf.split(inp, 2, -2)
-            inp = tf.concat(inpls, 0)
-            res = model(inp, training=False)
-            outls.append(res)
-        return np.concatenate([outls[k][0] for k in range(len(outls))], 0), np.concatenate(
-            [outls[k][1] for k in range(len(outls))], 0
-        )
-
-    def distribute_dec2(self, x, model, bs=64):
-        outls = []
-        bdim = x.shape[0]
-        for i in range(((bdim - 2) // bs) + 1):
-            inp1 = x[i * bs : i * bs + bs]
-            inpls = tf.split(inp1, 2, -2)
-            inp1 = tf.concat(inpls, 0)
-            outls.append(model(inp1, training=False))
-
-        return np.concatenate(outls, 0)
-
-    def get_noise_interp(self):
-        noiseg = tf.random.normal([1, 64], dtype=tf.float32)
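-        # noiseg is a "global" latent shared across the whole sequence; the local
-        # latents below are linearly interpolated between three anchors so the
-        # generated audio evolves smoothly over time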
-
-        noisel = tf.concat([tf.random.normal([1, 64], dtype=tf.float32), noiseg], -1)
-        noisec = tf.concat([tf.random.normal([1, 64], dtype=tf.float32), noiseg], -1)
-        noiser = tf.concat([tf.random.normal([1, 64], dtype=tf.float32), noiseg], -1)
-
-        rl = tf.linspace(noisel, noisec, self.args.latlen + 1, axis=-2)[:, :-1, :]
-        rr = tf.linspace(noisec, noiser, self.args.latlen + 1, axis=-2)
-
-        noisetot = tf.concat([rl, rr], -2)
-        return tf.image.random_crop(noisetot, [1, self.args.latlen, 64 + 64])
-
-    def generate_example_stereo(self, models_ls):
-        (
-            critic,
-            gen,
-            enc,
-            dec,
-            enc2,
-            dec2,
-            critic_rec,
-            gen_ema,
-            [opt_dec, opt_disc],
-        ) = models_ls
-        abb = gen_ema(self.get_noise_interp(), training=False)
-        abbls = tf.split(abb, abb.shape[-2] // 16, -2)
-        abb = tf.concat(abbls, 0)
-
-        chls = []
-        for channel in range(2):
-
-            ab = self.distribute_dec2(
-                abb[
-                    :,
-                    :,
-                    :,
-                    channel * self.args.latdepth : channel * self.args.latdepth + self.args.latdepth,
-                ],
-                dec2,
-            )
-            abls = tf.split(ab, ab.shape[-2] // self.args.shape, -2)
-            ab = tf.concat(abls, 0)
-            ab_m, ab_p = self.distribute_dec(ab, dec)
-            wv = self.conc_tog_specphase(ab_m, ab_p)
-            chls.append(wv)
-
-        return np.stack(chls, -1)
-
-    # Save in training loop
-    def save_test_image_full(self, path, models_ls=None):
-
-        abwv = self.generate_example_stereo(models_ls)
-        abwv2 = self.generate_example_stereo(models_ls)
-        abwv3 = self.generate_example_stereo(models_ls)
-        abwv4 = self.generate_example_stereo(models_ls)
-
-        # IPython.display.display(
-        #     IPython.display.Audio(np.squeeze(np.transpose(abwv)), rate=self.args.sr)
-        # )
-        # IPython.display.display(
-        #     IPython.display.Audio(np.squeeze(np.transpose(abwv2)), rate=self.args.sr)
-        # )
-        # IPython.display.display(
-        #     IPython.display.Audio(np.squeeze(np.transpose(abwv3)), rate=self.args.sr)
-        # )
-        # IPython.display.display(
-        #     IPython.display.Audio(np.squeeze(np.transpose(abwv4)), rate=self.args.sr)
-        # )
-
-        write_wav(f"{path}/out1.wav", self.args.sr, np.squeeze(abwv))
-        write_wav(f"{path}/out2.wav", self.args.sr, np.squeeze(abwv2))
-        write_wav(f"{path}/out3.wav", self.args.sr, np.squeeze(abwv3))
-        write_wav(f"{path}/out4.wav", self.args.sr, np.squeeze(abwv4))
-
-        fig, axs = plt.subplots(nrows=4, ncols=1, figsize=(20, 20))
-        axs[0].imshow(
-            np.flip(
-                np.array(
-                    tf.transpose(
-                        self.wv2spec_hop((abwv[:, 0] + abwv[:, 1]) / 2.0, 80.0, 256),
-                        [1, 0],
-                    )
-                ),
-                -2,
-            ),
-            cmap=None,
-        )
-        axs[0].axis("off")
-        axs[0].set_title("Generated1")
-        axs[1].imshow(
-            np.flip(
-                np.array(
-                    tf.transpose(
-                        self.wv2spec_hop((abwv2[:, 0] + abwv2[:, 1]) / 2.0, 80.0, 256),
-                        [1, 0],
-                    )
-                ),
-                -2,
-            ),
-            cmap=None,
-        )
-        axs[1].axis("off")
-        axs[1].set_title("Generated2")
-        axs[2].imshow(
-            np.flip(
-                np.array(
-                    tf.transpose(
-                        self.wv2spec_hop((abwv3[:, 0] + abwv3[:, 1]) / 2.0, 80.0, 256),
-                        [1, 0],
-                    )
-                ),
-                -2,
-            ),
-            cmap=None,
-        )
-        axs[2].axis("off")
-        axs[2].set_title("Generated3")
-        axs[3].imshow(
-            np.flip(
-                np.array(
-                    tf.transpose(
-                        self.wv2spec_hop((abwv4[:, 0] + abwv4[:, 1]) / 2.0, 80.0, 256),
-                        [1, 0],
-                    )
-                ),
-                -2,
-            ),
-            cmap=None,
-        )
-        axs[3].axis("off")
-        axs[3].set_title("Generated4")
-        # plt.show()
-        plt.savefig(f"{path}/output.png")
-
-    def save_end(
-        self,
-        epoch,
-        gloss,
-        closs,
-        mloss,
-        models_ls=None,
-        n_save=3,
-        save_path="checkpoints",
-    ):
-        (
-            critic,
-            gen,
-            enc,
-            dec,
-            enc2,
-            dec2,
-            critic_rec,
-            gen_ema,
-            [opt_dec, opt_disc],
-        ) = models_ls
-        if epoch % n_save == 0:
-            print("Saving...")
-            path = f"{save_path}/MUSIKA_iterations-{((epoch+1)*self.args.totsamples)//(self.args.bs*1000)}k_losses-{str(gloss)[:9]}-{str(closs)[:9]}-{str(mloss)[:9]}"
-            os.mkdir(path)
-            critic.save_weights(path + "/critic.h5")
-            critic_rec.save_weights(path + "/critic_rec.h5")
-            gen.save_weights(path + "/gen.h5")
-            gen_ema.save_weights(path + "/gen_ema.h5")
-            # enc.save_weights(path + "/enc.h5")
-            # dec.save_weights(path + "/dec.h5")
-            # enc2.save_weights(path + "/enc2.h5")
-            # dec2.save_weights(path + "/dec2.h5")
-            np.save(path + "/opt_dec.npy", opt_dec.get_weights())
-            np.save(path + "/opt_disc.npy", opt_disc.get_weights())
-            self.save_test_image_full(path, models_ls=models_ls)
-
-    def truncated_normal(self, shape, bound=2.0, dtype=tf.float32):
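-        # sample a standard normal truncated to [-bound, bound] (the "truncation
-        # trick": smaller bounds trade sample variety for stability)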
-        seed1, seed2 = random_seed.get_seed(tf.random.uniform((), tf.int32.min, tf.int32.max, dtype=tf.int32))
-        return tf.random.stateless_parameterized_truncated_normal(shape, [seed1, seed2], 0.0, 1.0, -bound, bound)
-
-    def distribute_gen(self, x, model, bs=64):
-        outls = []
-        bdim = x.shape[0]
-        if bdim == 1:
-            bdim = 2
-        for i in range(((bdim - 2) // bs) + 1):
-            outls.append(model(x[i * bs : i * bs + bs], training=False))
-        return np.concatenate(outls, 0)
-
-    def get_noise_interp_multi(self, fac=1, var=2.0):
-        noiseg = self.truncated_normal([1, 64], var, dtype=tf.float32)
-
-        if var < 1.75:
-            var = 1.75
-
-        noisels = [
-            tf.concat([self.truncated_normal([1, 64], var, dtype=tf.float32), noiseg], -1) for i in range(2 + (fac - 1))
-        ]
-        rls = [
-            tf.linspace(noisels[k], noisels[k + 1], self.args.latlen + 1, axis=-2)[:, :-1, :]
-            for k in range(len(noisels) - 1)
-        ]
-        return tf.concat(rls, 0)
-
-    def stfunc(self, z, var, models_ls):
-
-        critic, gen, enc, dec, enc2, dec2, critic_rec, gen_ema, [opt_dec, opt_disc] = models_ls
-
-        var = 0.01 + (3.5 * (var / 100.0))
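-        # maps the 0-100 UI slider onto a truncation bound in [0.01, 3.51]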
-
-        if z == 0:
-            fac = 1
-        elif z == 1:
-            fac = 5
-        else:
-            fac = 10
-
-        bef = time.time()
-        ab = self.distribute_gen(self.get_noise_interp_multi(fac, var), gen_ema)
-        abls = tf.split(ab, ab.shape[0], 0)
-        ab = tf.concat(abls, -2)
-        abls = tf.split(ab, ab.shape[-2] // 16, -2)
-        abi = tf.concat(abls, 0)
-
-        chls = []
-        for channel in range(2):
-
-            ab = self.distribute_dec2(
-                abi[:, :, :, channel * self.args.latdepth : channel * self.args.latdepth + self.args.latdepth],
-                dec2,
-                bs=128,
-            )
-            abls = tf.split(ab, ab.shape[-2] // self.args.shape, -2)
-            ab = tf.concat(abls, 0)
-
-            ab_m, ab_p = self.distribute_dec(ab, dec, bs=128)
-            abwv = self.conc_tog_specphase(ab_m, ab_p)
-            chls.append(abwv)
-
-        print(
-            f"Time for complete generation pipeline: {time.time()-bef} s        {int(np.round((fac*23.)/(time.time()-bef)))}x faster than Real Time!"
-        )
-
-        abwvc = np.clip(np.squeeze(np.stack(chls, -1)), -1.0, 1.0)
-        spec = np.flip(
-            np.array(
-                tf.transpose(
-                    self.wv2spec_hop((abwvc[: 23 * self.args.sr, 0] + abwvc[: 23 * self.args.sr, 1]) / 2.0, 80.0, 256),
-                    [1, 0],
-                )
-            ),
-            -2,
-        )
-
-        return (
-            spec,
-            (self.args.sr, np.int16(abwvc * 32767.0)),
-        )
-
-    def render_gradio(self, models_ls, train=True):
-        article_text = "Original work by Marco Pasini ([Twitter](https://twitter.com/marco_ppasini)) at Johannes Kepler Universität Linz. Supervised by Jan Schlüter."
-
-        def gradio_func(x, y):
-            return self.stfunc(x, y, models_ls)
-
-        iface = gr.Interface(
-            fn=gradio_func,
-            inputs=[
-                gr.inputs.Radio(
-                    choices=["23s", "1m 58s", "3m 57s"], type="index", default="1m 58s", label="Generated Music Length",
-                ),
-                gr.inputs.Slider(
-                    minimum=0,
-                    maximum=100,
-                    step=1,
-                    default=25,
-                    label="Stability[left]/Variety[right] Tradeoff (Truncation Trick)",
-                ),
-            ],
-            outputs=[
-                gr.outputs.Image(label="Log-MelSpectrogram of Generated Audio (first 23 s)"),
-                gr.outputs.Audio(type="numpy", label="Generated Audio"),
-            ],
-            title="musika!",
-            description="The generator used for this demo is updated *after* every epoch!",
-            article=article_text,
-        )
-
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
-        print("CLICK ON LINK BELOW TO OPEN GRADIO INTERFACE")
-        if train:
-            iface.launch(prevent_thread_lock=True)
-        else:
-            iface.launch()
-        # iface.launch(share=True, enable_queue=True)
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
-        print("--------------------------------")
diff --git a/spaces/gbharti/stable-riffusion-walk/app.py b/spaces/gbharti/stable-riffusion-walk/app.py
deleted file mode 100644
index 5873b68d07362469d0091fbd59512734d00b595b..0000000000000000000000000000000000000000
--- a/spaces/gbharti/stable-riffusion-walk/app.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import streamlit as st
-import requests
-import time
-
-API_KEY = str(st.secrets['SIEVE_API_KEY'])
-
-st.title("Stable Riffusion Walk")
-st.markdown('Built by [Gaurang Bharti](https://gaurangbharti.netlify.app) Powered by [Sieve](https://www.sievedata.com)')
-st.markdown("Stable Riffusion Walk combines [Stable Diffusion Walk](https://github.com/nateraw/stable-diffusion-videos) and [Stable Riffusion](https://www.riffusion.com) to generate music videos through Stable Diffusion!")
-st.caption("You can find some examples at the bottom. Generations can take anywhere from 5 mins to 10+ mins depending on demand. Please be patient :)")
-
-def check_status(url, interval, job_id):
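-    # poll the jobs endpoint every `interval` seconds until this job reports
-    # 'finished' (returns True) or 'error' (shows the error and returns it)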
-    finished = False
-    headers = {
-        'X-API-Key': API_KEY
-        }
-    while True:
-        response = requests.get(url, headers=headers)
-        payload = response.json()
-        assert payload.get('data'), payload
-        data = payload['data']
-        
-        for job in data:
-            if job['id'] == job_id:
-            
-                if job['status'] == 'processing':
-              
-                    time.sleep(interval)
-                if job['status'] == 'finished':
-                   
-                    finished = True
-                    return finished
-                if job['status'] == 'error':
-                    st.error("An error occured, please try again. If the error persists, please inform the developers.")
-                    print(job['error'])
-                    return job['error']
-
-def fetch_video(job_id):
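-    # look up a finished job and return the URL of its output video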
-    url = f"https://mango.sievedata.com/v1/jobs/{job_id}"
-    headers = {
-        'Content-Type': 'application/json',
-        'X-API-Key': API_KEY
-    }
-    response = requests.get(url, headers = headers)
-    data = response.json()
-    url = data['data'][0]['url']
-    return url
-
-def send_data(audio_text, video_text, duration, name):
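-    # push a new job to the Sieve workflow; returns the job id on success,
-    # False if the API rejected the request, or an error string on exception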
-    url = "https://mango.sievedata.com/v1/push"
-    
-    headers = {
-        'Content-Type': 'application/json',
-        'X-API-Key': API_KEY
-    } 
-    data = {
-        "workflow_name": name,
-        "inputs": {
-            "video_prompt": video_text,
-            "audio_prompt": audio_text,
-            "duration": duration
-            }
-        }
-    try:
-        response = requests.post(url, headers=headers, json=data)
-        if 'id' not in response.json():
-            st.error(response.json()['description'])
-            return False
-        return (response.json()['id'])
-    except Exception as e:
-        return (f'An error occurred: {e}')
-    
-#Streamlit App
-st.subheader("Music")
-audio_in = st.text_input('Try your favorite styles, instruments like saxophone or violin, modifiers like arabic or jamaican, genres like jazz or gospel, sounds like church bells or rain, or any combination', placeholder="Enter prompt for music here", max_chars=100)
-
-with st.expander("Music examples"):
-    st.write("Alarm Clock")
-    st.audio("Audios/alarm_clock.mp3")
-    st.write("Class Rock Mellow Gold Progressive")
-    st.audio("Audios/classic_rock_mellow_gold_progressive.mp3")
-    st.write("Guitar Riff")
-    st.audio("Audios/guitar_riff.mp3")
-    st.write("Reggae Fusion")
-    st.audio("Audios/reggae_fusion.mp3")
-    st.write("Rock & Roll")
-    st.audio("Audios/rock_and_roll.mp3")
-
-music_options = ["None", "Alarm Clock", "Classic Rock Mellow Gold Progressive", "Guitar Riff", "Reggae Fusion", "Rock & Roll"]
-music_example = st.radio("Or try something from the examples! (Set to None if you're using a custom prompt)", options=music_options)
-
-if music_example != "None":
-    audio_in = music_example
-
-st.subheader("Video")
-
-video_in = st.text_input("Describe the visuals of the video! You can try any Stable Diffusion or Midjourney prompts. Some examples below!", placeholder="Enter prompt for video here")
-
-st.caption("Note: More complex prompts will take longer")
-with st.expander("Music Video examples"):
-        col1, col2 = st.columns([1, 1])
-        with col1:
-            st.write("Audio prompt: Hans Zimmer")
-            st.write("Video prompt: Skull demon sorcerer Concept art portrait by Terese Nielsen, Brom, Miho Hirano, hyperdetailed intricately detailed gothic art trending on Artstation triadic colors Unreal Engine 5 detailed matte painting, Dark Black Velvet Background, art nouveau, deep color, fantastical, intricate detail, splash screen, complementary colors, fantasy concept art, gothic deviantart masterpiece, Vivid colors, 16k, UHD, HDR10, (Masterpiece:1. 5), Absurdres, (best quality:1. 5) Model: ReV Animated v1. 21")
-        with col2:
-            st.video("https://github.com/gaurangbharti1/riffusion-walk-streamlit/raw/main/Videos/skull_demon.mp4")
-        col3, col4 = st.columns([1, 1])
-        with col3:
-            st.write("Audio prompt: Relaxing, Floating, Waterfall")
-            st.write("Video prompt: floating island in the clouds, nice weather, trees, a wooden house, waterfall")
-        with col4:
-            st.video("https://github.com/gaurangbharti1/riffusion-walk-streamlit/raw/main/Videos/floating_island_waterfall.mp4")
-        col5, col6 = st.columns([1, 1])
-        with col5:
-            st.write("Audio prompt: New Orleans Blues")
-            st.write("Video prompt: pixel video game with fighting dragons, high quality")
-        with col6:
-            st.video("https://github.com/gaurangbharti1/riffusion-walk-streamlit/raw/main/Videos/pixel_fighting_game_new_orleans_blues.mp4")
-
-input_duration = st.slider("Duration (seconds)", 4, 7, 5)
-
-workflow_name = "openjourney-test"
-
-# Experimental
-
-#workflow_names = ["stable-riffusion-walk", "openjourney-test"]
-
-# st.write("Pick one of these models for the video")
-# options = ["OpenJourney", "Stable Diffusion v1.5"]
-# col1, col2 = st.columns([1, 1])
-# with col1:
-#     selected_option = st.radio("Select an option", options)
-# with col2:
-#     if selected_option == "Stable Diffusion v1.5":
-#         st.image("sd_21_2.jpg", width=300)
-#         workflow_name = "stable-riffusion-walk"
-#     elif selected_option == "Openjourney":
-#         st.image("openjourney_1.png", width=300)
-#         workflow_name = "openjourney-test"
-    
-button1 = st.button("Diffuse!")
-
-if not st.session_state.get('button'):
-    st.session_state['button'] = button1
-
-if st.session_state['button']:
-
-    job = send_data(audio_in, video_in, input_duration, workflow_name)
-    if job:
-        with st.spinner("Processing Video"):
-            status = check_status('https://mango.sievedata.com/v1/jobs', 5, str(job))
-            if status == True:
-                video = fetch_video(job)
-                st.video(video)
\ No newline at end of file
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/__init__.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/__init__.py
deleted file mode 100644
index 8339983905fb5d20bae42ba6f76fea75d278b1aa..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/backbones/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .cgnet import CGNet
-# from .fast_scnn import FastSCNN
-from .hrnet import HRNet
-from .mobilenet_v2 import MobileNetV2
-from .mobilenet_v3 import MobileNetV3
-from .resnest import ResNeSt
-from .resnet import ResNet, ResNetV1c, ResNetV1d
-from .resnext import ResNeXt
-from .unet import UNet
-from .vit import VisionTransformer
-from .uniformer import UniFormer
-
-__all__ = [
-    'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet',
-    'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3',
-    'VisionTransformer', 'UniFormer'
-]
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Borgias Season 1 1080p Torrent Experience the Power Passion and Corruption of the Renaissance.md b/spaces/gotiQspiryo/whisper-ui/examples/Borgias Season 1 1080p Torrent Experience the Power Passion and Corruption of the Renaissance.md
deleted file mode 100644
index d1a01c6ad3c0faca06fcbf268b60ce7e80e45b2a..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Borgias Season 1 1080p Torrent Experience the Power Passion and Corruption of the Renaissance.md	
+++ /dev/null
@@ -1,5 +0,0 @@
-
-<p>"Download this fast torrent at the maximum of your internet connection speed (you can see the image proof by clicking on image link): img845.imageshack.us/img845/2518/theborgiashdtorrspeedpr.jpg. I have an internet connection speed of 10 Mbit/s and it is downloading at its maximum of 1.1 MB/s. If your connection is faster, you will download it at its maximum speed.</p>
-<h2>Borgias Season 1 1080p Torrent</h2><br /><p><b><b>Download</b> &#10001; <a href="https://urlgoal.com/2uyNd2">https://urlgoal.com/2uyNd2</a></b></p><br /><br /> aaccfb2cb3<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Calculadora De Codigos Para Liberar Nokia 1616 WORK.md b/spaces/gotiQspiryo/whisper-ui/examples/Calculadora De Codigos Para Liberar Nokia 1616 WORK.md
deleted file mode 100644
index 18281696f347ccd78bdf4c18b315c55fad99f8a8..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Calculadora De Codigos Para Liberar Nokia 1616 WORK.md	
+++ /dev/null
@@ -1,13 +0,0 @@
-<h2>calculadora de codigos para liberar nokia 1616</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://urlgoal.com/2uyNwy">https://urlgoal.com/2uyNwy</a></b></p><br /><br />
-
-calculadora de codigos para liberar nokia 1616 Alice and other games for PC: Summary.
-This is a collection of the games and apps that I have recently downloaded.
-The games in this list include all games and apps from the original Sega Genesis games, and many more.
-All games are downloaded from the internet.
-If you have problems.
-Sega Dendy Emu Downloads Emu-land.net.
-Download and play Sega emu games, free.
-The official emu-land.net site for Emu-land.net, download games for Sega, Dendy, NES, SNES, GB, GBC, SMD and others. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Crack Cambam Plus V0.9.846.md b/spaces/gotiQspiryo/whisper-ui/examples/Crack Cambam Plus V0.9.846.md
deleted file mode 100644
index 597f638703656daf2fa7cdce707e4f7f22d9b0d0..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Crack Cambam Plus V0.9.846.md	
+++ /dev/null
@@ -1,11 +0,0 @@
-<h2>Crack cambam plus v0.9.846</h2><br /><p><b><b>Download Zip</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://urlgoal.com/2uyM5b">https://urlgoal.com/2uyM5b</a></b></p><br /><br />
-<br />
-cambam plus v0.9.846 (c) Copyright 2005 Joo.
-Takes some effort to get it up and running, but it is a great program.
-The program is a tool for modifying your tweaks, making your tweaks more flexible, and making them more secure.
-You can generate your own tweaks with the cambam modifier.
-The program is optimized to run on a large host of machines and can be installed as a service.
-There are several tweak options: - The tweak options that can be used 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional Handylogo Internetsp.md b/spaces/gotiQspiryo/whisper-ui/examples/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional Handylogo Internetsp.md
deleted file mode 100644
index 3d79660f49d7910e46c2352bdd6fd1369a778a8e..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional Handylogo Internetsp.md	
+++ /dev/null
@@ -1,18 +0,0 @@
-<br />
-<h1>MaxSea V12.6.4.1: A Professional Navigation Software with CM93v3 Issue505 Charts and Handylogo Internetsp Features</h1>
-<p>MaxSea V12.6.4.1 is a powerful and user-friendly navigation software that integrates the latest CM93v3 Issue505 charts, which cover the entire world and are updated monthly. MaxSea V12.6.4.1 also offers handylogo internetsp features, which allow you to access weather forecasts, tide information, AIS data, and other online services from your mobile device.</p>
-<h2>MaxSea V12.6.4.1 With CM93v3 Issue505 Professional handylogo internetsp</h2><br /><p><b><b>Download Zip</b> &#10001; <a href="https://urlgoal.com/2uyMiE">https://urlgoal.com/2uyMiE</a></b></p><br /><br />
-<p>With MaxSea V12.6.4.1, you can plan your routes, monitor your position, track your speed and course, display various instruments and alarms, and customize your map display with layers, colors, symbols, and fonts. You can also import and export waypoints, routes, tracks, and marks in various formats, such as GPX, KML, NMEA, and CSV.</p>
-<p>MaxSea V12.6.4.1 is compatible with Windows XP, Vista, 7, 8, and 10 operating systems, and requires a minimum of 2 GB of RAM and 10 GB of free disk space. You can download MaxSea V12.6.4.1 from the official website or purchase it from authorized dealers.</p>
-<p>If you are looking for a professional navigation software that combines high-quality charts and online services with an easy-to-use interface and a reliable performance, MaxSea V12.6.4.1 is the perfect choice for you.</p>
-
-<p>One of the main advantages of MaxSea V12.6.4.1 is its integration with CM93v3 Issue505 charts, which are based on the official S-57 vector charts from the International Hydrographic Organization (IHO). These charts provide accurate and up-to-date information on coastlines, depths, buoys, lights, and other navigational aids. They also include bathymetric data, which shows the shape and contour of the seabed. You can zoom in and out, pan, rotate, and measure distances and bearings on the charts with ease.</p>
-<p>Another benefit of MaxSea V12.6.4.1 is its handylogo internetsp features, which enable you to connect your mobile device to your PC via Wi-Fi or Bluetooth and access various online services from anywhere. You can check the weather forecasts from different sources, such as NOAA, Meteo France, or Windy.com, and overlay them on the charts. You can also view the tide information from the World Tidal Database and the current information from the OSCAR database. Moreover, you can receive AIS data from nearby vessels and display their positions, names, speeds, and courses on the charts.</p>
-<p></p>
-<p>MaxSea V12.6.4.1 is not only a navigation software, but also a tool for learning and entertainment. You can use MaxSea V12.6.4.1 to simulate your navigation scenarios and test your skills and knowledge. You can also use MaxSea V12.6.4.1 to play games, such as treasure hunt, fishing contest, or sailing race, with your friends or family.</p>
-
-<p>MaxSea V12.6.4.1 is designed to meet the needs and expectations of various types of users, such as sailors, fishermen, cruisers, racers, and explorers. You can customize MaxSea V12.6.4.1 according to your preferences and requirements, by choosing from different modules, options, and settings. You can also add external devices, such as GPS receivers, radars, sonars, autopilots, and cameras, to enhance your navigation experience.</p>
-<p>MaxSea V12.6.4.1 is not only a software, but also a community. You can share your routes, tracks, marks, and photos with other MaxSea users through the MaxSea Community website or the MaxSea Facebook page. You can also join the MaxSea Club and enjoy exclusive benefits, such as discounts, updates, support, and newsletters.</p>
-<p>MaxSea V12.6.4.1 is the ultimate navigation software that combines the best of technology and innovation with the best of human experience and passion. Whether you are a beginner or an expert, a hobbyist or a professional, a solo traveler or a group leader, MaxSea V12.6.4.1 will make your navigation easier, safer, and more enjoyable.</p> d5da3c52bf<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/gradio/HuBERT/examples/wav2vec/__init__.py b/spaces/gradio/HuBERT/examples/wav2vec/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/gradio/HuBERT/fairseq/modules/sinusoidal_positional_embedding.py b/spaces/gradio/HuBERT/fairseq/modules/sinusoidal_positional_embedding.py
deleted file mode 100644
index 4793ecfb522d0729fc2d24a3ddf0c6a774d67773..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/modules/sinusoidal_positional_embedding.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from typing import Any, Optional
-
-import torch
-import torch.onnx.operators
-from fairseq import utils
-from torch import Tensor, nn
-
-
-class SinusoidalPositionalEmbedding(nn.Module):
-    """This module produces sinusoidal positional embeddings of any length.
-
-    Padding symbols are ignored.
-    """
-
-    def __init__(self, embedding_dim, padding_idx, init_size=1024):
-        super().__init__()
-        self.embedding_dim = embedding_dim
-        self.padding_idx = padding_idx if padding_idx is not None else 0
-        self.weights = SinusoidalPositionalEmbedding.get_embedding(
-            init_size, embedding_dim, padding_idx
-        )
-        self.onnx_trace = False
-        self.register_buffer("_float_tensor", torch.FloatTensor(1))
-        self.max_positions = int(1e5)
-
-    def prepare_for_onnx_export_(self):
-        self.onnx_trace = True
-
-    @staticmethod
-    def get_embedding(
-        num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
-    ):
-        """Build sinusoidal embeddings.
-
-        This matches the implementation in tensor2tensor, but differs slightly
-        from the description in Section 3.5 of "Attention Is All You Need".
-        """
-        half_dim = embedding_dim // 2
-        emb = math.log(10000) / (half_dim - 1)
-        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
-        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
-            1
-        ) * emb.unsqueeze(0)
-        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
-            num_embeddings, -1
-        )
-        if embedding_dim % 2 == 1:
-            # zero pad
-            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
-        if padding_idx is not None:
-            emb[padding_idx, :] = 0
-        return emb
-
-    def forward(
-        self,
-        input,
-        incremental_state: Optional[Any] = None,
-        timestep: Optional[Tensor] = None,
-        positions: Optional[Any] = None,
-    ):
-        """Input is expected to be of size [bsz x seqlen]."""
-        bspair = torch.onnx.operators.shape_as_tensor(input)
-        bsz, seq_len = bspair[0], bspair[1]
-        max_pos = self.padding_idx + 1 + seq_len
-        if self.weights is None or max_pos > self.weights.size(0):
-            # recompute/expand embeddings if needed
-            self.weights = SinusoidalPositionalEmbedding.get_embedding(
-                max_pos, self.embedding_dim, self.padding_idx
-            )
-        self.weights = self.weights.to(self._float_tensor)
-
-        if incremental_state is not None:
-            # positions is the same for every token when decoding a single step
-            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
-            if self.onnx_trace:
-                return (
-                    self.weights.index_select(index=self.padding_idx + pos, dim=0)
-                    .unsqueeze(1)
-                    .repeat(bsz, 1, 1)
-                )
-            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
-
-        positions = utils.make_positions(
-            input, self.padding_idx, onnx_trace=self.onnx_trace
-        )
-        if self.onnx_trace:
-            flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
-            embedding_shape = torch.cat(
-                (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
-            )
-            embeddings = torch.onnx.operators.reshape_from_tensor_shape(
-                flat_embeddings, embedding_shape
-            )
-            return embeddings
-        return (
-            self.weights.index_select(0, positions.view(-1))
-            .view(bsz, seq_len, -1)
-            .detach()
-        )
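-
-
-# Hedged usage sketch (illustrative, not part of fairseq): positions of pad
-# tokens map to the zeroed padding row of the embedding table.
-#
-#   emb = SinusoidalPositionalEmbedding(embedding_dim=8, padding_idx=1)
-#   tokens = torch.tensor([[5, 6, 7], [1, 5, 6]])  # 1 marks padding
-#   out = emb(tokens)   # shape (2, 3, 8); out[1, 0] is all zeros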
diff --git a/spaces/gradio/HuBERT/fairseq/tasks/sentence_ranking.py b/spaces/gradio/HuBERT/fairseq/tasks/sentence_ranking.py
deleted file mode 100644
index bed44f34e5f8e506b6ae7ba30ddaa661bf4a7522..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/tasks/sentence_ranking.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-
-import numpy as np
-from fairseq import utils
-from fairseq.data import (
-    ConcatSentencesDataset,
-    Dictionary,
-    IdDataset,
-    NestedDictionaryDataset,
-    NumelDataset,
-    NumSamplesDataset,
-    PrependTokenDataset,
-    RawLabelDataset,
-    RightPadDataset,
-    SortDataset,
-    TruncateDataset,
-    data_utils,
-)
-from fairseq.data.shorten_dataset import maybe_shorten_dataset
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_task("sentence_ranking")
-class SentenceRankingTask(LegacyFairseqTask):
-    """
-    Ranking task on multiple sentences.
-
-    Args:
-        dictionary (Dictionary): the dictionary for the input of the task
-    """
-
-    @staticmethod
-    def add_args(parser):
-        """Add task-specific arguments to the parser."""
-        parser.add_argument("data", metavar="FILE", help="file prefix for data")
-        parser.add_argument(
-            "--num-classes", type=int, help="number of sentences to be ranked"
-        )
-        parser.add_argument(
-            "--init-token",
-            type=int,
-            help="add token at the beginning of each batch item",
-        )
-        parser.add_argument(
-            "--separator-token", type=int, help="add separator token between inputs"
-        )
-        parser.add_argument("--no-shuffle", action="store_true")
-        parser.add_argument(
-            "--shorten-method",
-            default="none",
-            choices=["none", "truncate", "random_crop"],
-            help="if not none, shorten sequences that exceed --tokens-per-sample",
-        )
-        parser.add_argument(
-            "--shorten-data-split-list",
-            default="",
-            help="comma-separated list of dataset splits to apply shortening to, "
-            'e.g., "train,valid" (default: all dataset splits)',
-        )
-        parser.add_argument(
-            "--max-option-length", type=int, help="max length for each option"
-        )
-
-    def __init__(self, args, dictionary):
-        super().__init__(args)
-        self.dictionary = dictionary
-
-    @classmethod
-    def load_dictionary(cls, args, filename, source=True):
-        """Load the dictionary from the filename
-
-        Args:
-            filename (str): the filename
-        """
-        dictionary = Dictionary.load(filename)
-        dictionary.add_symbol("<mask>")
-        return dictionary
-
-    @classmethod
-    def setup_task(cls, args, **kwargs):
-        assert (
-            args.criterion == "sentence_ranking"
-        ), "Must set --criterion=sentence_ranking"
-
-        # load data dictionary
-        data_dict = cls.load_dictionary(
-            args,
-            os.path.join(args.data, "input0", "dict.txt"),
-            source=True,
-        )
-        logger.info("[input] dictionary: {} types".format(len(data_dict)))
-        return SentenceRankingTask(args, data_dict)
-
-    def load_dataset(self, split, combine=False, **kwargs):
-        """Load a given dataset split (e.g., train, valid, test)."""
-
-        def get_path(type, split):
-            return os.path.join(self.args.data, type, split)
-
-        def make_dataset(type, dictionary):
-            split_path = get_path(type, split)
-
-            dataset = data_utils.load_indexed_dataset(
-                split_path,
-                self.source_dictionary,
-                self.args.dataset_impl,
-                combine=combine,
-            )
-            return dataset
-
-        input0 = make_dataset("input0", self.source_dictionary)
-        input_options = [
-            make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
-            for idx in range(self.args.num_classes)
-        ]
-
-        if self.args.separator_token is not None:
-            input0 = PrependTokenDataset(input0, self.args.separator_token)
-
-        src_tokens = []
-        for input_option in input_options:
-            if self.args.init_token is not None:
-                input_option = PrependTokenDataset(input_option, self.args.init_token)
-            if self.args.max_option_length is not None:
-                input_option = TruncateDataset(
-                    input_option, self.args.max_option_length
-                )
-            src_token = ConcatSentencesDataset(input_option, input0)
-            src_token = maybe_shorten_dataset(
-                src_token,
-                split,
-                self.args.shorten_data_split_list,
-                self.args.shorten_method,
-                self.args.max_positions,
-                self.args.seed,
-            )
-            src_tokens.append(src_token)
-
-        with data_utils.numpy_seed(self.args.seed):
-            shuffle = np.random.permutation(len(src_tokens[0]))
-
-        dataset = {
-            "id": IdDataset(),
-            "nsentences": NumSamplesDataset(),
-            "ntokens": NumelDataset(src_tokens[0], reduce=True),
-        }
-
-        for src_token_idx in range(len(src_tokens)):
-            dataset.update(
-                {
-                    "net_input{idx}".format(idx=src_token_idx + 1): {
-                        "src_tokens": RightPadDataset(
-                            src_tokens[src_token_idx],
-                            pad_idx=self.source_dictionary.pad(),
-                        ),
-                        "src_lengths": NumelDataset(
-                            src_tokens[src_token_idx], reduce=False
-                        ),
-                    }
-                }
-            )
-
-        label_path = "{}.label".format(get_path("label", split))
-        if os.path.exists(label_path):
-            with open(label_path) as h:
-                dataset.update(
-                    target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
-                )
-
-        nested_dataset = NestedDictionaryDataset(
-            dataset,
-            sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
-        )
-
-        if self.args.no_shuffle:
-            dataset = nested_dataset
-        else:
-            dataset = SortDataset(
-                nested_dataset,
-                # shuffle
-                sort_order=[shuffle],
-            )
-
-        logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
-
-        self.datasets[split] = dataset
-        return self.datasets[split]
-
-    def build_model(self, args):
-        from fairseq import models
-
-        model = models.build_model(args, self)
-
-        model.register_classification_head(
-            getattr(args, "ranking_head_name", "sentence_classification_head"),
-            num_classes=1,
-        )
-
-        return model
-
-    def max_positions(self):
-        return self.args.max_positions
-
-    @property
-    def source_dictionary(self):
-        return self.dictionary
-
-    @property
-    def target_dictionary(self):
-        return self.dictionary
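-
-
-# Hedged CLI sketch (flag names from add_args above; DATA_DIR and the flag
-# values are placeholders):
-#   fairseq-train DATA_DIR --task sentence_ranking --criterion sentence_ranking \
-#       --num-classes 4 --init-token 0 --separator-token 2 --max-option-length 128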
diff --git a/spaces/gradio/HuBERT/tests/test_utils.py b/spaces/gradio/HuBERT/tests/test_utils.py
deleted file mode 100644
index 79195903e0f34372a24fa50312a6e00170c14471..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/tests/test_utils.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-
-import torch
-from fairseq import utils
-
-
-class TestUtils(unittest.TestCase):
-    def test_convert_padding_direction(self):
-        pad = 1
-        left_pad = torch.LongTensor(
-            [
-                [2, 3, 4, 5, 6],
-                [1, 7, 8, 9, 10],
-                [1, 1, 1, 11, 12],
-            ]
-        )
-        right_pad = torch.LongTensor(
-            [
-                [2, 3, 4, 5, 6],
-                [7, 8, 9, 10, 1],
-                [11, 12, 1, 1, 1],
-            ]
-        )
-
-        self.assertAlmostEqual(
-            right_pad,
-            utils.convert_padding_direction(
-                left_pad,
-                pad,
-                left_to_right=True,
-            ),
-        )
-        self.assertAlmostEqual(
-            left_pad,
-            utils.convert_padding_direction(
-                right_pad,
-                pad,
-                right_to_left=True,
-            ),
-        )
-
-    def test_make_positions(self):
-        pad = 1
-        left_pad_input = torch.LongTensor(
-            [
-                [9, 9, 9, 9, 9],
-                [1, 9, 9, 9, 9],
-                [1, 1, 1, 9, 9],
-            ]
-        )
-        left_pad_output = torch.LongTensor(
-            [
-                [2, 3, 4, 5, 6],
-                [1, 2, 3, 4, 5],
-                [1, 1, 1, 2, 3],
-            ]
-        )
-        right_pad_input = torch.LongTensor(
-            [
-                [9, 9, 9, 9, 9],
-                [9, 9, 9, 9, 1],
-                [9, 9, 1, 1, 1],
-            ]
-        )
-        right_pad_output = torch.LongTensor(
-            [
-                [2, 3, 4, 5, 6],
-                [2, 3, 4, 5, 1],
-                [2, 3, 1, 1, 1],
-            ]
-        )
-
-        self.assertAlmostEqual(
-            left_pad_output,
-            utils.make_positions(left_pad_input, pad),
-        )
-        self.assertAlmostEqual(
-            right_pad_output,
-            utils.make_positions(right_pad_input, pad),
-        )
-
-    def test_clip_grad_norm_(self):
-        params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
-        grad_norm = utils.clip_grad_norm_(params, 1.0)
-        self.assertTrue(torch.is_tensor(grad_norm))
-        self.assertEqual(grad_norm, 0.0)
-
-        params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
-        for p in params:
-            p.grad = torch.full((5,), fill_value=2.0)
-        grad_norm = utils.clip_grad_norm_(params, 1.0)
-        exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
-        self.assertTrue(torch.is_tensor(grad_norm))
-        self.assertEqual(grad_norm, exp_grad_norm)
-
-        grad_norm = utils.clip_grad_norm_(params, 1.0)
-        self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
-
-    def test_resolve_max_positions_with_tuple(self):
-        resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
-        self.assertEqual(resolved, (2000, 100, 2000))
-
-    def assertAlmostEqual(self, t1, t2):
-        self.assertEqual(t1.size(), t2.size(), "size mismatch")
-        self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
-
-
-if __name__ == "__main__":
-    unittest.main()
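-
-# Worked check for test_clip_grad_norm_ above: three 5-element gradients of
-# 2.0 give a total norm of sqrt(15 * 2.0**2) = sqrt(60) ≈ 7.75; after that
-# first clip to max_norm=1.0, the second call reports a norm of 1.0.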
diff --git a/spaces/gradio/depth_estimation/app.py b/spaces/gradio/depth_estimation/app.py
deleted file mode 100644
index bf3d6e258b65d2bf808b1e156577136f9e70c97a..0000000000000000000000000000000000000000
--- a/spaces/gradio/depth_estimation/app.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import gradio as gr
-from transformers import DPTFeatureExtractor, DPTForDepthEstimation
-import torch
-import numpy as np
-from PIL import Image
-import open3d as o3d
-from pathlib import Path
-import os
-
-feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
-model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
-
-def process_image(image_path):
-    image_path = Path(image_path)
-    image_raw = Image.open(image_path)
-    image = image_raw.resize(
-        (800, int(800 * image_raw.size[1] / image_raw.size[0])),
-        Image.Resampling.LANCZOS)
-
-    # prepare image for the model
-    encoding = feature_extractor(image, return_tensors="pt")
-
-    # forward pass
-    with torch.no_grad():
-        outputs = model(**encoding)
-        predicted_depth = outputs.predicted_depth
-
-    # interpolate to original size
-    prediction = torch.nn.functional.interpolate(
-        predicted_depth.unsqueeze(1),
-        size=image.size[::-1],
-        mode="bicubic",
-        align_corners=False,
-    ).squeeze()
-    output = prediction.cpu().numpy()
-    depth_image = (output * 255 / np.max(output)).astype('uint8')
-    try:
-        gltf_path = create_3d_obj(np.array(image), depth_image, image_path)
-    except Exception:
-        # Retry with a lower Poisson reconstruction depth before giving up.
-        try:
-            gltf_path = create_3d_obj(
-                np.array(image), depth_image, image_path, depth=8)
-        except Exception:
-            print("Error reconstructing 3D model")
-            raise
-    img = Image.fromarray(depth_image)
-    return [img, gltf_path, gltf_path]
-
-
-def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
-    depth_o3d = o3d.geometry.Image(depth_image)
-    image_o3d = o3d.geometry.Image(rgb_image)
-    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
-        image_o3d, depth_o3d, convert_rgb_to_intensity=False)
-    w = int(depth_image.shape[1])
-    h = int(depth_image.shape[0])
-
-    camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
-    camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)
-
-    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
-        rgbd_image, camera_intrinsic)
-
-    print('normals')
-    pcd.normals = o3d.utility.Vector3dVector(
-        np.zeros((1, 3)))  # invalidate existing normals
-    pcd.estimate_normals(
-        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))
-    pcd.orient_normals_towards_camera_location(
-        camera_location=np.array([0., 0., 1000.]))
-    pcd.transform([[1, 0, 0, 0],
-                   [0, -1, 0, 0],
-                   [0, 0, -1, 0],
-                   [0, 0, 0, 1]])
-    pcd.transform([[-1, 0, 0, 0],
-                   [0, 1, 0, 0],
-                   [0, 0, 1, 0],
-                   [0, 0, 0, 1]])
-
-    print('run Poisson surface reconstruction')
-    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
-        mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
-            pcd, depth=depth, width=0, scale=1.1, linear_fit=True)
-
-    voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256
-    print(f'voxel_size = {voxel_size:e}')
-    mesh = mesh_raw.simplify_vertex_clustering(
-        voxel_size=voxel_size,
-        contraction=o3d.geometry.SimplificationContraction.Average)
-
-    # vertices_to_remove = densities < np.quantile(densities, 0.001)
-    # mesh.remove_vertices_by_mask(vertices_to_remove)
-    bbox = pcd.get_axis_aligned_bounding_box()
-    mesh_crop = mesh.crop(bbox)
-    gltf_path = f'./{image_path.stem}.gltf'
-    o3d.io.write_triangle_mesh(
-        gltf_path, mesh_crop, write_triangle_uvs=True)
-    return gltf_path
-
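-# Note on the camera model in create_3d_obj above: fx = fy = 500 with the
-# principal point at the image center is a hard-coded, uncalibrated pinhole
-# assumption; e.g. for an 800x600 depth map the intrinsic matrix is
-# [[500, 0, 400], [0, 500, 300], [0, 0, 1]].
-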
-title = "Demo: zero-shot depth estimation with DPT + 3D Point Cloud"
-description = "This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object."
-examples = [["examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg"]]
-
-iface = gr.Interface(fn=process_image,
-                     inputs=[gr.Image(
-                         type="filepath", label="Input Image")],
-                     outputs=[gr.Image(label="predicted depth", type="pil"),
-                              gr.Model3D(label="3d mesh reconstruction", clear_color=[
-                                                 1.0, 1.0, 1.0, 1.0]),
-                              gr.File(label="3d gLTF")],
-                     title=title,
-                     description=description,
-                     examples=examples,
-                     allow_flagging="never",
-                     cache_examples=False)
-
-iface.launch(debug=True, enable_queue=False)
\ No newline at end of file
diff --git a/spaces/gradio/gpt-neo/model_fns.py b/spaces/gradio/gpt-neo/model_fns.py
deleted file mode 100644
index 50d3a898a5e35a18e783804e46bff97d3fe0d144..0000000000000000000000000000000000000000
--- a/spaces/gradio/gpt-neo/model_fns.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import mesh_tensorflow as mtf
-import tensorflow.compat.v1 as tf
-from tensorflow.python.tpu import tpu_estimator
-import mesh_tensorflow.transformer as mtf_transformer
-from optimizers import get_optimizer
-from utils import (create_host_call, get_graph_info, remove_batch_from_layout, simd_mesh_setup, add_mode_to_params,
-                   get_batch_size, auto_layout, auto_layout_and_mesh_shape)
-from models.utils import biasmask_attn_weights
-from tensorflow.python.ops import resources
-from sample import sample_autoregressive
-from models.gpt2 import gpt2
-import math
-
-
-def model_fn(features, labels, mode, params):
-    # Get global step
-    global_step = tf.train.get_global_step()
-
-    # Construct mtf graph + mesh from params
-    graph = mtf.Graph()
-    mesh_shape = mtf.convert_to_shape(params["mesh_shape"])
-    layout_rules = mtf.convert_to_layout_rules(params["layout"])
-
-    # Mesh setup
-    if params["use_tpu"]:
-        var_placer, mesh_impl = simd_mesh_setup(params, mesh_shape, layout_rules)
-    else:
-        var_placer = None
-        gpu_ids = params["gpu_ids"]
-        mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
-            mesh_shape, layout_rules, gpu_ids)
-
-    # Trainable variable precision
-    # Store to checkpoints in master type, train in slice type, compute in activation type
-    if params["precision"] == "bfloat16":
-        variable_dtype = mtf.VariableDType(master_dtype=tf.bfloat16, slice_dtype=tf.float32,
-                                           activation_dtype=tf.bfloat16)
-    else:
-        variable_dtype = mtf.VariableDType(master_dtype=tf.float32, slice_dtype=tf.float32, activation_dtype=tf.float32)
-
-    # Build mtf mesh object
-    mesh = mtf.Mesh(graph, "my_mesh", var_placer)
-
-    # Build mtf_features & seq length dict for getting number of microbatches
-    # We need to pack inputs into a dict to pass into serialize_training_step
-    features_dict = {"inputs": features, "labels": labels}
-    sequence_length_dict = {"inputs": params["n_ctx"], "labels": params["n_ctx"]}
-
-    params = add_mode_to_params(params, mode)
-    batch_size = get_batch_size(params)
-
-    batch_dim = mtf.Dimension("batch", batch_size)
-    batch_dims = [batch_dim]
-    feature_length = sequence_length_dict["inputs"]
-    length_dim = mtf.Dimension("sequence", feature_length)
-
-    mtf_features = {}
-    for key, x in features_dict.items():
-        if x is not None:
-            feature_shape = mtf.Shape(batch_dims + [length_dim])
-            if type(features_dict[key]) == dict:
-                features_dict[key] = features_dict[key]["feature"]
-            x = tf.cast(features_dict[key], tf.int32)
-            x = tf.reshape(x, feature_shape.to_integer_list)
-            mtf_features[key] = mtf.import_fully_replicated(
-                mesh, x, feature_shape, name=key)
-
-    # Instantiate dict for dimensions, bias, etc that can be calculated here once then passed into model
-    other_features = {}
-    memory_length_dim = mtf.Dimension("memory_length", length_dim.size)
-
-    attn_bias = biasmask_attn_weights(mesh, length_dim, memory_length_dim, variable_dtype) if params["causal"] else None
-
-    # Add attn_bias into mtf_features
-    other_features["attn_bias"] = attn_bias
-
-    # Define other Dimensions that we'll need inside the model
-    embd_dim = mtf.Dimension("embd", params["n_embd"])
-    vocab_dim = mtf.Dimension("vocab", params["n_vocab"])
-    # We need this because gathering when both the args have the same dimension in them breaks things
-    # This dim is specifically for the weights
-    # This prevents the "Einsum has lhs dimension without corresponding rhs or output dimension." error
-    embed_sequence_dim = mtf.Dimension("embed_sequence", params["n_ctx"])
-
-    other_features["embd_dim"] = embd_dim
-    other_features["vocab_dim"] = vocab_dim
-    other_features["embed_sequence_dim"] = embed_sequence_dim
-    other_features["memory_length_dim"] = memory_length_dim
-
-    if mode == tf.estimator.ModeKeys.PREDICT:
-        # Set up the model for prediction
-        inputs = mtf_features["inputs"]
-        if params["remove_partial_sequences"] is None:
-            params["remove_partial_sequences"] = False
-
-        export = params.get("export", False)
-
-        if not export:
-            mtf_samples = sample_autoregressive(
-                inputs, other_features=other_features, params=params, variable_dtype=variable_dtype,
-                remove_partial_sequences=params["remove_partial_sequences"], stop_at_token=params["eos_id"],
-                sampling_use_entmax=params['sampling_use_entmax'], max_steps=params["predict_max_steps"])
-
-        else:
-            with mtf.utils.outside_all_rewrites():
-                with tf.variable_scope('gpt2'):
-                    mtf_samples, loss, loss_batch = gpt2.model(mtf_features, other_features, params, mesh,
-                                                               variable_dtype=variable_dtype, context=None)
-
-        mtf_samples = mtf.anonymize(mtf_samples)
-        inputs = mtf.anonymize(inputs)
-        lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=True)
-        inputs = lowering.export_to_tf_tensor(inputs)
-        outputs = lowering.export_to_tf_tensor(mtf_samples)
-        predictions = {
-            "inputs": inputs,
-            "outputs": outputs}
-
-        def scaffold_fn():
-            return tf.train.Scaffold(
-                local_init_op=tf.group(
-                    tf.train.Scaffold.default_local_init_op(),
-                    lowering.copy_masters_to_slices(),
-                    name="mtf_local_init_op"),
-                ready_op=tf.concat(
-                    [tf.report_uninitialized_variables(),
-                     resources.report_uninitialized_resources()],
-                    axis=0,
-                    name="mtf_ready_op"))
-
-        return tpu_estimator.TPUEstimatorSpec(
-            mode=tf.estimator.ModeKeys.PREDICT,
-            predictions=predictions,
-            scaffold_fn=scaffold_fn,
-            prediction_hooks=[mtf.MtfRestoreHook(lowering)])
-
-    # We're not predicting, so we better be training or evaluating
-    assert (mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL)
-
-    if mode == tf.estimator.ModeKeys.TRAIN:
-        # Gets number of microbatches per batch for serialized training
-        # if param tokens_per_mb_per_replica = None, this defaults to 1 and no microbatching is performed
-        num_microbatches = int(mtf_transformer.utils.serialize_num_microbatches(batch_dim=batch_dim,
-                                                                                sequence_length=sequence_length_dict,
-                                                                                mesh_shape=mesh_shape,
-                                                                                layout_rules=layout_rules,
-                                                                                tokens_per_microbatch_per_replica=
-                                                                                params["tokens_per_mb_per_replica"]))
-    else:
-        num_microbatches = 1
-
-    params["num_microbatches"] = num_microbatches  # Add num microbatches to params
-
-    if num_microbatches > 1:
-
-        # For serialize_training_step we need to modify the model to output results in a dict
-        def serialized_fn(mtf_features):
-            if params["model"] == "GPT":
-                with tf.variable_scope('gpt2'):
-                    logits, loss, loss_batch = gpt2.model(mtf_features, other_features, params, mesh,
-                                                          variable_dtype=variable_dtype)
-                return {"logits": logits, "loss": loss, "loss_batch": loss_batch}
-            else:
-                raise Exception(f"'{params['model']}' is not a valid model - please select from [GPT]")
-
-        # Serialize the training step - Gradients are accumulated locally and reduced once.
-        var_grads, output_dict = mtf.serialize_training_step(mtf_features, serialized_fn, batch_dim, num_microbatches)
-        loss = output_dict["loss"]
-        loss_batch = output_dict["loss_batch"]
-        logits = output_dict["logits"]
-    else:
-        # If we're not splitting into microbatches, return logits & loss as is
-        if params["model"] == "GPT":
-            with mtf.utils.outside_all_rewrites():
-                with tf.variable_scope('gpt2'):
-                    logits, loss, loss_batch = gpt2.model(mtf_features, other_features, params, mesh,
-                                                          variable_dtype=variable_dtype, context=None)
-        else:
-            raise Exception(f"'{params['model']}' is not a valid model - please select from [GPT]")
-
-    # Auto layout generation
-    if params["auto_layout"]:
-        auto_layout(graph, mesh_shape, logits, loss)
-    if params["auto_layout_and_mesh_shape"]:
-        auto_layout_and_mesh_shape(graph, params["num_cores"], logits, loss)
-
-    if mode == tf.estimator.ModeKeys.TRAIN:
-        # In TRAIN mode, get optimizer
-        if params["num_microbatches"] > 1:
-            # If we are splitting the batch into microbatches, var grads are created in the serialize_training_step fn
-            # So we pass them in here
-            _, update_ops, var_grads = get_optimizer(mesh, loss, params, variable_dtype=variable_dtype,
-                                                     inp_var_grads=var_grads)
-        else:
-            # Otherwise, they are created in the get_optimizer fn, so we leave inp_var_grads blank
-            _, update_ops, var_grads = get_optimizer(mesh, loss, params, variable_dtype=variable_dtype)
-        # Log summaries to tensorboard
-        mtf.scalar_summary("loss", loss)
-        # Log gradients if in params
-        if params["log_grads"] not in [None, False]:
-            for g in var_grads:
-                grad_norm = mtf.sqrt(mtf.reduce_sum(mtf.square(g)))
-                mtf.scalar_summary("grads/norm" + g.name[:-2], grad_norm)
-    else:
-        # For now, we can only export fully-replicated tensors.
-        # This has to be done before lowering or they will not be included in the graph
-        mean_logits = mtf.reduce_mean(logits, reduced_dim=vocab_dim)
-        max_logits = mtf.argmax(logits, vocab_dim)
-        del logits
-        fully_replicated_mean_logits = mtf.anonymize(mean_logits)
-        fully_replicated_max_logits = mtf.anonymize(max_logits)
-        fully_replicated_loss_batch = mtf.anonymize(loss_batch)
-
-    # Gets & prints info about no. trainable vars in the model & dimension names
-    get_graph_info(graph)
-
-    # 'lowers' mtf tensors into a tf graph - this enables us to export results as tf tensors
-    lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=True)
-    tf_loss = lowering.export_to_tf_tensor(loss)
-    tf_loss = tf.cast(tf_loss, tf.float32)
-
-    if mode == tf.estimator.ModeKeys.TRAIN:
-        # Use our patched version until mtf updates theirs
-        host_call = create_host_call(params['model_path'])
-        mtf.utils.remove_summaries()
-
-        # Creates train_op
-        tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
-        tf_update_ops.append(tf.assign_add(global_step, 1))  # Need to manually increment global_step
-        tf.logging.info(f"tf_update_ops: {tf_update_ops}")
-        train_op = tf.group(tf_update_ops)
-    else:
-        tf_mean_logits = lowering.export_to_tf_tensor(fully_replicated_mean_logits)
-        tf_max_logits = lowering.export_to_tf_tensor(fully_replicated_max_logits)
-        tf_loss_batch = tf.to_float(lowering.export_to_tf_tensor(fully_replicated_loss_batch))
-
-    with mtf.utils.outside_all_rewrites():
-        # Copy master variables to slices. Must be called first.
-        restore_hook = mtf.MtfRestoreHook(lowering)
-        if mode == tf.estimator.ModeKeys.TRAIN:
-            # Set up the checkpoint server and return the TPUEstimatorSpec
-            saver = tf.train.Saver(
-                tf.global_variables(),
-                sharded=True,
-                max_to_keep=10,
-                keep_checkpoint_every_n_hours=2,
-                defer_build=False,
-                save_relative_paths=True)
-            tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
-            saver_listener = mtf.MtfCheckpointSaverListener(lowering)
-            saver_hook = tf.train.CheckpointSaverHook(
-                params["model_path"],
-                save_steps=params["steps_per_checkpoint"],
-                saver=saver,
-                listeners=[saver_listener])
-
-            return tpu_estimator.TPUEstimatorSpec(
-                tf.estimator.ModeKeys.TRAIN,
-                loss=tf_loss,
-                host_call=host_call,
-                train_op=train_op,
-                training_hooks=[restore_hook, saver_hook])
-
-        elif mode == tf.estimator.ModeKeys.EVAL:
-            # Evaluation metrics
-            def _perplexity(loss):
-                perplexity = tf.exp(loss)
-                return tf.metrics.mean(perplexity)
-
-            def _bits_per_byte(loss):
-                bpb = loss * (0.29335 / math.log(2))
-                return tf.metrics.mean(bpb)
-
-            def _metric_fn(tf_mean_logits, tf_loss_batch):
-                mean_logits = tf.metrics.mean(tf_mean_logits)
-                loss = tf.reduce_mean(tf_loss_batch)
-                perp = _perplexity(loss)
-                bpb = _bits_per_byte(loss)
-                return {"mean_logits": mean_logits, "perplexity": perp, "bits per byte": bpb}
-
-            def _lambada_metric_fn(labels, tf_max_logits, tf_loss_batch):
-                eos_token = params["eos_id"]
-                answer_positions = tf.where(tf.math.not_equal(labels, eos_token))
-
-                correct_answers = tf.gather_nd(tf.math.equal(tf_max_logits, labels), answer_positions)
-                accuracy = tf.metrics.mean(tf.cast(correct_answers, tf.float32))
-
-                # I guess tf_loss_batch has z_loss and maybe other stuff added to it
-                # so maybe this should be calculated separately in the future
-                answer_loss = tf.gather_nd(tf_loss_batch, answer_positions)
-                log_perplexity = tf.metrics.mean(answer_loss)
-
-                return {"lambada_acc": accuracy, "lambada_log_ppl": log_perplexity}
-
-            eval_task = params["eval_task"]
-            if eval_task == "lambada":
-                eval_metrics = (_lambada_metric_fn, [labels, tf_max_logits, tf_loss_batch])
-            else:
-                eval_metrics = (_metric_fn, [tf_mean_logits, tf_loss_batch])
-
-            return tpu_estimator.TPUEstimatorSpec(
-                tf.estimator.ModeKeys.EVAL,
-                evaluation_hooks=[restore_hook],
-                loss=tf_loss,
-                eval_metrics=eval_metrics)
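-
-# Hedged worked example for the eval metrics above: a mean token loss of
-# 2.0 nats gives perplexity exp(2.0) ≈ 7.39 and, via the 0.29335 constant in
-# _bits_per_byte, bits per byte = 2.0 * (0.29335 / ln 2) ≈ 0.85.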
diff --git a/spaces/gradio/neon-tts-plugin-coqui_main/DESCRIPTION.md b/spaces/gradio/neon-tts-plugin-coqui_main/DESCRIPTION.md
deleted file mode 100644
index d9e5f1cc47c696247c5e3bc6ed193f3992dff75a..0000000000000000000000000000000000000000
--- a/spaces/gradio/neon-tts-plugin-coqui_main/DESCRIPTION.md
+++ /dev/null
@@ -1 +0,0 @@
-This demo converts text to speech in 14 languages.
\ No newline at end of file
diff --git a/spaces/gwang-kim/DATID-3D/eg3d/run_inversion.py b/spaces/gwang-kim/DATID-3D/eg3d/run_inversion.py
deleted file mode 100644
index 584393842c46d1620675a3844fd269cf378c6ad2..0000000000000000000000000000000000000000
--- a/spaces/gwang-kim/DATID-3D/eg3d/run_inversion.py
+++ /dev/null
@@ -1,106 +0,0 @@
-
-# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
-#
-# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
-# property and proprietary rights in and to this material, related
-# documentation and any modifications thereto. Any use, reproduction,
-# disclosure or distribution of this material and related documentation
-# without an express license agreement from NVIDIA CORPORATION or
-# its affiliates is strictly prohibited.
-
-"""Generate lerp videos using pretrained network pickle."""
-
-import os
-import re
-from typing import List, Optional, Tuple, Union
-
-import click
-import dnnlib
-import numpy as np
-import torch
-import legacy
-from torchvision.transforms import transforms
-from projector import w_projector,w_plus_projector
-from PIL import Image
-from glob import glob
-from os.path import join as opj
-
-@click.command()
-@click.option('--image_path', help='path of image file or image directory', type=str, required=True, metavar='STR', show_default=True)
-@click.option('--c_path', help='camera parameters path', type=str, required=True,  default='test-runs', metavar='STR', show_default=True)
-@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
-@click.option('--outdir', help='Output directory', type=str, required=True, metavar='DIR')
-@click.option('--latent_space_type', help='latent space type', type=click.Choice(['w', 'w_plus']), required=False, metavar='STR',
-              default='w', show_default=True)
-@click.option('--num_steps', 'num_steps', type=int,
-              help='number of optimization steps for the inversion', default=500, show_default=True)
-@click.option('--sample_mult', 'sampling_multiplier', type=float,
-              help='Multiplier for depth sampling in volume rendering', default=2, show_default=True)
-@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True)
-def run(
-        network_pkl: str,
-        outdir: str,
-        sampling_multiplier: float,
-        nrr: Optional[int],
-        latent_space_type:str,
-        image_path:str,
-        c_path:str,
-        num_steps:int
-):
-    os.makedirs(outdir, exist_ok=True)
-    print('Loading networks from "%s"...' % network_pkl)
-    device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as f:
-        G = legacy.load_network_pkl(f)['G_ema']
-
-    G = G.to(device)
-    G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier)
-    G.rendering_kwargs['depth_resolution_importance'] = int(
-        G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier)
-    if nrr is not None: G.neural_rendering_resolution = nrr
-
-
-    if os.path.isdir(image_path):
-        img_paths = sorted(glob(opj(image_path,"*.png")))
-    else:
-        img_paths = [image_path]
-
-    trans = transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
-        transforms.Resize((512, 512))
-    ])
-
-
-    for img_path in img_paths:
-        img = Image.open(img_path).convert('RGB')
-        img_id = os.path.split(img_path)[-1].split('.')[0]
-        img.save(f'{outdir}/{img_id}_orig.png')
-        c = np.load(img_path.replace('png','npy'))
-        c = np.reshape(c,(1,25))
-        c = torch.FloatTensor(c).cuda()
-
-        from_im = trans(img).cuda()
-        id_image = torch.squeeze((from_im.cuda() + 1) / 2) * 255
-
-        if latent_space_type == 'w':
-            w = w_projector.project(G, c, outdir,id_image, device=torch.device('cuda'), w_avg_samples=600, num_steps = num_steps, w_name=img_id)
-        else:
-            w = w_plus_projector.project(G, c,outdir, id_image, device=torch.device('cuda'), w_avg_samples=600, w_name=img_id, num_steps = num_steps )
-
-        result_img = G.synthesis(w, c, noise_mode='const')['image']
-        vis_img = (result_img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
-        Image.fromarray(vis_img[0].cpu().numpy(), 'RGB').save(f'{outdir}/{img_id}_inv.png')
-
-        torch.save(w.detach().cpu(), f'{outdir}/{img_id}_inv.pt')
-
-# ----------------------------------------------------------------------------
-
-if __name__ == "__main__":
-    run()  # pylint: disable=no-value-for-parameter
-
-# ----------------------------------------------------------------------------
-
-
-
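-# Hedged CLI sketch (paths and the .pkl name are placeholders): each input
-# .png is expected to have a matching .npy holding the 25-value camera
-# conditioning, reshaped to (1, 25) above.
-#   python run_inversion.py --image_path imgs/ --c_path imgs \
-#       --network ffhq512-128.pkl --outdir out --latent_space_type w_plus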
diff --git a/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/grid_sample_gradfix.py b/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/grid_sample_gradfix.py
deleted file mode 100644
index 87067e150c591b1ace91816e7a5c3ee3a4aeacd3..0000000000000000000000000000000000000000
--- a/spaces/haakohu/deep_privacy2/sg3_torch_utils/ops/grid_sample_gradfix.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.  All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto.  Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom replacement for `torch.nn.functional.grid_sample` that
-supports arbitrarily high order gradients between the input and output.
-Only works on 2D images and assumes
-`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
-
-import torch
-from torch.cuda.amp import custom_bwd, custom_fwd
-from pkg_resources import parse_version
-# pylint: disable=redefined-builtin
-# pylint: disable=arguments-differ
-# pylint: disable=protected-access
-_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11
-
-
-#----------------------------------------------------------------------------
-
-enabled = False  # Enable the custom op by setting this to true.
-
-#----------------------------------------------------------------------------
-
-def grid_sample(input, grid):
-    if _should_use_custom_op():
-        return _GridSample2dForward.apply(input, grid)
-    return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-
-#----------------------------------------------------------------------------
-
-def _should_use_custom_op():
-    return enabled
-
-#----------------------------------------------------------------------------
-
-class _GridSample2dForward(torch.autograd.Function):
-    @staticmethod
-    @custom_fwd(cast_inputs=torch.float16)
-    def forward(ctx, input, grid):
-        assert input.ndim == 4
-        assert grid.ndim == 4
-        output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-        ctx.save_for_backward(input, grid)
-        return output
-
-    @staticmethod
-    @custom_bwd
-    def backward(ctx, grad_output):
-        input, grid = ctx.saved_tensors
-        grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
-        return grad_input, grad_grid
-
-#----------------------------------------------------------------------------
-
-class _GridSample2dBackward(torch.autograd.Function):
-    @staticmethod
-    @custom_fwd(cast_inputs=torch.float16)
-    def forward(ctx, grad_output, input, grid):
-        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
-        if _use_pytorch_1_11_api:
-            output_mask = (ctx.needs_input_grad[1], ctx.needs_input_grad[2])
-            grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, output_mask)
-        else:
-            grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
-        ctx.save_for_backward(grid)
-        return grad_input, grad_grid
-
-    @staticmethod
-    @custom_bwd
-    def backward(ctx, grad2_grad_input, grad2_grad_grid):
-        _ = grad2_grad_grid # unused
-        grid, = ctx.saved_tensors
-        grad2_grad_output = None
-        grad2_input = None
-        grad2_grid = None
-
-        if ctx.needs_input_grad[0]:
-            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
-
-        assert not ctx.needs_input_grad[2]
-        return grad2_grad_output, grad2_input, grad2_grid
-
-#----------------------------------------------------------------------------
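-
-# Hedged usage sketch (illustrative; import path assumed from this repo
-# layout): flip `enabled` on, then call grid_sample() as a drop-in for
-# torch.nn.functional.grid_sample with double-backward support.
-#
-#   import torch
-#   from sg3_torch_utils.ops import grid_sample_gradfix
-#   grid_sample_gradfix.enabled = True
-#   x = torch.randn(1, 3, 8, 8, requires_grad=True)
-#   grid = torch.zeros(1, 4, 4, 2, requires_grad=True)  # sample image centers
-#   y = grid_sample_gradfix.grid_sample(x, grid)        # shape (1, 3, 4, 4)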
diff --git a/spaces/hamacojr/CAT-Seg/cat_seg/utils/misc.py b/spaces/hamacojr/CAT-Seg/cat_seg/utils/misc.py
deleted file mode 100644
index 874d9805b482f52bbffc1be620e36e0cffc07c46..0000000000000000000000000000000000000000
--- a/spaces/hamacojr/CAT-Seg/cat_seg/utils/misc.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
-"""
-Misc functions, including distributed helpers.
-
-Mostly copy-paste from torchvision references.
-"""
-from typing import List, Optional
-
-import torch
-import torch.distributed as dist
-import torchvision
-from torch import Tensor
-
-
-def _max_by_axis(the_list):
-    # type: (List[List[int]]) -> List[int]
-    maxes = the_list[0]
-    for sublist in the_list[1:]:
-        for index, item in enumerate(sublist):
-            maxes[index] = max(maxes[index], item)
-    return maxes
-
-
-class NestedTensor(object):
-    def __init__(self, tensors, mask: Optional[Tensor]):
-        self.tensors = tensors
-        self.mask = mask
-
-    def to(self, device):
-        # type: (Device) -> NestedTensor # noqa
-        cast_tensor = self.tensors.to(device)
-        mask = self.mask
-        if mask is not None:
-            assert mask is not None
-            cast_mask = mask.to(device)
-        else:
-            cast_mask = None
-        return NestedTensor(cast_tensor, cast_mask)
-
-    def decompose(self):
-        return self.tensors, self.mask
-
-    def __repr__(self):
-        return str(self.tensors)
-
-
-def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
-    # TODO make this more general
-    if tensor_list[0].ndim == 3:
-        if torchvision._is_tracing():
-            # nested_tensor_from_tensor_list() does not export well to ONNX
-            # call _onnx_nested_tensor_from_tensor_list() instead
-            return _onnx_nested_tensor_from_tensor_list(tensor_list)
-
-        # TODO make it support different-sized images
-        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
-        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
-        batch_shape = [len(tensor_list)] + max_size
-        b, c, h, w = batch_shape
-        dtype = tensor_list[0].dtype
-        device = tensor_list[0].device
-        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
-        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
-        for img, pad_img, m in zip(tensor_list, tensor, mask):
-            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
-            m[: img.shape[1], : img.shape[2]] = False
-    else:
-        raise ValueError("not supported")
-    return NestedTensor(tensor, mask)
-
-
-# _onnx_nested_tensor_from_tensor_list() is an implementation of
-# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
-@torch.jit.unused
-def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
-    max_size = []
-    for i in range(tensor_list[0].dim()):
-        max_size_i = torch.max(
-            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
-        ).to(torch.int64)
-        max_size.append(max_size_i)
-    max_size = tuple(max_size)
-
-    # work around for
-    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
-    # m[: img.shape[1], :img.shape[2]] = False
-    # which is not yet supported in onnx
-    padded_imgs = []
-    padded_masks = []
-    for img in tensor_list:
-        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
-        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
-        padded_imgs.append(padded_img)
-
-        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
-        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
-        padded_masks.append(padded_mask.to(torch.bool))
-
-    tensor = torch.stack(padded_imgs)
-    mask = torch.stack(padded_masks)
-
-    return NestedTensor(tensor, mask=mask)
-
-
-def is_dist_avail_and_initialized():
-    if not dist.is_available():
-        return False
-    if not dist.is_initialized():
-        return False
-    return True
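-
-
-# Hedged usage sketch (illustrative): pad two different-sized images into one
-# batch; the mask is True wherever a position is padding.
-#
-#   imgs = [torch.rand(3, 4, 6), torch.rand(3, 5, 5)]
-#   nt = nested_tensor_from_tensor_list(imgs)
-#   tensors, mask = nt.decompose()  # tensors: (2, 3, 5, 6), mask: (2, 5, 6)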
diff --git a/spaces/hank1996/yolopv2/lib/utils/augmentations.py b/spaces/hank1996/yolopv2/lib/utils/augmentations.py
deleted file mode 100644
index 2a6744f63c81a8b197113a3e459e7ec00e0a7bf3..0000000000000000000000000000000000000000
--- a/spaces/hank1996/yolopv2/lib/utils/augmentations.py
+++ /dev/null
@@ -1,257 +0,0 @@
-
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import cv2
-import random
-import math
-
-
-def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
-    """change color hue, saturation, value"""
-    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
-    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
-    dtype = img.dtype  # uint8
-
-    x = np.arange(0, 256, dtype=np.int16)
-    lut_hue = ((x * r[0]) % 180).astype(dtype)
-    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
-    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
-
-    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
-    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
-
-    # Histogram equalization
-    # if random.random() < 0.2:
-    #     for i in range(3):
-    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])
-
-
-def random_perspective(combination, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
-    """combination of img transform"""
-    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
-    # targets = [cls, xyxy]
-    img, gray, line = combination
-    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
-    width = img.shape[1] + border[1] * 2
-
-    # Center
-    C = np.eye(3)
-    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
-    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)
-
-    # Perspective
-    P = np.eye(3)
-    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
-    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
-
-    # Rotation and Scale
-    R = np.eye(3)
-    a = random.uniform(-degrees, degrees)
-    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
-    s = random.uniform(1 - scale, 1 + scale)
-    # s = 2 ** random.uniform(-scale, scale)
-    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
-
-    # Shear
-    S = np.eye(3)
-    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
-    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
-
-    # Translation
-    T = np.eye(3)
-    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
-    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
-
-    # Combined rotation matrix
-    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
-    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
-        if perspective:
-            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
-            gray = cv2.warpPerspective(gray, M, dsize=(width, height), borderValue=0)
-            line = cv2.warpPerspective(line, M, dsize=(width, height), borderValue=0)
-        else:  # affine
-            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
-            gray = cv2.warpAffine(gray, M[:2], dsize=(width, height), borderValue=0)
-            line = cv2.warpAffine(line, M[:2], dsize=(width, height), borderValue=0)
-
-    # Visualize
-    # import matplotlib.pyplot as plt
-    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
-    # ax[0].imshow(img[:, :, ::-1])  # base
-    # ax[1].imshow(img2[:, :, ::-1])  # warped
-
-    # Transform label coordinates
-    n = len(targets)
-    if n:
-        # warp points
-        xy = np.ones((n * 4, 3))
-        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
-        xy = xy @ M.T  # transform
-        if perspective:
-            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
-        else:  # affine
-            xy = xy[:, :2].reshape(n, 8)
-
-        # create new boxes
-        x = xy[:, [0, 2, 4, 6]]
-        y = xy[:, [1, 3, 5, 7]]
-        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
-
-        # # apply angle-based reduction of bounding boxes
-        # radians = a * math.pi / 180
-        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
-        # x = (xy[:, 2] + xy[:, 0]) / 2
-        # y = (xy[:, 3] + xy[:, 1]) / 2
-        # w = (xy[:, 2] - xy[:, 0]) * reduction
-        # h = (xy[:, 3] - xy[:, 1]) * reduction
-        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
-
-        # clip boxes
-        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
-        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
-
-        # filter candidates
-        i = _box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
-        targets = targets[i]
-        targets[:, 1:5] = xy[i]
-
-    combination = (img, gray, line)
-    return combination, targets
-
-
-def cutout(combination, labels):
-    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
-    image, gray = combination
-    h, w = image.shape[:2]
-
-    def bbox_ioa(box1, box2):
-        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
-        box2 = box2.transpose()
-
-        # Get the coordinates of bounding boxes
-        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
-        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
-
-        # Intersection area
-        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
-                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
-
-        # box2 area
-        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
-
-        # Intersection over box2 area
-        return inter_area / box2_area
-
-    # create random masks
-    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
-    for s in scales:
-        mask_h = random.randint(1, int(h * s))
-        mask_w = random.randint(1, int(w * s))
-
-        # box
-        xmin = max(0, random.randint(0, w) - mask_w // 2)
-        ymin = max(0, random.randint(0, h) - mask_h // 2)
-        xmax = min(w, xmin + mask_w)
-        ymax = min(h, ymin + mask_h)
-        # print('xmin:{},ymin:{},xmax:{},ymax:{}'.format(xmin,ymin,xmax,ymax))
-
-        # apply random color mask
-        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
-        gray[ymin:ymax, xmin:xmax] = -1
-
-        # return unobscured labels
-        if len(labels) and s > 0.03:
-            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
-            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
-            labels = labels[ioa < 0.60]  # remove >60% obscured labels
-
-    return image, gray, labels
-
-
-def letterbox(combination, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
-    """Resize the input image and automatically padding to suitable shape :https://zhuanlan.zhihu.com/p/172121380"""
-    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
-    img, gray, line = combination
-    shape = img.shape[:2]  # current shape [height, width]
-    if isinstance(new_shape, int):
-        new_shape = (new_shape, new_shape)
-
-    # Scale ratio (new / old)
-    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-    if not scaleup:  # only scale down, do not scale up (for better test mAP)
-        r = min(r, 1.0)
-
-    # Compute padding
-    ratio = r, r  # width, height ratios
-    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
-    if auto:  # minimum rectangle
-        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
-    elif scaleFill:  # stretch
-        dw, dh = 0.0, 0.0
-        new_unpad = (new_shape[1], new_shape[0])
-        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
-
-    dw /= 2  # divide padding into 2 sides
-    dh /= 2
-
-    if shape[::-1] != new_unpad:  # resize
-        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
-        gray = cv2.resize(gray, new_unpad, interpolation=cv2.INTER_LINEAR)
-        line = cv2.resize(line, new_unpad, interpolation=cv2.INTER_LINEAR)
-
-    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
-    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-
-    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
-    gray = cv2.copyMakeBorder(gray, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)  # add border
-    line = cv2.copyMakeBorder(line, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)  # add border
-    # print(img.shape)
-    
-    combination = (img, gray, line)
-    return combination, ratio, (dw, dh)
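-
-# e.g. letterboxing a 720x1280 frame to 640 with auto=True: r = 0.5 gives a 640x360 resize,
-# dh = 280 becomes 280 mod 32 = 24 and is split as 12 px top and bottom, so the output is
-# 640x384 with ratio (0.5, 0.5) and padding (0.0, 12.0)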
-
-def letterbox_for_img(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
-    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
-    shape = img.shape[:2]  # current shape [height, width]
-    if isinstance(new_shape, int):
-        new_shape = (new_shape, new_shape)
-
-    # Scale ratio (new / old)
-    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-    if not scaleup:  # only scale down, do not scale up (for better test mAP)
-        r = min(r, 1.0)
-
-    # Compute padding
-    ratio = r, r  # width, height ratios
-    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-
-    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
-
-    if auto:  # minimum rectangle
-        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
-
-    elif scaleFill:  # stretch
-        dw, dh = 0.0, 0.0
-        new_unpad = (new_shape[1], new_shape[0])
-        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
-
-    dw /= 2  # divide padding into 2 sides
-    dh /= 2
-    if shape[::-1] != new_unpad:  # resize
-        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)
-
-    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
-    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
-    return img, ratio, (dw, dh)
-
-
-def _box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # box1(4,n), box2(4,n)
-    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
-    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
-    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
-    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
-    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr)  # candidates
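-
-# e.g. a 40x40 box squeezed to 12x3 by augmentation passes the wh and aspect-ratio checks
-# (3 > 2 px, ar = 4 < 20) but fails the area check (36 / 1600 = 0.0225 < 0.1), so it is dropped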
diff --git a/spaces/haoqi7/research/inference_hf/__init__.py b/spaces/haoqi7/research/inference_hf/__init__.py
deleted file mode 100644
index fc0d43df0f7739b74e5b4c53b898bc2467717d24..0000000000000000000000000000000000000000
--- a/spaces/haoqi7/research/inference_hf/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from ._inference import InferenceHF
\ No newline at end of file
diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/box_label_loader.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/box_label_loader.py
deleted file mode 100644
index fddd7aa2b5b859d7b410cacd9c61800fd7190c51..0000000000000000000000000000000000000000
--- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/box_label_loader.py
+++ /dev/null
@@ -1,251 +0,0 @@
-import torch
-import numpy as np
-import math
-import base64
-import collections
-import pycocotools.mask as mask_utils
-
-from maskrcnn_benchmark.structures.bounding_box import BoxList
-from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
-
-
-class LabelLoader(object):
-    def __init__(self, labelmap, extra_fields=(), filter_duplicate_relations=False, ignore_attr=None, ignore_rel=None,
-                 mask_mode="poly"):
-        self.labelmap = labelmap
-        self.extra_fields = extra_fields
-        self.supported_fields = ["class", "conf", "attributes", 'scores_all', 'boxes_all', 'feature', "mask"]
-        self.filter_duplicate_relations = filter_duplicate_relations
-        self.ignore_attr = set(ignore_attr) if ignore_attr is not None else set()
-        self.ignore_rel = set(ignore_rel) if ignore_rel is not None else set()
-        assert mask_mode in ("poly", "mask")
-        self.mask_mode = mask_mode
-
-    def __call__(self, annotations, img_size, remove_empty=False, load_fields=None):
-        boxes = [obj["rect"] for obj in annotations]
-        boxes = torch.as_tensor(boxes).reshape(-1, 4)
-        target = BoxList(boxes, img_size, mode="xyxy")
-
-        if load_fields is None:
-            load_fields = self.extra_fields
-
-        for field in load_fields:
-            assert field in self.supported_fields, "Unsupported field {}".format(field)
-            if field == "class":
-                classes = self.add_classes(annotations)
-                target.add_field("labels", classes)
-            elif field == "conf":
-                confidences = self.add_confidences(annotations)
-                target.add_field("scores", confidences)
-            elif field == "attributes":
-                attributes = self.add_attributes(annotations)
-                target.add_field("attributes", attributes)
-            elif field == "scores_all":
-                scores_all = self.add_scores_all(annotations)
-                target.add_field("scores_all", scores_all)
-            elif field == "boxes_all":
-                boxes_all = self.add_boxes_all(annotations)
-                target.add_field("boxes_all", boxes_all)
-            elif field == "feature":
-                features = self.add_features(annotations)
-                target.add_field("box_features", features)
-            elif field == "mask":
-                masks, is_box_mask = self.add_masks(annotations, img_size)
-                target.add_field("masks", masks)
-                target.add_field("is_box_mask", is_box_mask)
-
-        target = target.clip_to_image(remove_empty=remove_empty)
-        return target
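-
-    # hypothetical call: LabelLoader(labelmap, extra_fields=("class", "conf"))(annotations, (640, 480))
-    # with annotations like [{"rect": [10, 20, 110, 220], "class": "dog", "conf": 0.9}]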
-
-    def get_box_mask(self, rect, img_size):
-        x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
-        if self.mask_mode == "poly":
-            return [[x1, y1, x1, y2, x2, y2, x2, y1]]
-        elif self.mask_mode == "mask":
-            # note that the height/width order in the mask is opposite to that of the image
-            mask = np.zeros([img_size[1], img_size[0]], dtype=np.uint8)
-            mask[math.floor(y1):math.ceil(y2), math.floor(x1):math.ceil(x2)] = 255
-            encoded_mask = mask_utils.encode(np.asfortranarray(mask))
-            encoded_mask["counts"] = encoded_mask["counts"].decode("utf-8")
-            return encoded_mask
-
-    def add_masks(self, annotations, img_size):
-        masks = []
-        is_box_mask = []
-        for obj in annotations:
-            if "mask" in obj:
-                masks.append(obj["mask"])
-                is_box_mask.append(0)
-            else:
-                masks.append(self.get_box_mask(obj["rect"], img_size))
-                is_box_mask.append(1)
-        masks = SegmentationMask(masks, img_size, mode=self.mask_mode)
-        is_box_mask = torch.tensor(is_box_mask)
-        return masks, is_box_mask
-
-    def add_classes(self, annotations):
-        class_names = [obj["class"] for obj in annotations]
-        classes = [None] * len(class_names)
-        for i in range(len(class_names)):
-            classes[i] = self.labelmap['class_to_ind'][class_names[i]]
-        return torch.tensor(classes)
-
-    def add_confidences(self, annotations):
-        confidences = []
-        for obj in annotations:
-            if "conf" in obj:
-                confidences.append(obj["conf"])
-            else:
-                confidences.append(1.0)
-        return torch.tensor(confidences)
-
-    def add_attributes(self, annotations):
-        # the maximal number of attributes per object is 16
-        attributes = [[0] * 16 for _ in range(len(annotations))]
-        for i, obj in enumerate(annotations):
-            for j, attr in enumerate(obj["attributes"]):
-                attributes[i][j] = self.labelmap['attribute_to_ind'][attr]
-        return torch.tensor(attributes)
-
-    def add_features(self, annotations):
-        features = []
-        for obj in annotations:
-            features.append(np.frombuffer(base64.b64decode(obj['feature']), np.float32))
-        return torch.tensor(features)
-
-    def add_scores_all(self, annotations):
-        scores_all = []
-        for obj in annotations:
-            scores_all.append(np.frombuffer(base64.b64decode(obj['scores_all']), np.float32))
-        return torch.tensor(scores_all)
-
-    def add_boxes_all(self, annotations):
-        boxes_all = []
-        for obj in annotations:
-            boxes_all.append(np.frombuffer(base64.b64decode(obj['boxes_all']), np.float32).reshape(-1, 4))
-        return torch.tensor(boxes_all)
-
-    def relation_loader(self, relation_annos, target):
-        if self.filter_duplicate_relations:
-            # Filter out dupes!
-            all_rel_sets = collections.defaultdict(list)
-            for triplet in relation_annos:
-                all_rel_sets[(triplet['subj_id'], triplet['obj_id'])].append(triplet)
-            relation_annos = [np.random.choice(v) for v in all_rel_sets.values()]
-
-        # get M*M pred_labels
-        relation_triplets = []
-        relations = torch.zeros([len(target), len(target)], dtype=torch.int64)
-        for i in range(len(relation_annos)):
-            if len(self.ignore_rel) != 0 and relation_annos[i]['class'] in self.ignore_rel:
-                continue
-            subj_id = relation_annos[i]['subj_id']
-            obj_id = relation_annos[i]['obj_id']
-            predicate = self.labelmap['relation_to_ind'][relation_annos[i]['class']]
-            relations[subj_id, obj_id] = predicate
-            relation_triplets.append([subj_id, obj_id, predicate])
-
-        relation_triplets = torch.tensor(relation_triplets)
-        target.add_field("relation_labels", relation_triplets)
-        target.add_field("pred_labels", relations)
-        return target
-
-
-class BoxLabelLoader(object):
-    def __init__(self, labelmap, extra_fields=(), ignore_attrs=(),
-                 mask_mode="poly"):
-        self.labelmap = labelmap
-        self.extra_fields = extra_fields
-        self.ignore_attrs = ignore_attrs
-        assert mask_mode in ("poly", "mask")
-        self.mask_mode = mask_mode
-        self.all_fields = ["class", "mask", "confidence",
-                           "attributes_encode", "IsGroupOf", "IsProposal"]
-
-    def __call__(self, annotations, img_size, remove_empty=True):
-        boxes = [obj["rect"] for obj in annotations]
-        boxes = torch.as_tensor(boxes).reshape(-1, 4)
-        target = BoxList(boxes, img_size, mode="xyxy")
-
-        for field in self.extra_fields:
-            assert field in self.all_fields, "Unsupported field {}".format(field)
-            if field == "class":
-                classes = self.add_classes_with_ignore(annotations)
-                target.add_field("labels", classes)
-            elif field == "mask":
-                masks, is_box_mask = self.add_masks(annotations, img_size)
-                target.add_field("masks", masks)
-                target.add_field("is_box_mask", is_box_mask)
-            elif field == "confidence":
-                confidences = self.add_confidences(annotations)
-                target.add_field("confidences", confidences)
-            elif field == "attributes_encode":
-                attributes = self.add_attributes(annotations)
-                target.add_field("attributes", attributes)
-            elif field == "IsGroupOf":
-                is_group = [1 if 'IsGroupOf' in obj and obj['IsGroupOf'] == 1 else 0
-                            for obj in annotations]
-                target.add_field("IsGroupOf", torch.tensor(is_group))
-            elif field == "IsProposal":
-                is_proposal = [1 if "IsProposal" in obj and obj['IsProposal'] == 1 else 0
-                               for obj in annotations]
-                target.add_field("IsProposal", torch.tensor(is_proposal))
-
-        target = target.clip_to_image(remove_empty=remove_empty)
-        return target
-
-    def add_classes_with_ignore(self, annotations):
-        class_names = [obj["class"] for obj in annotations]
-        classes = [None] * len(class_names)
-        if self.ignore_attrs:
-            for i, obj in enumerate(annotations):
-                if any([obj[attr] for attr in self.ignore_attrs if attr in obj]):
-                    classes[i] = -1
-        for i, cls in enumerate(classes):
-            if cls != -1:
-                classes[i] = self.labelmap[class_names[i]] + 1  # 0 is saved for background
-        return torch.tensor(classes)
-
-    def add_masks(self, annotations, img_size):
-        masks = []
-        is_box_mask = []
-        for obj in annotations:
-            if "mask" in obj:
-                masks.append(obj["mask"])
-                is_box_mask.append(0)
-            else:
-                masks.append(self.get_box_mask(obj["rect"], img_size))
-                is_box_mask.append(1)
-        masks = SegmentationMask(masks, img_size, mode=self.mask_mode)
-        is_box_mask = torch.tensor(is_box_mask)
-        return masks, is_box_mask
-
-    def get_box_mask(self, rect, img_size):
-        x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
-        if self.mask_mode == "poly":
-            return [[x1, y1, x1, y2, x2, y2, x2, y1]]
-        elif self.mask_mode == "mask":
-            # note that the height/width order in the mask is opposite to that of the image
-            mask = np.zeros([img_size[1], img_size[0]], dtype=np.uint8)
-            mask[math.floor(y1):math.ceil(y2), math.floor(x1):math.ceil(x2)] = 255
-            encoded_mask = mask_utils.encode(np.asfortranarray(mask))
-            encoded_mask["counts"] = encoded_mask["counts"].decode("utf-8")
-            return encoded_mask
-
-    def add_confidences(self, annotations):
-        confidences = []
-        for obj in annotations:
-            if "confidence" in obj:
-                confidences.append(obj["confidence"])
-            elif "conf" in obj:
-                confidences.append(obj["conf"])
-            else:
-                confidences.append(1.0)
-        return torch.tensor(confidences)
-
-    def add_attributes(self, annotations):
-        # we know that the maximal number of attributes per object is 16
-        attributes = [[0] * 16 for _ in range(len(annotations))]
-        for i, obj in enumerate(annotations):
-            attributes[i][:len(obj["attributes_encode"])] = obj["attributes_encode"]
-        return torch.tensor(attributes)
diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/rpn/transformer.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/rpn/transformer.py
deleted file mode 100644
index 72e108a1a2bf628ced2161c0a6d5a1b28e654bcd..0000000000000000000000000000000000000000
--- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/rpn/transformer.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn, Tensor
-
-import copy
-from typing import Optional, List
-
-
-def _get_clones(module, N):
-    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
-
-
-def _get_activation_fn(activation):
-    """Return an activation function given a string"""
-    if activation == "relu":
-        return F.relu
-    if activation == "gelu":
-        return F.gelu
-    if activation == "glu":
-        return F.glu
-    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
-
-
-class TransformerEncoderLayer(nn.Module):
-    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
-                 activation="relu", normalize_before=False):
-        super(TransformerEncoderLayer, self).__init__()
-        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-        # Implementation of Feedforward model
-        self.linear1 = nn.Linear(d_model, dim_feedforward)
-        self.dropout = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(dim_feedforward, d_model)
-
-        self.norm1 = nn.LayerNorm(d_model)
-        self.norm2 = nn.LayerNorm(d_model)
-        self.dropout1 = nn.Dropout(dropout)
-        self.dropout2 = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-    def forward(self, src,
-                src_mask: Optional[Tensor] = None,
-                src_key_padding_mask: Optional[Tensor] = None):
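-        # post-norm flow: self-attention -> residual add -> LayerNorm, then FFN -> residual add -> LayerNorm
-        # (normalize_before is stored in __init__ but this forward only implements the post-norm path)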
-        src2 = self.self_attn(src, src, src, attn_mask=src_mask,
-                              key_padding_mask=src_key_padding_mask)[0]
-        src = src + self.dropout1(src2)
-        src = self.norm1(src)
-        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
-        src = src + self.dropout2(src2)
-        src = self.norm2(src)
-        return src
diff --git a/spaces/herder/DragDiffusion/app.py b/spaces/herder/DragDiffusion/app.py
deleted file mode 100644
index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000
--- a/spaces/herder/DragDiffusion/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA3.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA3.py
deleted file mode 100644
index cf282e5c9a0c4e840d34517d2e95c3821ea7f535..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA3.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#    Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-import numpy as np
-import torch
-from batchgenerators.utilities.file_and_folder_operations import join
-from nnunet.network_architecture.generic_UNet import Generic_UNet
-from nnunet.network_architecture.initialization import InitWeights_He
-from nnunet.network_architecture.neural_network import SegmentationNetwork
-from nnunet.training.data_augmentation.data_augmentation_insaneDA2 import get_insaneDA_augmentation2
-from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
-    default_2D_augmentation_params, get_patch_size
-from nnunet.training.dataloading.dataset_loading import unpack_dataset
-from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
-from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2, maybe_mkdir_p
-from nnunet.utilities.nd_softmax import softmax_helper
-from torch import nn
-
-
-class nnUNetTrainerV2_DA3(nnUNetTrainerV2):
-    def setup_DA_params(self):
-        super().setup_DA_params()
-        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
-            np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
-
-        if self.threeD:
-            self.data_aug_params = default_3D_augmentation_params
-            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
-            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
-            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
-            if self.do_dummy_2D_aug:
-                self.data_aug_params["dummy_2D"] = True
-                self.print_to_log_file("Using dummy2d data augmentation")
-                self.data_aug_params["elastic_deform_alpha"] = \
-                    default_2D_augmentation_params["elastic_deform_alpha"]
-                self.data_aug_params["elastic_deform_sigma"] = \
-                    default_2D_augmentation_params["elastic_deform_sigma"]
-                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
-        else:
-            self.do_dummy_2D_aug = False
-            if max(self.patch_size) / min(self.patch_size) > 1.5:
-                default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
-            self.data_aug_params = default_2D_augmentation_params
-        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
-
-        if self.do_dummy_2D_aug:
-            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
-                                                             self.data_aug_params['rotation_x'],
-                                                             self.data_aug_params['rotation_y'],
-                                                             self.data_aug_params['rotation_z'],
-                                                             self.data_aug_params['scale_range'])
-            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
-        else:
-            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
-                                                             self.data_aug_params['rotation_y'],
-                                                             self.data_aug_params['rotation_z'],
-                                                             self.data_aug_params['scale_range'])
-
-        self.data_aug_params['selected_seg_channels'] = [0]
-        self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size
-
-        self.data_aug_params["p_rot"] = 0.3
-
-        self.data_aug_params["scale_range"] = (0.65, 1.6)
-        self.data_aug_params["p_scale"] = 0.3
-        self.data_aug_params["independent_scale_factor_for_each_axis"] = True
-        self.data_aug_params["p_independent_scale_per_axis"] = 0.3
-
-        self.data_aug_params["do_elastic"] = True
-        self.data_aug_params["p_eldef"] = 0.3
-        self.data_aug_params["eldef_deformation_scale"] = (0, 0.25)
-
-        self.data_aug_params["do_additive_brightness"] = True
-        self.data_aug_params["additive_brightness_mu"] = 0
-        self.data_aug_params["additive_brightness_sigma"] = 0.2
-        self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
-        self.data_aug_params["additive_brightness_p_per_channel"] = 1
-
-        self.data_aug_params['gamma_range'] = (0.5, 1.6)
-
-        self.data_aug_params['num_cached_per_thread'] = 4
-
-    def initialize(self, training=True, force_load_plans=False):
-        if not self.was_initialized:
-            maybe_mkdir_p(self.output_folder)
-
-            if force_load_plans or (self.plans is None):
-                self.load_plans_file()
-
-            self.process_plans(self.plans)
-
-            self.setup_DA_params()
-
-            ################# Here we wrap the loss for deep supervision ############
-            # we need to know the number of outputs of the network
-            net_numpool = len(self.net_num_pool_op_kernel_sizes)
-
-            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
-            # this gives higher resolution outputs more weight in the loss
-            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
-
-            # we don't use the lowest-resolution output. Normalize weights so that they sum to 1
-            mask = np.array([True] + [i < net_numpool - 1 for i in range(1, net_numpool)])
-            weights[~mask] = 0
-            weights = weights / weights.sum()
-            self.ds_loss_weights = weights
-            # now wrap the loss
-            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
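-            # e.g. with 5 pooling stages: raw weights [1, 0.5, 0.25, 0.125, 0.0625]; the lowest
-            # resolution is zeroed and the rest renormalize to ~[0.533, 0.267, 0.133, 0.067, 0]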
-            ################# END ###################
-
-            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
-                                                      "_stage%d" % self.stage)
-            if training:
-                self.dl_tr, self.dl_val = self.get_basic_generators()
-                if self.unpack_data:
-                    print("unpacking dataset")
-                    unpack_dataset(self.folder_with_preprocessed_data)
-                    print("done")
-                else:
-                    print(
-                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
-                        "will wait all winter for your model to finish!")
-
-                self.tr_gen, self.val_gen = get_insaneDA_augmentation2(
-                    self.dl_tr, self.dl_val,
-                    self.data_aug_params[
-                        'patch_size_for_spatialtransform'],
-                    self.data_aug_params,
-                    deep_supervision_scales=self.deep_supervision_scales,
-                    pin_memory=self.pin_memory
-                )
-                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
-                                       also_print_to_console=False)
-                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
-                                       also_print_to_console=False)
-            else:
-                pass
-
-            self.initialize_network()
-            self.initialize_optimizer_and_scheduler()
-
-            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
-        else:
-            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
-        self.was_initialized = True
-
-    """def run_training(self):
-        from batchviewer import view_batch
-
-        a = next(self.tr_gen)
-        view_batch(a['data'][:, 0], width=512, height=512)
-
-        import IPython;IPython.embed()"""
-
-
-class nnUNetTrainerV2_DA3_BN(nnUNetTrainerV2_DA3):
-    def initialize_network(self):
-        if self.threeD:
-            conv_op = nn.Conv3d
-            dropout_op = nn.Dropout3d
-            norm_op = nn.BatchNorm3d
-
-        else:
-            conv_op = nn.Conv2d
-            dropout_op = nn.Dropout2d
-            norm_op = nn.BatchNorm2d
-
-        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
-        dropout_op_kwargs = {'p': 0, 'inplace': True}
-        net_nonlin = nn.LeakyReLU
-        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
-        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
-                                    len(self.net_num_pool_op_kernel_sizes),
-                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
-                                    dropout_op_kwargs,
-                                    net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
-                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
-        if torch.cuda.is_available():
-            self.network.cuda()
-        self.network.inference_apply_nonlin = softmax_helper
diff --git a/spaces/hojumoney/WarriorMama777-OrangeMixs/README.md b/spaces/hojumoney/WarriorMama777-OrangeMixs/README.md
deleted file mode 100644
index 8c726e28a0f7c425b1aaac97b59ad4f8c568d186..0000000000000000000000000000000000000000
--- a/spaces/hojumoney/WarriorMama777-OrangeMixs/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: WarriorMama777 OrangeMixs
-emoji: 👀
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/hrdtbs/rvc-mochinoa/app.py b/spaces/hrdtbs/rvc-mochinoa/app.py
deleted file mode 100644
index afa4d3bff77cda1983cf6e1b7fc808bcc9414af4..0000000000000000000000000000000000000000
--- a/spaces/hrdtbs/rvc-mochinoa/app.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import os
-import json
-import traceback
-import logging
-import gradio as gr
-import numpy as np
-import librosa
-import torch
-import asyncio
-import edge_tts
-from datetime import datetime
-from fairseq import checkpoint_utils
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
-from vc_infer_pipeline import VC
-from config import Config
-config = Config()
-logging.getLogger("numba").setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces
-
-def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index):
-    def vc_fn(
-        input_audio,
-        f0_up_key,
-        f0_method,
-        index_rate,
-        tts_mode,
-        tts_text,
-        tts_voice
-    ):
-        try:
-            if tts_mode:
-                if len(tts_text) > 100 and limitation:
-                    return "Text is too long", None
-                if tts_text is None or tts_voice is None:
-                    return "You need to enter text and select a voice", None
-                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
-                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
-            else:
-                if input_audio is None:
-                    return "You need to upload an audio", None
-                sampling_rate, audio = input_audio
-                duration = audio.shape[0] / sampling_rate
-                if duration > 20 and limitation:
-                    return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                if len(audio.shape) > 1:
-                    audio = librosa.to_mono(audio.transpose(1, 0))
-                if sampling_rate != 16000:
-                    audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-            times = [0, 0, 0]
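-            # vc.pipeline fills times in place with per-stage durations: [npy, f0, infer] (see the log line below)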
-            f0_up_key = int(f0_up_key)
-            audio_opt = vc.pipeline(
-                hubert_model,
-                net_g,
-                0,
-                audio,
-                times,
-                f0_up_key,
-                f0_method,
-                file_index,
-                index_rate,
-                if_f0,
-                f0_file=None,
-            )
-            print(
-                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-            )
-            return (tgt_sr, audio_opt)
-        except BaseException:
-            info = traceback.format_exc()
-            print(info)
-            return info, (None, None)
-    return vc_fn
-
-def load_hubert():
-    global hubert_model
-    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-        ["hubert_base.pt"],
-        suffix="",
-    )
-    hubert_model = models[0]
-    hubert_model = hubert_model.to(config.device)
-    if config.is_half:
-        hubert_model = hubert_model.half()
-    else:
-        hubert_model = hubert_model.float()
-    hubert_model.eval()
-
-def change_to_tts_mode(tts_mode):
-    if tts_mode:
-        return gr.Audio.update(visible=False), gr.Checkbox.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
-    else:
-        return gr.Audio.update(visible=True), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
-def change_to_microphone_mode(microphone_mode):
-    if microphone_mode:
-        return gr.Audio.update(source="microphone")
-    else:
-        return gr.Audio.update(source="upload")
-
-if __name__ == '__main__':
-    load_hubert()
-    tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
-    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
-    models = []
-    with open("weights/model_info.json", "r", encoding="utf-8") as f:
-        models_info = json.load(f)
-    for model_name, info in models_info.items():
-        if not info['enable']:
-            continue
-        model_title = info['title']
-        model_author = info.get("author", None)
-        model_cover = f"weights/{model_name}/{info['cover']}"
-        model_index = f"weights/{model_name}/{info['feature_retrieval_library']}"
-        cpt = torch.load(f"weights/{model_name}/{model_name}.pth", map_location="cpu")
-        tgt_sr = cpt["config"][-1]
-        cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-        if_f0 = cpt.get("f0", 1)
-        if if_f0 == 1:
-            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-        else:
-            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-        del net_g.enc_q
-        print(net_g.load_state_dict(cpt["weight"], strict=False))
-        net_g.eval().to(config.device)
-        if config.is_half:
-            net_g = net_g.half()
-        else:
-            net_g = net_g.float()
-        vc = VC(tgt_sr, config)
-        print(f"Model loaded: {model_name}")
-        models.append((model_name, model_title, model_author, model_cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, model_index)))
-    with gr.Blocks() as app:
-        gr.Markdown(
-            "# <center> RVC Mochinoa\n"
-            "望月のあ Mochizuki Noa\n\n"
-            "YouTube: https://www.youtube.com/@MochizukiNoa \n\n"
-            "Twitter: https://twitter.com/_noach \n\n"
-            "When using the RVC model `mochinoa` included in this project, or any audio created with it, please follow the `mochinoa` model's [license](https://huggingface.co/spaces/hrdtbs/rvc-mochinoa/blob/main/weights/mochinoa/LICENCE.md). Except for this model, this project is released under the MIT license.\n\n"
-        )
-
-        with gr.Tabs():
-            for (name, title, author, cover, vc_fn) in models:
-                with gr.TabItem(name):
-                    with gr.Row():
-                        gr.Markdown(
-                            '<div align="center">'
-                            f'<div>{title}</div>\n'+
-                            (f'<div>Model author: {author}</div>' if author else "")+
-                            (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
-                            '</div>'
-                        )
-                    with gr.Row():
-                        with gr.Column():
-                            vc_input = gr.Audio(source="upload", label="Input audio" + (' (less than 20 seconds)' if limitation else ''))
-                            vc_microphone_mode = gr.Checkbox(label="use microphone", value=False)
-                            
-                            vc_transpose = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice')
-                            vc_f0method = gr.Radio(
-                                label="Pitch extraction algorithm",
-                                choices=["pm", "harvest"],
-                                value="pm",
-                                interactive=True,
-                                info="PM is fast but Harvest is better for low frequencies. (Default: PM)"
-                            )
-                            vc_index_ratio = gr.Slider(
-                                minimum=0,
-                                maximum=1,
-                                label="Retrieval feature ratio",
-                                value=0.6,
-                                interactive=True,
-                                info="(Default: 0.6)"
-                            )
-                            tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
-                            tts_text = gr.Textbox(visible=False, label="TTS text (100 characters limit)" if limitation else "TTS text")
-                            tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
-                            vc_submit = gr.Button("Generate", variant="primary")
-                        with gr.Column():
-                            vc_output = gr.Audio(label="Output Audio")
-                vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output])
-                tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, vc_microphone_mode, tts_text, tts_voice])
-                vc_microphone_mode.change(change_to_microphone_mode, [vc_microphone_mode], [vc_input]) 
-        app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab)
\ No newline at end of file
diff --git a/spaces/hstrejoluna/dreambooth-training/convertosd.py b/spaces/hstrejoluna/dreambooth-training/convertosd.py
deleted file mode 100644
index 1211d34edf018b7c402a765c5a7ecdb684cc28e3..0000000000000000000000000000000000000000
--- a/spaces/hstrejoluna/dreambooth-training/convertosd.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
-# *Only* converts the UNet, VAE, and Text Encoder.
-# Does not convert optimizer state or any other thing.
-
-import argparse
-import os.path as osp
-import re
-
-import torch
-import gc
-
-# =================#
-# UNet Conversion #
-# =================#
-
-unet_conversion_map = [
-    # (stable-diffusion, HF Diffusers)
-    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
-    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
-    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
-    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
-    ("input_blocks.0.0.weight", "conv_in.weight"),
-    ("input_blocks.0.0.bias", "conv_in.bias"),
-    ("out.0.weight", "conv_norm_out.weight"),
-    ("out.0.bias", "conv_norm_out.bias"),
-    ("out.2.weight", "conv_out.weight"),
-    ("out.2.bias", "conv_out.bias"),
-]
-
-unet_conversion_map_resnet = [
-    # (stable-diffusion, HF Diffusers)
-    ("in_layers.0", "norm1"),
-    ("in_layers.2", "conv1"),
-    ("out_layers.0", "norm2"),
-    ("out_layers.3", "conv2"),
-    ("emb_layers.1", "time_emb_proj"),
-    ("skip_connection", "conv_shortcut"),
-]
-
-unet_conversion_map_layer = []
-# hardcoded number of downblocks and resnets/attentions...
-# would need smarter logic for other networks.
-for i in range(4):
-    # loop over downblocks/upblocks
-
-    for j in range(2):
-        # loop over resnets/attentions for downblocks
-        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
-        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
-        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
-
-        if i < 3:
-            # no attention layers in down_blocks.3
-            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
-            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
-            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
-
-    for j in range(3):
-        # loop over resnets/attentions for upblocks
-        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
-        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
-        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
-
-        if i > 0:
-            # no attention layers in up_blocks.0
-            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
-            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
-            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
-
-    if i < 3:
-        # no downsample in down_blocks.3
-        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
-        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
-        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
-
-        # no upsample in up_blocks.3
-        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
-        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
-
-hf_mid_atn_prefix = "mid_block.attentions.0."
-sd_mid_atn_prefix = "middle_block.1."
-unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
-
-for j in range(2):
-    hf_mid_res_prefix = f"mid_block.resnets.{j}."
-    sd_mid_res_prefix = f"middle_block.{2*j}."
-    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
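-
-# e.g. this maps HF "down_blocks.0.resnets.0." to SD "input_blocks.1.0." and
-# HF "mid_block.resnets.0." to SD "middle_block.0."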
-
-
-def convert_unet_state_dict(unet_state_dict):
-    # buyer beware: this is a *brittle* function,
-    # and correct output requires that all of these pieces interact in
-    # the exact order in which I have arranged them.
-    mapping = {k: k for k in unet_state_dict.keys()}
-    for sd_name, hf_name in unet_conversion_map:
-        mapping[hf_name] = sd_name
-    for k, v in mapping.items():
-        if "resnets" in k:
-            for sd_part, hf_part in unet_conversion_map_resnet:
-                v = v.replace(hf_part, sd_part)
-            mapping[k] = v
-    for k, v in mapping.items():
-        for sd_part, hf_part in unet_conversion_map_layer:
-            v = v.replace(hf_part, sd_part)
-        mapping[k] = v
-    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
-    return new_state_dict
-
-
-# ================#
-# VAE Conversion #
-# ================#
-
-vae_conversion_map = [
-    # (stable-diffusion, HF Diffusers)
-    ("nin_shortcut", "conv_shortcut"),
-    ("norm_out", "conv_norm_out"),
-    ("mid.attn_1.", "mid_block.attentions.0."),
-]
-
-for i in range(4):
-    # down_blocks have two resnets
-    for j in range(2):
-        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
-        sd_down_prefix = f"encoder.down.{i}.block.{j}."
-        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
-
-    if i < 3:
-        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
-        sd_downsample_prefix = f"down.{i}.downsample."
-        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
-
-        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-        sd_upsample_prefix = f"up.{3-i}.upsample."
-        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
-
-    # up_blocks have three resnets
-    # also, up blocks in hf are numbered in reverse from sd
-    for j in range(3):
-        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
-        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
-        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
-
-# this part accounts for mid blocks in both the encoder and the decoder
-for i in range(2):
-    hf_mid_res_prefix = f"mid_block.resnets.{i}."
-    sd_mid_res_prefix = f"mid.block_{i+1}."
-    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
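-
-# e.g. HF "mid_block.resnets.0." maps to SD "mid.block_1." in both the encoder and decoder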
-
-
-vae_conversion_map_attn = [
-    # (stable-diffusion, HF Diffusers)
-    ("norm.", "group_norm."),
-    ("q.", "query."),
-    ("k.", "key."),
-    ("v.", "value."),
-    ("proj_out.", "proj_attn."),
-]
-
-
-def reshape_weight_for_sd(w):
-    # convert HF linear weights to SD conv2d weights
-    return w.reshape(*w.shape, 1, 1)
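-    # e.g. a [512, 512] linear weight becomes a [512, 512, 1, 1] 1x1-conv weight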
-
-
-def convert_vae_state_dict(vae_state_dict):
-    mapping = {k: k for k in vae_state_dict.keys()}
-    for k, v in mapping.items():
-        for sd_part, hf_part in vae_conversion_map:
-            v = v.replace(hf_part, sd_part)
-        mapping[k] = v
-    for k, v in mapping.items():
-        if "attentions" in k:
-            for sd_part, hf_part in vae_conversion_map_attn:
-                v = v.replace(hf_part, sd_part)
-            mapping[k] = v
-    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
-    weights_to_convert = ["q", "k", "v", "proj_out"]
-    print("Converting to CKPT ...") 
-    for k, v in new_state_dict.items():
-        for weight_name in weights_to_convert:
-            if f"mid.attn_1.{weight_name}.weight" in k:
-                print(f"Reshaping {k} for SD format")
-                new_state_dict[k] = reshape_weight_for_sd(v)
-    return new_state_dict
-
-
-# =========================#
-# Text Encoder Conversion #
-# =========================#
-
-
-textenc_conversion_lst = [
-    # (stable-diffusion, HF Diffusers)
-    ("resblocks.", "text_model.encoder.layers."),
-    ("ln_1", "layer_norm1"),
-    ("ln_2", "layer_norm2"),
-    (".c_fc.", ".fc1."),
-    (".c_proj.", ".fc2."),
-    (".attn", ".self_attn"),
-    ("ln_final.", "transformer.text_model.final_layer_norm."),
-    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
-    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
-]
-protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
-textenc_pattern = re.compile("|".join(protected.keys()))
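-# e.g. the pattern rewrites "text_model.encoder.layers.3.layer_norm1.weight"
-# into "resblocks.3.ln_1.weight"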
-
-# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
-code2idx = {"q": 0, "k": 1, "v": 2}
-
-
-def convert_text_enc_state_dict_v20(text_enc_dict):
-    new_state_dict = {}
-    capture_qkv_weight = {}
-    capture_qkv_bias = {}
-    for k, v in text_enc_dict.items():
-        if (
-            k.endswith(".self_attn.q_proj.weight")
-            or k.endswith(".self_attn.k_proj.weight")
-            or k.endswith(".self_attn.v_proj.weight")
-        ):
-            k_pre = k[: -len(".q_proj.weight")]
-            k_code = k[-len("q_proj.weight")]
-            if k_pre not in capture_qkv_weight:
-                capture_qkv_weight[k_pre] = [None, None, None]
-            capture_qkv_weight[k_pre][code2idx[k_code]] = v
-            continue
-
-        if (
-            k.endswith(".self_attn.q_proj.bias")
-            or k.endswith(".self_attn.k_proj.bias")
-            or k.endswith(".self_attn.v_proj.bias")
-        ):
-            k_pre = k[: -len(".q_proj.bias")]
-            k_code = k[-len("q_proj.bias")]
-            if k_pre not in capture_qkv_bias:
-                capture_qkv_bias[k_pre] = [None, None, None]
-            capture_qkv_bias[k_pre][code2idx[k_code]] = v
-            continue
-
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
-        new_state_dict[relabelled_key] = v
-
-    for k_pre, tensors in capture_qkv_weight.items():
-        if None in tensors:
-            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
-        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
-
-    for k_pre, tensors in capture_qkv_bias.items():
-        if None in tensors:
-            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
-        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
-
-    return new_state_dict
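-
-# e.g. three [1024, 1024] q/k/v projection weights of one layer are concatenated (dim 0)
-# into a single [3072, 1024] ".in_proj_weight" tensor expected by the SD v2 checkpoint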
-
-
-def convert_text_enc_state_dict(text_enc_dict):
-    return text_enc_dict
-
-
-def convert(model_path, checkpoint_path):    
-    unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
-    vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
-    text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
-
-    # Convert the UNet model
-    unet_state_dict = torch.load(unet_path, map_location="cpu")
-    unet_state_dict = convert_unet_state_dict(unet_state_dict)
-    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
-
-    # Convert the VAE model
-    vae_state_dict = torch.load(vae_path, map_location="cpu")
-    vae_state_dict = convert_vae_state_dict(vae_state_dict)
-    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
-
-    # Convert the text encoder model
-    text_enc_dict = torch.load(text_enc_path, map_location="cpu")
-
-    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
-    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
-
-    if is_v20_model:
-        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
-        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
-        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
-    else:
-        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
-
-    # Put together new checkpoint
-    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
-    state_dict = {k: v.half() for k, v in state_dict.items()}
-    state_dict = {"state_dict": state_dict}
-    torch.save(state_dict, checkpoint_path)
-    del state_dict, text_enc_dict, vae_state_dict, unet_state_dict
-    torch.cuda.empty_cache()
-    gc.collect()
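-
-# hypothetical usage: convert("my_diffusers_dir", "model.ckpt") writes a half-precision
-# checkpoint keyed "state_dict" that combines the UNet, VAE and text-encoder weights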
-    
\ No newline at end of file
diff --git a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/bpe_toy.py b/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/bpe_toy.py
deleted file mode 100644
index 0421b255861cb56eb40bf58a8225807cc396e968..0000000000000000000000000000000000000000
--- a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/bpe_toy.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Author: Rico Sennrich
-
-"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
-Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
-of a text to a configurable number of symbols, with only a small increase in the number of tokens.
-This is an (inefficient) toy implementation that shows the algorithm. For processing large datasets,
-indexing and incremental updates can be used to speed up the implementation (see learn_bpe.py).
-
-Reference:
-Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
-Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
-"""
-
-
-import re
-import sys
-import collections
-
-def get_stats(vocab):
-  pairs = collections.defaultdict(int)
-  for word, freq in vocab.items():
-    symbols = word.split()
-    for i in range(len(symbols)-1):
-      pairs[symbols[i],symbols[i+1]] += freq
-  return pairs
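-
-# e.g. for {'l o w</w>': 5}, get_stats yields ('l', 'o'): 5, ('o', 'w'): 5, ('w', '</w>'): 5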
-
-def merge_vocab(pair, v_in):
-  v_out = {}
-  bigram_pattern = re.escape(' '.join(pair))
-  p = re.compile(r'(?<!\S)' + bigram_pattern + r'(?!\S)')
-  for word in v_in:
-    w_out = p.sub(''.join(pair), word)
-    v_out[w_out] = v_in[word]
-  return v_out
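-
-# e.g. merging the pair ('e', 's') rewrites 'n e w e s t</w>' as 'n e w es t</w>'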
-
-vocab = {'l o w</w>' : 5, 'l o w e r</w>' : 2,
-         'n e w e s t</w>' : 6, 'w i d e s t</w>' : 3}
-num_merges = 15
-for i in range(num_merges):
-  pairs = get_stats(vocab)
-  try:
-    best = max(pairs, key=pairs.get)
-  except ValueError:
-    break
-  if pairs[best] < 2:
-    sys.stderr.write('no pair has frequency > 1. Stopping\n')
-    break
-  vocab = merge_vocab(best, vocab)
-  print(best)
diff --git a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/subword_nmt.py b/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/subword_nmt.py
deleted file mode 100644
index 29104f4d8029524a80d6fa649b69a8acec0b8abc..0000000000000000000000000000000000000000
--- a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/subword_nmt.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import io
-import sys
-import codecs
-import argparse
-
-from .learn_bpe import learn_bpe
-from .apply_bpe import BPE, read_vocabulary
-from .get_vocab import get_vocab
-from .learn_joint_bpe_and_vocab import learn_joint_bpe_and_vocab
-
-from .learn_bpe import create_parser as create_learn_bpe_parser
-from .apply_bpe import create_parser as create_apply_bpe_parser
-from .get_vocab import create_parser as create_get_vocab_parser
-from .learn_joint_bpe_and_vocab import create_parser as create_learn_joint_bpe_and_vocab_parser
-
-# hack for python2/3 compatibility
-argparse.open = io.open
-
-def main():
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawTextHelpFormatter,
-        description="subword-nmt: unsupervised word segmentation for neural machine translation and text generation ")
-    subparsers = parser.add_subparsers(dest='command',
-                                       help="""command to run. Run one of the commands with '-h' for more info.
-
-learn-bpe: learn BPE merge operations on input text.
-apply-bpe: apply given BPE operations to input text.
-get-vocab: extract vocabulary and word frequencies from input text.
-learn-joint-bpe-and-vocab: executes recommended workflow for joint BPE.""")
-
-    learn_bpe_parser = create_learn_bpe_parser(subparsers)
-    apply_bpe_parser = create_apply_bpe_parser(subparsers)
-    get_vocab_parser = create_get_vocab_parser(subparsers)
-    learn_joint_bpe_and_vocab_parser = create_learn_joint_bpe_and_vocab_parser(subparsers)
-
-    args = parser.parse_args()
-
-    if args.command == 'learn-bpe':
-        # read/write files as UTF-8
-        if args.input.name != '<stdin>':
-            args.input = codecs.open(args.input.name, encoding='utf-8')
-        if args.output.name != '<stdout>':
-            args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
-
-        learn_bpe(args.input, args.output, args.symbols, args.min_frequency, args.verbose, 
-                  is_dict=args.dict_input, total_symbols=args.total_symbols)
-    elif args.command == 'apply-bpe':
-        # read/write files as UTF-8
-        args.codes = codecs.open(args.codes.name, encoding='utf-8')
-        if args.input.name != '<stdin>':
-            args.input = codecs.open(args.input.name, encoding='utf-8')
-        if args.output.name != '<stdout>':
-            args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
-        if args.vocabulary:
-            args.vocabulary = codecs.open(args.vocabulary.name, encoding='utf-8')
-
-        if args.vocabulary:
-            vocabulary = read_vocabulary(args.vocabulary, args.vocabulary_threshold)
-        else:
-            vocabulary = None
-
-        if sys.version_info < (3, 0):
-            args.separator = args.separator.decode('UTF-8')
-            if args.glossaries:
-                args.glossaries = [g.decode('UTF-8') for g in args.glossaries]
-
-        bpe = BPE(args.codes, args.merges, args.separator, vocabulary, args.glossaries)
-
-        for line in args.input:
-            args.output.write(bpe.process_line(line, args.dropout))
-
-    elif args.command == 'get-vocab':
-        if args.input.name != '<stdin>':
-            args.input = codecs.open(args.input.name, encoding='utf-8')
-        if args.output.name != '<stdout>':
-            args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
-        get_vocab(args.input, args.output)
-    elif args.command == 'learn-joint-bpe-and-vocab':
-        if sys.version_info < (3, 0):
-            args.separator = args.separator.decode('UTF-8')
-        learn_joint_bpe_and_vocab(args)
-    else:
-        raise Exception('Invalid command provided')
-
-
-# python 2/3 compatibility
-if sys.version_info < (3, 0):
-    sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
-    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
-    sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
-else:
-    sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
-    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
-    sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
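For orientation on the deleted CLI above: the same `learn_bpe`/`apply_bpe` functions it wires into subparsers can also be driven directly from Python. A minimal sketch, assuming the package is installed and importable as `subword_nmt`; the file names are illustrative only.

```python
# Minimal sketch: drive learn_bpe / apply_bpe programmatically, mirroring
# the 'learn-bpe' and 'apply-bpe' branches of the CLI above.
import codecs

from subword_nmt.learn_bpe import learn_bpe
from subword_nmt.apply_bpe import BPE

# Learn 10,000 merge operations from a UTF-8 plain-text corpus.
with codecs.open("train.txt", encoding="utf-8") as infile, \
     codecs.open("codes.bpe", "w", encoding="utf-8") as outfile:
    learn_bpe(infile, outfile, 10000, min_frequency=2, verbose=False)

# Apply the learned codes to a single line of text.
with codecs.open("codes.bpe", encoding="utf-8") as codes:
    bpe = BPE(codes)
print(bpe.process_line("unsupervised word segmentation\n"), end="")
```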
diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py
deleted file mode 100644
index adf21c97a8c7c0568d0783432b4526ba78138926..0000000000000000000000000000000000000000
--- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from easydict import EasyDict as edict
-
-# make training faster
-# our RAM is 256G
-# mount -t tmpfs -o size=140G  tmpfs /train_tmp
-
-config = edict()
-config.margin_list = (1.0, 0.0, 0.4)
-config.network = "r100"
-config.resume = False
-config.output = None
-config.embedding_size = 512
-config.sample_rate = 0.3
-config.fp16 = True
-config.momentum = 0.9
-config.weight_decay = 5e-4
-config.batch_size = 128
-config.lr = 0.4
-config.verbose = 2000
-config.dali = False
-
-config.rec = "/train_tmp/WebFace42M"
-config.num_classes = 2059906
-config.num_image = 42474557
-config.num_epoch = 20
-config.warmup_epoch = config.num_epoch // 10
-config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
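Since this config is just a module-level `EasyDict`, a training script can pick it up by executing the file. A hedged sketch of that pattern, not insightface's actual loader:

```python
# Sketch: load the flat edict config above by executing the module file.
import importlib.util

spec = importlib.util.spec_from_file_location(
    "wf42m_cfg", "arcface_torch/configs/wf42m_pfc03_32gpu_r100.py")
cfg_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cfg_module)

config = cfg_module.config
print(config.network, config.batch_size, config.lr)  # r100 128 0.4
print(config.warmup_epoch)                           # num_epoch // 10 == 2
```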
diff --git a/spaces/hzy123/bingo/src/lib/bots/bing/tts.ts b/spaces/hzy123/bingo/src/lib/bots/bing/tts.ts
deleted file mode 100644
index cd10b7d1d7581bf9cf46ff6755fcca550c558c9b..0000000000000000000000000000000000000000
--- a/spaces/hzy123/bingo/src/lib/bots/bing/tts.ts
+++ /dev/null
@@ -1,82 +0,0 @@
-import { sleep } from './utils'
-
-const synth = window.speechSynthesis
-
-export class TTS {
-  currentText = ''
-  speakText = ''
-  private controller = new AbortController()
-  speaking = false
-  get isSpeaking() {
-    return this.speaking
-  }
-  finished = false
-  constructor() {}
-  abort = () => {
-    this.controller.abort()
-  }
-
-  reset = () => {
-    this.speaking = false
-    this.finished = true
-    this.currentText = ''
-    this.speakText = ''
-    this.abort()
-  }
-
-  speak = (text: string) => {
-    if (!synth || !text || text.trim().length < 2) {
-      return
-    }
-    this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '')
-    this.finished = false
-    this.loop()
-  }
-
-  private async doSpeak() {
-    return new Promise((resolve) => {
-      const endIndex = this.finished ? this.currentText.length :
-        Math.max(
-          this.currentText.lastIndexOf('。'),
-          this.currentText.lastIndexOf(';'),
-          this.currentText.lastIndexOf('、'),
-          this.currentText.lastIndexOf('?'),
-          this.currentText.lastIndexOf('\n')
-        )
-      const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0
-
-      if (startIndex >= endIndex) {
-        return resolve(true)
-      }
-      const text = this.currentText.slice(startIndex, endIndex)
-      this.speakText = text
-      const utterThis = new SpeechSynthesisUtterance(text)
-      this.controller.signal.onabort = () => {
-        synth.cancel()
-        this.finished = true
-        resolve(false)
-      }
-
-      utterThis.onend = function (event) {
-        resolve(true)
-      }
-
-      utterThis.onerror = function (event) {
-        resolve(false)
-      }
-
-      const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null
-      utterThis.voice = voice
-      synth.speak(utterThis)
-    })
-  }
-
-  private async loop() {
-    if (this.speaking) return
-    this.speaking = true
-    while(!this.finished) {
-      await Promise.all([sleep(1000), this.doSpeak()])
-    }
-    this.speaking = false
-  }
-}
diff --git a/spaces/inamXcontru/PoeticTTS/Biosystem Bts 310 User Manual High Quality.md b/spaces/inamXcontru/PoeticTTS/Biosystem Bts 310 User Manual High Quality.md
deleted file mode 100644
index a11dca62e4781b02da40595d0c5d9e344d820b5e..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Biosystem Bts 310 User Manual High Quality.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>biosystem bts 310 user manual</h2><br /><p><b><b>Download Zip</b> &#127383; <a href="https://gohhs.com/2uz5rx">https://gohhs.com/2uz5rx</a></b></p><br /><br />
-<br />
-Hi, I have an old BTS 310 photometer, but I don't have the user manual. Please send me the file (.pdf) if possible. Thank you! 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Microcat Daihatsu Dongle Crack _HOT_ Free.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Microcat Daihatsu Dongle Crack _HOT_ Free.md
deleted file mode 100644
index 228b9d66abe134ff7240dbc36dab282f45c9a229..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Microcat Daihatsu Dongle Crack _HOT_ Free.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Microcat Daihatsu Dongle Crack Free</h2><br /><p><b><b>Download</b> >> <a href="https://urlin.us/2uExw6">https://urlin.us/2uExw6</a></b></p><br /><br />
-
-Cengage Physics Pdf Edius 7 Serial Number Keygen Free Download ... 13 autograss for 3ds max 2014 2 microcat daihatsu dongle crack ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/inreVtussa/clothingai/Examples/Audiobro Lass [EXCLUSIVE] Full 2.0.1 Torrents.md b/spaces/inreVtussa/clothingai/Examples/Audiobro Lass [EXCLUSIVE] Full 2.0.1 Torrents.md
deleted file mode 100644
index 1f61fe45d6fdfda15bf079cfac96fb65b76819b5..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Audiobro Lass [EXCLUSIVE] Full 2.0.1 Torrents.md	
+++ /dev/null
@@ -1,8 +0,0 @@
-<h2>Audiobro Lass Full 2.0.1 Torrents</h2><br /><p><b><b>Download</b> &#9193; <a href="https://tiurll.com/2uCm7a">https://tiurll.com/2uCm7a</a></b></p><br /><br />
-<br />
-The information provided on this site is for general informational purposes only. Audiobro does not make any warranties or representations, expressed or implied, as to the accuracy, completeness, correctness, reliability, suitability or quality of any information on this website or any other information, products, or services in connection with this website. Audiobro does not warrant that this site, or the use of any information obtained from this website, shall be uninterrupted or error-free. In no event shall Audiobro be liable for any direct, indirect, incidental, consequential, exemplary, special, punitive or other damages including, without limitation, the costs of procurement of substitute goods and services, loss of use, data, or profits, whether in contract, strict liability or tort, arising out of, or in connection with, use of this website, any other websites linked to this site, or the browsing of this site, the downloading of any materials, or the use or operation of this website, whether caused by the negligence of, or any legal defects in the operation of, Audiobro or any third-party websites. Because certain jurisdictions prohibit the exclusion or limitation of liability for consequential or incidental damages, the above limitations may not apply to you. This warranty gives you specific legal rights and you may also have other rights which vary from jurisdiction to jurisdiction. If you are unsatisfied with our limited warranty, or if you fail to comply with the warranty, you must contact Audiobro immediately for a return of the product in its original condition or a replacement of the product if defective. You should call us toll free at 1-800-427-0496.
-
-Differential effects of the amino-terminal region of human beta-defensin-2 on substrate binding and antimicrobial activity.
-
-Human beta-defensins (hBDs) are a group of evolutionarily related anti-microbial peptides consisting of 44-52 amino acid residues. The amino-terminal region of these peptides is essential for antimicrobial activity and the formation of alpha-helical conformations. Since previous data demonstrated that the amino-terminal region is important for the activity of some beta-defensins, it was of interest to investigate whether or not the amino-terminal sequence of human beta-defensin-2 (hBD-2) influences the substrate specificity of this peptide. In this study, we generated several analogs of hBD-2 with an amino-terminal region deletion (hBD-2(1-25 4fefd39f24<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/inreVtussa/clothingai/Examples/Autocom Cdp 2012.2 Keygen Download BETTER Free.md b/spaces/inreVtussa/clothingai/Examples/Autocom Cdp 2012.2 Keygen Download BETTER Free.md
deleted file mode 100644
index ea11ec7110076471c3949f317679e62be98c62c7..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Autocom Cdp 2012.2 Keygen Download BETTER Free.md	
+++ /dev/null
@@ -1,9 +0,0 @@
-<br />
-<p>Autocom 2013.3 keygen activator without limits. Hex2stuff 2013 autocom / delphi 2013 release 3 keygen activator. Free download Autocom 2013 r2 Delphi software: Autocom 2013.2 Delphi Software Activation Guide: Step 1: Setup software ( 2013.2installDelphi.exe ) Step 2: Select product: 'DS150E' (new vci) for autocom delphi VCI cars, if heavy duty please select product(DE150E heavy duty(new vci) ), don't change the install path! (win xp will install to-C:Program FilesDelphi DiagnosticsDS150E(new vci)) (win7 64bit will install to-C:Program Files (x86)Delphi DiagnosticsDS150E(new vci)) Step 3: Copy all file in patch folder replace to Ds150e(new vci) install Folder.</p>
-<h2>autocom cdp 2012.2 keygen download free</h2><br /><p><b><b>Download File</b> &#8230;&#8230;&#8230; <a href="https://tiurll.com/2uCiZb">https://tiurll.com/2uCiZb</a></b></p><br /><br />
-<p>If you're one of the millions of listeners who enjoy online streaming services, you'd probably think that the average quality when listening to YouTube Music is as good as it gets. However, in reality, the quality can be somewhat lacking in some areas. Luckily, there are options available to you.</p>
-<p>I am totally confused. If it comes with a keygen, what is there to activate? I thought the keygen was the activator. Also, what do you use to extract the files, or do they need to be extracted at all? I ask because I am new to Delphi and Autocom. Do you also offer TeamViewer?</p>
-<p>However, you can buy a good-quality VCI unit to improve listenability; for example, the new VCI unit is excellent and not expensive, and leaves little to complain about.</p>
-<p></p> 899543212b<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/ivntl/MMS/uroman/lib/JSON/backportPP/Compat5006.pm b/spaces/ivntl/MMS/uroman/lib/JSON/backportPP/Compat5006.pm
deleted file mode 100644
index 7736fd8debcbb47cc38192798de6c3055222d473..0000000000000000000000000000000000000000
--- a/spaces/ivntl/MMS/uroman/lib/JSON/backportPP/Compat5006.pm
+++ /dev/null
@@ -1,173 +0,0 @@
-package # This is JSON::backportPP
-    JSON::backportPP56;
-
-use 5.006;
-use strict;
-
-my @properties;
-
-$JSON::PP56::VERSION = '1.08';
-
-BEGIN {
-
-    sub utf8::is_utf8 {
-        my $len =  length $_[0]; # char length
-        {
-            use bytes; #  byte length;
-            return $len != length $_[0]; # if !=, UTF8-flagged on.
-        }
-    }
-
-
-    sub utf8::upgrade {
-        ; # noop;
-    }
-
-
-    sub utf8::downgrade ($;$) {
-        return 1 unless ( utf8::is_utf8( $_[0] ) );
-
-        if ( _is_valid_utf8( $_[0] ) ) {
-            my $downgrade;
-            for my $c ( unpack( "U*", $_[0] ) ) {
-                if ( $c < 256 ) {
-                    $downgrade .= pack("C", $c);
-                }
-                else {
-                    $downgrade .= pack("U", $c);
-                }
-            }
-            $_[0] = $downgrade;
-            return 1;
-        }
-        else {
-            Carp::croak("Wide character in subroutine entry") unless ( $_[1] );
-            0;
-        }
-    }
-
-
-    sub utf8::encode ($) { # UTF8 flag off
-        if ( utf8::is_utf8( $_[0] ) ) {
-            $_[0] = pack( "C*", unpack( "C*", $_[0] ) );
-        }
-        else {
-            $_[0] = pack( "U*", unpack( "C*", $_[0] ) );
-            $_[0] = pack( "C*", unpack( "C*", $_[0] ) );
-        }
-    }
-
-
-    sub utf8::decode ($) { # UTF8 flag on
-        if ( _is_valid_utf8( $_[0] ) ) {
-            utf8::downgrade( $_[0] );
-            $_[0] = pack( "U*", unpack( "U*", $_[0] ) );
-        }
-    }
-
-
-    *JSON::PP::JSON_PP_encode_ascii      = \&_encode_ascii;
-    *JSON::PP::JSON_PP_encode_latin1     = \&_encode_latin1;
-    *JSON::PP::JSON_PP_decode_surrogates = \&JSON::PP::_decode_surrogates;
-    *JSON::PP::JSON_PP_decode_unicode    = \&JSON::PP::_decode_unicode;
-
-    unless ( defined &B::SVp_NOK ) { # missing in B module.
-        eval q{ sub B::SVp_NOK () { 0x02000000; } };
-    }
-
-}
-
-
-
-sub _encode_ascii {
-    join('',
-        map {
-            $_ <= 127 ?
-                chr($_) :
-            $_ <= 65535 ?
-                sprintf('\u%04x', $_) : sprintf('\u%x\u%x', JSON::PP::_encode_surrogates($_));
-        } _unpack_emu($_[0])
-    );
-}
-
-
-sub _encode_latin1 {
-    join('',
-        map {
-            $_ <= 255 ?
-                chr($_) :
-            $_ <= 65535 ?
-                sprintf('\u%04x', $_) : sprintf('\u%x\u%x', JSON::PP::_encode_surrogates($_));
-        } _unpack_emu($_[0])
-    );
-}
-
-
-sub _unpack_emu { # for Perl 5.6 unpack warnings
-    return   !utf8::is_utf8($_[0]) ? unpack('C*', $_[0]) 
-           : _is_valid_utf8($_[0]) ? unpack('U*', $_[0])
-           : unpack('C*', $_[0]);
-}
-
-
-sub _is_valid_utf8 {
-    my $str = $_[0];
-    my $is_utf8;
-
-    while ($str =~ /(?:
-          (
-             [\x00-\x7F]
-            |[\xC2-\xDF][\x80-\xBF]
-            |[\xE0][\xA0-\xBF][\x80-\xBF]
-            |[\xE1-\xEC][\x80-\xBF][\x80-\xBF]
-            |[\xED][\x80-\x9F][\x80-\xBF]
-            |[\xEE-\xEF][\x80-\xBF][\x80-\xBF]
-            |[\xF0][\x90-\xBF][\x80-\xBF][\x80-\xBF]
-            |[\xF1-\xF3][\x80-\xBF][\x80-\xBF][\x80-\xBF]
-            |[\xF4][\x80-\x8F][\x80-\xBF][\x80-\xBF]
-          )
-        | (.)
-    )/xg)
-    {
-        if (defined $1) {
-            $is_utf8 = 1 if (!defined $is_utf8);
-        }
-        else {
-            $is_utf8 = 0 if (!defined $is_utf8);
-            if ($is_utf8) { # eventually, not utf8
-                return;
-            }
-        }
-    }
-
-    return $is_utf8;
-}
-
-
-1;
-__END__
-
-=pod
-
-=head1 NAME
-
-JSON::PP56 - Helper module in using JSON::PP in Perl 5.6
-
-=head1 DESCRIPTION
-
-A helper module that JSON::PP loads internally when running on Perl 5.6.
-
-=head1 AUTHOR
-
-Makamaka Hannyaharamitu, E<lt>makamaka[at]cpan.orgE<gt>
-
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright 2007-2012 by Makamaka Hannyaharamitu
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself. 
-
-=cut
-
diff --git a/spaces/jackli888/stable-diffusion-webui/modules/ui_extra_networks_hypernets.py b/spaces/jackli888/stable-diffusion-webui/modules/ui_extra_networks_hypernets.py
deleted file mode 100644
index 5fe6516a71e7ca5203fdacec3f750494d5650efd..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/modules/ui_extra_networks_hypernets.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import json
-import os
-
-from modules import shared, ui_extra_networks
-
-
-class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
-    def __init__(self):
-        super().__init__('Hypernetworks')
-
-    def refresh(self):
-        shared.reload_hypernetworks()
-
-    def list_items(self):
-        for name, path in shared.hypernetworks.items():
-            path, ext = os.path.splitext(path)
-            previews = [path + ".png", path + ".preview.png"]
-
-            preview = None
-            for file in previews:
-                if os.path.isfile(file):
-                    preview = self.link_preview(file)
-                    break
-
-            yield {
-                "name": name,
-                "filename": path,
-                "preview": preview,
-                "search_term": self.search_terms_from_path(path),
-                "prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
-                "local_preview": path + ".png",
-            }
-
-    def allowed_directories_for_previews(self):
-        return [shared.cmd_opts.hypernetwork_dir]
-
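The preview lookup in `list_items` above, first `<name>.png` then `<name>.preview.png`, is a small reusable pattern; a standalone sketch with a hypothetical helper name:

```python
# Hypothetical standalone version of the preview lookup used in list_items above.
import os

def find_preview(path_without_ext: str):
    """Return the first existing preview image for a model path, or None."""
    for candidate in (path_without_ext + ".png", path_without_ext + ".preview.png"):
        if os.path.isfile(candidate):
            return candidate
    return None
```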
diff --git a/spaces/jackyccl/segment-anything/app.py b/spaces/jackyccl/segment-anything/app.py
deleted file mode 100644
index c83604c0a2bb7be02af544574538fae0eb644acc..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/app.py
+++ /dev/null
@@ -1,524 +0,0 @@
-import argparse
-import cv2
-import os
-from PIL import Image, ImageDraw, ImageFont, ImageOps
-import numpy as np
-from pathlib import Path
-import gradio as gr
-import matplotlib.pyplot as plt
-from loguru import logger
-import subprocess
-import copy
-import time
-import warnings
-import io
-import random
-
-import torch
-from torchvision.ops import box_convert
-warnings.filterwarnings("ignore")
-
-# grounding DINO
-from groundingdino.models import build_model
-from groundingdino.util.slconfig import SLConfig
-from groundingdino.util.utils import clean_state_dict
-from groundingdino.util.inference import annotate, load_image, predict
-import groundingdino.datasets.transforms as T
-
-# segment anything
-from segment_anything import build_sam, SamPredictor 
-
-# lama-cleaner
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import Config as lama_Config
-from lama_cleaner.helper import load_img, numpy_to_bytes, resize_max_size
-
-#stable diffusion
-from diffusers import StableDiffusionInpaintPipeline
-
-from huggingface_hub import hf_hub_download
-
-if not os.path.exists('./inpaint_demo.jpg'):
-    os.system("wget https://github.com/IDEA-Research/Grounded-Segment-Anything/raw/main/assets/inpaint_demo.jpg")
-
-if not os.path.exists('./sam_vit_h_4b8939.pth'):
-    logger.info(f"get sam_vit_h_4b8939.pth...")
-    result = subprocess.run(['wget', 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth'], check=True)
-    print(f'wget sam_vit_h_4b8939.pth result = {result}')
-
-# Use this command for evaluate the GLIP-T model
-config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
-ckpt_repo_id = "ShilongLiu/GroundingDINO"
-ckpt_filename = "groundingdino_swint_ogc.pth"
-sam_checkpoint = './sam_vit_h_4b8939.pth' 
-output_dir = "outputs"
-groundingdino_device = 'cpu'
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-print(f'device={device}')
-
-# make dir
-os.makedirs(output_dir, exist_ok=True)
-
-
-def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
-    args = SLConfig.fromfile(model_config_path) 
-    model = build_model(args)
-    args.device = device
-
-    cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
-    checkpoint = torch.load(cache_file, map_location='cpu')
-    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
-    print("Model loaded from {} \n => {}".format(cache_file, log))
-    _ = model.eval()
-    return model    
-
-def load_image_and_transform(init_image):
-    init_image = init_image.convert("RGB")
-    transform = T.Compose([
-        T.RandomResize([800], max_size=1333),
-        T.ToTensor(),
-        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-    ])
-    image, _ = transform(init_image, None) # 3, h, w
-    return init_image, image
-
-def image_transform_grounding_for_vis(init_image):
-    transform = T.Compose([
-        T.RandomResize([800], max_size=1333),
-    ])
-    image, _ = transform(init_image, None) # 3, h, w
-    return image
-
-def plot_boxes_to_image(image_pil, tgt):
-    H, W = tgt["size"]
-    boxes = tgt["boxes"]
-    labels = tgt["labels"]
-    assert len(boxes) == len(labels), "boxes and labels must have same length"
-
-    draw = ImageDraw.Draw(image_pil)
-    mask = Image.new("L", image_pil.size, 0)
-    mask_draw = ImageDraw.Draw(mask)
-
-    # draw boxes and masks
-    for box, label in zip(boxes, labels):
-        # from 0..1 to 0..W, 0..H
-        box = box * torch.Tensor([W, H, W, H])
-        # from xywh to xyxy
-        box[:2] -= box[2:] / 2
-        box[2:] += box[:2]
-        # random color
-        color = tuple(np.random.randint(0, 255, size=3).tolist())
-        # draw
-        x0, y0, x1, y1 = box
-        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
-
-        draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
-        # draw.text((x0, y0), str(label), fill=color)
-
-        font = ImageFont.load_default()
-        if hasattr(font, "getbbox"):
-            bbox = draw.textbbox((x0, y0), str(label), font)
-        else:
-            w, h = draw.textsize(str(label), font)
-            bbox = (x0, y0, w + x0, y0 + h)
-        # bbox = draw.textbbox((x0, y0), str(label))
-        draw.rectangle(bbox, fill=color)
-        font = os.path.join(cv2.__path__[0],'qt','fonts','DejaVuSans.ttf')
-        font_size = 20
-        new_font = ImageFont.truetype(font, font_size)
-
-        draw.text((x0+2, y0+2), str(label), font=new_font, fill="white")
-
-        mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
-
-    return image_pil, mask
-
-def show_mask(mask, ax, random_color=False):
-    if random_color:
-        color = np.concatenate([np.random.random(3), np.array([0.8])], axis=0)
-    else:
-        color = np.array([30/255, 144/255, 255/255, 0.6])
-    h, w = mask.shape[-2:]
-    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
-    ax.imshow(mask_image)
-
-def show_box(box, ax, label):
-    x0, y0 = box[0], box[1]
-    w, h = box[2] - box[0], box[3] - box[1]
-    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='red', facecolor=(0,0,0,0), lw=1)) 
-    ax.text(x0, y0+20, label, fontdict={'fontsize': 6}, color="white")
-
-def get_grounding_box(image_tensor, grounding_caption, box_threshold, text_threshold):
-    # run grounding
-    boxes, logits, phrases = predict(groundingDino_model, image_tensor, grounding_caption, box_threshold, text_threshold, device=groundingdino_device)
-    labels = [
-        f"{phrase} ({logit:.2f})"
-        for phrase, logit
-        in zip(phrases, logits)
-    ]
-    # annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases)
-    # image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
-    return boxes, labels
-
-def mask_extend(img, box, extend_pixels=10, useRectangle=True):
-    box[0] = int(box[0])
-    box[1] = int(box[1])
-    box[2] = int(box[2])
-    box[3] = int(box[3])
-    region = img.crop(tuple(box))                           # crop based on bb box
-    new_width = box[2] - box[0] + 2*extend_pixels           
-    new_height = box[3] - box[1] + 2*extend_pixels
-
-    region_BILINEAR = region.resize((int(new_width), int(new_height)))   # resize the cropped region based on "extend_pixels"
-    if useRectangle:
-        region_draw = ImageDraw.Draw(region_BILINEAR)
-        region_draw.rectangle((0, 0, new_width, new_height), fill=(255, 255, 255))       # draw white rectangle
-    img.paste(region_BILINEAR, (int(box[0]-extend_pixels), int(box[1]-extend_pixels)))   # paste the resized region back at the original box location, padded by extend_pixels on all sides
-    return img
-
-def mix_masks(imgs):
-    re_img =  1 - np.asarray(imgs[0].convert("1"))
-    for i in range(len(imgs)-1):
-        re_img = np.multiply(re_img, 1 - np.asarray(imgs[i+1].convert("1")))
-    re_img =  1 - re_img
-    return  Image.fromarray(np.uint8(255*re_img))
-
-def lama_cleaner_process(image, mask):
-    ori_image = image
-    if mask.shape[0] == image.shape[1] and mask.shape[1] == image.shape[0] and mask.shape[0] != mask.shape[1]:
-        # rotate image
-        ori_image = np.transpose(image[::-1, ...][:, ::-1], axes=(1, 0, 2))[::-1, ...]
-        image = ori_image
-    
-    original_shape = ori_image.shape
-    interpolation = cv2.INTER_CUBIC
-    
-    size_limit = 1080
-    if size_limit == "Original":
-        size_limit = max(image.shape)
-    else:
-        size_limit = int(size_limit)
-
-    config = lama_Config(
-        ldm_steps=25,
-        ldm_sampler='plms',
-        zits_wireframe=True,
-        hd_strategy='Original',
-        hd_strategy_crop_margin=196,
-        hd_strategy_crop_trigger_size=1280,
-        hd_strategy_resize_limit=2048,
-        prompt='',
-        use_croper=False,
-        croper_x=0,
-        croper_y=0,
-        croper_height=512,
-        croper_width=512,
-        sd_mask_blur=5,
-        sd_strength=0.75,
-        sd_steps=50,
-        sd_guidance_scale=7.5,
-        sd_sampler='ddim',
-        sd_seed=42,
-        cv2_flag='INPAINT_NS',
-        cv2_radius=5,
-    )
-    
-    if config.sd_seed == -1:
-        config.sd_seed = random.randint(1, 999999999)
-
-    # logger.info(f"Origin image shape_0_: {original_shape} / {size_limit}")
-    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
-    # logger.info(f"Resized image shape_1_: {image.shape}")
-    
-    # logger.info(f"mask image shape_0_: {mask.shape} / {type(mask)}")
-    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
-    # logger.info(f"mask image shape_1_: {mask.shape} / {type(mask)}")
-
-    res_np_img = lama_cleaner_model(image, mask, config)
-    torch.cuda.empty_cache()
-  
-    image = Image.open(io.BytesIO(numpy_to_bytes(res_np_img, 'png')))
-    return  image
-
-def xywh_to_xyxy(box, sizeW, sizeH):
-    if isinstance(box, list):
-        box = torch.Tensor(box)
-    box = box * torch.Tensor([sizeW, sizeH, sizeW, sizeH])
-    box[:2] -= box[2:] / 2  # top left corner
-    box[2:] += box[:2]      # bottom right corner
-    box = box.numpy()
-    return box
-
-def to_extend_mask(segment_mask, boxes_filt, size, remove_mask_extend, remove_mode):
-    # remove from mask
-    mask_imgs = []
-    masks_shape = segment_mask.shape        
-    boxes_filt_ori_array = boxes_filt.numpy()
-    if inpaint_mode == 'merge':
-        extend_shape_0 = masks_shape[0]
-        extend_shape_1 = masks_shape[1]
-    else:
-        extend_shape_0 = 1
-        extend_shape_1 = 1
-    for i in range(extend_shape_0):
-        for j in range(extend_shape_1):                
-            mask = segment_mask[i][j].cpu().numpy()
-            mask_pil = Image.fromarray(mask)
-        
-            if remove_mode == 'segment':
-                useRectangle = False
-            else:
-                useRectangle = True
-
-            try:
-                remove_mask_extend = int(remove_mask_extend)
-            except:
-                remove_mask_extend = 10
-            mask_pil_exp = mask_extend(copy.deepcopy(mask_pil).convert("RGB"), 
-                            xywh_to_xyxy(torch.tensor(boxes_filt_ori_array[i]), size[0], size[1]),
-                            extend_pixels=remove_mask_extend, useRectangle=useRectangle)
-            mask_imgs.append(mask_pil_exp)
-    mask_pil = mix_masks(mask_imgs)
-    return mask_pil
-
-def run_anything_task(input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, 
-            iou_threshold, inpaint_mode, mask_source_radio, remove_mode, remove_mask_extend):
-
-    text_prompt = text_prompt.strip()
-
-    # user guidance messages
-    if not (task_type == 'inpainting' or task_type == 'remove'):
-        if text_prompt == '':
-            return [], gr.Gallery.update(label='Please input a detection prompt~~')
-    
-    if input_image is None:
-        return [], gr.Gallery.update(label='Please upload an image~~')
-    
-    file_temp = int(time.time())
-
-    # load mask
-    input_mask_pil = input_image['mask']
-    input_mask = np.array(input_mask_pil.convert("L"))  
-
-    # load image
-    image_pil, image_tensor = load_image_and_transform(input_image['image'])
-
-    output_images = []
-    output_images.append(input_image['image'])
-    # RUN GROUNDINGDINO: we skip DINO if we draw mask on the image
-    if (task_type == 'inpainting' or task_type == 'remove') and mask_source_radio == mask_source_draw:
-        pass
-    else:
-        boxes, phrases = get_grounding_box(image_tensor, text_prompt, box_threshold, text_threshold)
-        if boxes.size(0) == 0:
-            logger.info(f'run_grounded_sam_[]_{task_type}_[{text_prompt}]_1_[No objects detected, please try others.]_')
-            return [], gr.Gallery.update(label='No objects detected, please try others!')
-        boxes_filt_ori = copy.deepcopy(boxes)
-
-        size = image_pil.size
-        
-        pred_dict = {
-                "boxes": boxes,
-                "size": [size[1], size[0]],  # H,W
-                "labels": phrases,
-            }
-
-        # store and save DINO output
-        image_with_box = plot_boxes_to_image(copy.deepcopy(image_pil), pred_dict)[0]
-        image_path = os.path.join(output_dir, f"grounding_dino_output_{file_temp}.jpg")
-        image_with_box.save(image_path)
-        detection_image_result = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
-        os.remove(image_path)
-        output_images.append(detection_image_result)
-
-    # if mask is detected from DINO
-    logger.info(f'run_anything_task_[{file_temp}]_{task_type}_2_')
-    if task_type == 'segment' or ((task_type == 'inpainting' or task_type == 'remove') 
-                                and mask_source_radio == mask_source_segment):
-        image = np.array(input_image['image'])
-        sam_predictor.set_image(image)
-    
-        # map the bounding boxes from dino to original size
-        h, w = size[1], size[0]
-        boxes = boxes * torch.Tensor([w, h, w, h])
-        boxes = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy")
-        # can use box_convert function or below
-        # for i in range(boxes.size(0)):
-        #     boxes[i] = boxes[i] * torch.Tensor([W, H, W, H])
-        #     boxes[i][:2] -= boxes[i][2:] / 2   # top left corner
-        #     boxes[i][2:] += boxes[i][:2]       # bottom right corner
-
-        # transform boxes from original ratio to sam's zoomed ratio 
-        transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes, image.shape[:2])
-
-        # predict masks/segmentation
-        # masks: [number of masks, C, H, W] but note that H and W is 512
-        masks, _, _ = sam_predictor.predict_torch(
-            point_coords = None,
-            point_labels = None,
-            boxes = transformed_boxes,
-            multimask_output = False,
-        )
-
-        # draw output image
-        plt.figure()
-        plt.imshow(image)
-        for mask in masks:
-            show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
-        for box, label in zip(boxes, phrases):
-            show_box(box.numpy(), plt.gca(), label)
-        plt.axis('off')
-        image_path = os.path.join(output_dir, f"grounding_seg_output_{file_temp}.jpg")
-        plt.savefig(image_path, bbox_inches="tight")
-        segment_image_result = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
-        os.remove(image_path)
-        output_images.append(segment_image_result)
-    
-    logger.info(f'run_anything_task_[{file_temp}]_{task_type}_3_')
-    if task_type == 'segment':
-        logger.info(f'run_anything_task_[{file_temp}]_{task_type}_Final_')
-        return output_images, gr.Gallery.update(label='result images')
-
-    elif task_type == 'inpainting' or task_type == 'remove':
-        # if no inpaint prompt is entered, we treat it as remove
-        if inpaint_prompt.strip() == '' and mask_source_radio == mask_source_segment:
-            task_type = 'remove'
-
-        logger.info(f'run_anything_task_[{file_temp}]_{task_type}_4_')  
-        if mask_source_radio == mask_source_draw:
-            mask_pil = input_mask_pil
-            mask = input_mask          
-        else:
-            masks_ori = copy.deepcopy(masks)
-            # inpainting pipeline
-            if inpaint_mode == 'merge':
-                masks = torch.sum(masks, dim=0).unsqueeze(0)
-                masks = torch.where(masks > 0, True, False)
-
-            # simply choose the first mask, which will be refined in a future release
-            mask = masks[0][0].cpu().numpy()
-            mask_pil = Image.fromarray(mask)   
-        output_images.append(mask_pil.convert("RGB"))
-
-        if task_type == 'inpainting':
-            # inpainting pipeline
-            image_source_for_inpaint = image_pil.resize((512, 512))
-            if remove_mask_extend:
-                mask_pil = to_extend_mask(masks_ori, boxes_filt_ori, size, remove_mask_extend, remove_mode)
-                output_images.append(mask_pil.convert("RGB"))
-            image_mask_for_inpaint = mask_pil.resize((512, 512))
-            image_inpainting = sd_pipe(prompt=inpaint_prompt, image=image_source_for_inpaint, mask_image=image_mask_for_inpaint).images[0]            
-        else:
-            # remove from mask
-            if mask_source_radio == mask_source_segment:
-                if remove_mask_extend:  
-                    mask_pil = to_extend_mask(masks_ori, boxes_filt_ori, size, remove_mask_extend, remove_mode)
-                output_images.append(mask_pil.convert("RGB"))               
-            image_inpainting = lama_cleaner_process(np.array(image_pil), np.array(mask_pil.convert("L")))
-
-        image_inpainting = image_inpainting.resize((image_pil.size[0], image_pil.size[1]))
-        output_images.append(image_inpainting)
-        return output_images, gr.Gallery.update(label='result images')        
-    else:
-        logger.info(f"task_type:{task_type} error!")
-    logger.info(f'run_anything_task_[{file_temp}]_Final_Inpainting_')
-    return output_images, gr.Gallery.update(label='result images')
-
-
-def change_radio_display(task_type, mask_source_radio):
-    text_prompt_visible = True
-    inpaint_prompt_visible = False
-    mask_source_radio_visible = False
-
-    if task_type == "inpainting":
-        inpaint_prompt_visible = True
-    if task_type == "inpainting" or task_type == "remove":
-        mask_source_radio_visible = True   
-        if mask_source_radio == mask_source_draw:
-            text_prompt_visible = False
-
-    return  gr.Textbox.update(visible=text_prompt_visible), gr.Textbox.update(visible=inpaint_prompt_visible), gr.Radio.update(visible=mask_source_radio_visible) 
-
-
-
-# model initialization
-groundingDino_model = load_model_hf(config_file, ckpt_repo_id, ckpt_filename, groundingdino_device)
-sam_predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint))
-lama_cleaner_model = ModelManager(name='lama',device='cpu')
-
-# initialize stable-diffusion-inpainting
-logger.info(f"initialize stable-diffusion-inpainting...")
-sd_pipe = None
-if os.environ.get('IS_MY_DEBUG') is None:
-    sd_pipe = StableDiffusionInpaintPipeline.from_pretrained(
-            "runwayml/stable-diffusion-inpainting", 
-            torch_dtype=torch.float16
-    )
-    sd_pipe = sd_pipe.to(device)
-
-if __name__ == "__main__":
-
-    mask_source_draw = "Draw mask on image."
-    mask_source_segment = "Segment based on prompt and inpaint."
-
-    parser = argparse.ArgumentParser("Grounding SAM demo", add_help=True)
-    parser.add_argument("--debug", action="store_true", help="using debug mode")
-    parser.add_argument("--share", action="store_true", help="share the app")
-    args = parser.parse_args()
-
-    print(f'args = {args}')
-
-    block = gr.Blocks().queue()
-    with block:
-        gr.Markdown("# GroundingDino SAM and Stable Diffusion")
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(
-                    source="upload", elem_id="image_upload", type="pil", tool="sketch", value="inpaint_demo.jpg", label="Upload")
-                task_type = gr.Radio(["segment", "inpainting", "remove"],  value="segment", 
-                                                label='Task type', visible=True)
-                
-                mask_source_radio = gr.Radio([mask_source_draw, mask_source_segment], 
-                                    value=mask_source_segment, label="Mask from",
-                                    visible=False) 
-                
-                text_prompt = gr.Textbox(label="Detection Prompt, seperating each name with dot '.', i.e.: bear.cat.dog.chair ]", \
-                                         value='dog', placeholder="Cannot be empty")                                                
-                inpaint_prompt = gr.Textbox(label="Inpaint Prompt (if this is empty, then remove)", visible=False)
-                
-                run_button = gr.Button(label="Run")
-                with gr.Accordion("Advanced options", open=False):
-                    box_threshold = gr.Slider(
-                        label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
-                    )
-                    text_threshold = gr.Slider(
-                        label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
-                    )
-                    iou_threshold = gr.Slider(
-                        label="IOU Threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.001
-                    )
-                    inpaint_mode = gr.Radio(["merge", "first"], value="merge", label="inpaint_mode")
-                    with gr.Row():
-                        with gr.Column(scale=1):
-                            remove_mode = gr.Radio(["segment", "rectangle"],  value="segment", label='remove mode') 
-                        with gr.Column(scale=1):
-                            remove_mask_extend = gr.Textbox(label="Enlarge Mask (Empty: no mask extension, default: 10)", value=10)
-
-            with gr.Column():
-                gallery = gr.Gallery(label="result images", show_label=True, elem_id="gallery", visible=True
-                ).style(preview=True, columns=[5], object_fit="scale-down", height="auto")
-
-        task_type.change(fn=change_radio_display, inputs=[task_type, mask_source_radio], outputs=[text_prompt, inpaint_prompt, mask_source_radio])
-        mask_source_radio.change(fn=change_radio_display, inputs=[task_type, mask_source_radio], outputs=[text_prompt, inpaint_prompt, mask_source_radio])
-        
-        DESCRIPTION = '### This demo is from [Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything); kudos to their excellent work. Welcome everyone to try it out and learn together!'
-        gr.Markdown(DESCRIPTION)
-
-        run_button.click(fn=run_anything_task, inputs=[
-                        input_image, text_prompt, task_type, inpaint_prompt,
-                        box_threshold,text_threshold, iou_threshold, inpaint_mode,
-                        mask_source_radio, remove_mode, remove_mask_extend], 
-                        outputs=[gallery, gallery], show_progress=True, queue=True)
-
-    block.launch(debug=args.debug, share=args.share, show_api=False, show_error=True)
\ No newline at end of file
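As a sanity check on the `xywh_to_xyxy` conversion that `app.py` relies on, here is a standalone re-derivation; the function body mirrors the helper above and the sample box is made up.

```python
# Standalone check of the cxcywh -> xyxy conversion used in app.py above.
import torch

def cxcywh_to_xyxy(box: torch.Tensor, size_w: int, size_h: int) -> torch.Tensor:
    box = box * torch.tensor([size_w, size_h, size_w, size_h], dtype=torch.float)
    box[:2] -= box[2:] / 2  # shift the center to the top-left corner
    box[2:] += box[:2]      # turn width/height into the bottom-right corner
    return box

# A box centered in a 100x100 image, covering half of each dimension:
print(cxcywh_to_xyxy(torch.tensor([0.5, 0.5, 0.5, 0.5]), 100, 100))
# -> tensor([25., 25., 75., 75.])
```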
diff --git a/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinB.cfg.py b/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinB.cfg.py
deleted file mode 100644
index f490c4bbd598a35de43d36ceafcbd769e7ff21bf..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/groundingdino/config/GroundingDINO_SwinB.cfg.py
+++ /dev/null
@@ -1,43 +0,0 @@
-batch_size = 1
-modelname = "groundingdino"
-backbone = "swin_B_384_22k"
-position_embedding = "sine"
-pe_temperatureH = 20
-pe_temperatureW = 20
-return_interm_indices = [1, 2, 3]
-backbone_freeze_keywords = None
-enc_layers = 6
-dec_layers = 6
-pre_norm = False
-dim_feedforward = 2048
-hidden_dim = 256
-dropout = 0.0
-nheads = 8
-num_queries = 900
-query_dim = 4
-num_patterns = 0
-num_feature_levels = 4
-enc_n_points = 4
-dec_n_points = 4
-two_stage_type = "standard"
-two_stage_bbox_embed_share = False
-two_stage_class_embed_share = False
-transformer_activation = "relu"
-dec_pred_bbox_embed_share = True
-dn_box_noise_scale = 1.0
-dn_label_noise_ratio = 0.5
-dn_label_coef = 1.0
-dn_bbox_coef = 1.0
-embed_init_tgt = True
-dn_labelbook_size = 2000
-max_text_len = 256
-text_encoder_type = "bert-base-uncased"
-use_text_enhancer = True
-use_fusion_layer = True
-use_checkpoint = True
-use_transformer_ckpt = True
-use_text_cross_attention = True
-text_dropout = 0.0
-fusion_dropout = 0.0
-fusion_droppath = 0.1
-sub_sentence_present = True
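This config follows the same flat-module convention that `load_model_hf` in the `app.py` above consumes via `SLConfig.fromfile`. A sketch of building the SwinB variant, with checkpoint loading omitted:

```python
# Sketch: build the SwinB variant from this config, mirroring load_model_hf above.
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig

args = SLConfig.fromfile("groundingdino/config/GroundingDINO_SwinB.cfg.py")
args.device = "cpu"
model = build_model(args)
model.eval()  # weights would normally be loaded from a checkpoint first
```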
diff --git a/spaces/jbetker/tortoise/tortoise/__init__.py b/spaces/jbetker/tortoise/tortoise/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/jbilcke-hf/VideoQuest/src/components/ui/input.tsx b/spaces/jbilcke-hf/VideoQuest/src/components/ui/input.tsx
deleted file mode 100644
index 09fc0791ad25f88857f12280fed9882193a092e1..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/VideoQuest/src/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from "react"
-
-import { cn } from "@/lib/utils"
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    return (
-      <input
-        type={type}
-        className={cn(
-          "flex h-10 w-full rounded-md border border-stone-200 bg-white px-3 py-2 text-sm ring-offset-white file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-stone-500 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-stone-400 focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 dark:border-stone-800 dark:bg-stone-950 dark:ring-offset-stone-950 dark:placeholder:text-stone-400 dark:focus-visible:ring-stone-800",
-          className
-        )}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Input.displayName = "Input"
-
-export { Input }
diff --git a/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/README.md b/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/README.md
deleted file mode 100644
index 5dcc7a96368adfd3af2d32a9bd7fe46dfcd59d8c..0000000000000000000000000000000000000000
--- a/spaces/jbraun19/Webcam-Object-Recognition-Yolo-n-Coco/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 📷 Webcam Object Recognition Yolo Coco 🔍 Live Gradio
-emoji: 📷Live
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-duplicated_from: awacke1/Webcam-Object-Recognition-Yolo-n-Coco
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/jiaxianustc/mbp/UltraFlow/models/__init__.py b/spaces/jiaxianustc/mbp/UltraFlow/models/__init__.py
deleted file mode 100644
index 1e918d160c77e60b7a6fed1ff4fb13fc418ebe20..0000000000000000000000000000000000000000
--- a/spaces/jiaxianustc/mbp/UltraFlow/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .sbap import *
\ No newline at end of file
diff --git a/spaces/jmartinezot/find_plane_pointcloud/README.md b/spaces/jmartinezot/find_plane_pointcloud/README.md
deleted file mode 100644
index 4aacfde33ac33c65efce792b71d3b83c95e41413..0000000000000000000000000000000000000000
--- a/spaces/jmartinezot/find_plane_pointcloud/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Find Plane Pointcloud
-emoji: 📚
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/__init__.py
deleted file mode 100644
index 4bda084199333e63a0b4efcfbb28a1c5a67eb2c3..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# ===================================================================
-# The contents of this file are dedicated to the public domain.  To
-# the extent that dedication to the public domain is not available,
-# everyone is granted a worldwide, perpetual, royalty-free,
-# non-exclusive license to exercise all rights associated with the
-# contents of this file for any purpose whatsoever.
-# No rights are reserved.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-# ===================================================================
-
-__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD160', 'SHA1',
-           'SHA224', 'SHA256', 'SHA384', 'SHA512', 'CMAC', 'Poly1305',
-           'cSHAKE128', 'cSHAKE256', 'KMAC128', 'KMAC256',
-           'TupleHash128', 'TupleHash256', 'KangarooTwelve']
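Every name in the `__all__` list above is a submodule exposing the same `new()`-based interface; a small example with SHA256, assuming the pycryptodome-style `Crypto` package is importable:

```python
# Example of the uniform hash interface exported by the __all__ list above.
from Crypto.Hash import SHA256

h = SHA256.new()
h.update(b"hello ")
h.update(b"world")
print(h.hexdigest())  # 64-character hex digest of b"hello world"
```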
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_helpers.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_helpers.py
deleted file mode 100644
index 5643d77c242878b261d15d69d07a4d7cf1baded6..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_helpers.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2021-present MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Setstate and getstate functions for objects with __slots__, allowing
-compatibility with default pickling protocol
-"""
-from typing import Any, Mapping
-
-
-def _setstate_slots(self: Any, state: Any) -> None:
-    for slot, value in state.items():
-        setattr(self, slot, value)
-
-
-def _mangle_name(name: str, prefix: str) -> str:
-    if name.startswith("__"):
-        prefix = "_" + prefix
-    else:
-        prefix = ""
-    return prefix + name
-
-
-def _getstate_slots(self: Any) -> Mapping[Any, Any]:
-    prefix = self.__class__.__name__
-    ret = {}
-    for name in self.__slots__:
-        mangled_name = _mangle_name(name, prefix)
-        if hasattr(self, mangled_name):
-            ret[mangled_name] = getattr(self, mangled_name)
-    return ret
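The mangling here reproduces Python's private-attribute name mangling for `__slots__` entries; a quick trace of `_mangle_name`, with the results worked out by hand from the code above:

```python
# Behavior of _mangle_name, reproduced verbatim from the module above.
def _mangle_name(name: str, prefix: str) -> str:
    if name.startswith("__"):
        prefix = "_" + prefix
    else:
        prefix = ""
    return prefix + name

print(_mangle_name("__id", "ObjectId"))   # _ObjectId__id  (private slot, mangled)
print(_mangle_name("value", "ObjectId"))  # value          (public slot, unchanged)
```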
diff --git a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/accordion.tsx b/spaces/jordonpeter01/ai-comic-factory/src/components/ui/accordion.tsx
deleted file mode 100644
index 937620af27e5d8ef577f0baca229a9b753ebd017..0000000000000000000000000000000000000000
--- a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/accordion.tsx
+++ /dev/null
@@ -1,60 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as AccordionPrimitive from "@radix-ui/react-accordion"
-import { ChevronDown } from "lucide-react"
-
-import { cn } from "@/lib/utils"
-
-const Accordion = AccordionPrimitive.Root
-
-const AccordionItem = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Item>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Item>
->(({ className, ...props }, ref) => (
-  <AccordionPrimitive.Item
-    ref={ref}
-    className={cn("border-b", className)}
-    {...props}
-  />
-))
-AccordionItem.displayName = "AccordionItem"
-
-const AccordionTrigger = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Trigger>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Trigger>
->(({ className, children, ...props }, ref) => (
-  <AccordionPrimitive.Header className="flex">
-    <AccordionPrimitive.Trigger
-      ref={ref}
-      className={cn(
-        "flex flex-1 items-center justify-between py-4 font-medium transition-all hover:underline [&[data-state=open]>svg]:rotate-180",
-        className
-      )}
-      {...props}
-    >
-      {children}
-      <ChevronDown className="h-4 w-4 shrink-0 transition-transform duration-200" />
-    </AccordionPrimitive.Trigger>
-  </AccordionPrimitive.Header>
-))
-AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName
-
-const AccordionContent = React.forwardRef<
-  React.ElementRef<typeof AccordionPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Content>
->(({ className, children, ...props }, ref) => (
-  <AccordionPrimitive.Content
-    ref={ref}
-    className={cn(
-      "overflow-hidden text-sm transition-all data-[state=closed]:animate-accordion-up data-[state=open]:animate-accordion-down",
-      className
-    )}
-    {...props}
-  >
-    <div className="pb-4 pt-0">{children}</div>
-  </AccordionPrimitive.Content>
-))
-AccordionContent.displayName = AccordionPrimitive.Content.displayName
-
-export { Accordion, AccordionItem, AccordionTrigger, AccordionContent }
diff --git a/spaces/juanhuggingface/ChuanhuChatGPT_Beta/modules/models/__init__.py b/spaces/juanhuggingface/ChuanhuChatGPT_Beta/modules/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/jurgendn/table-extraction/models/base_model/classification.py b/spaces/jurgendn/table-extraction/models/base_model/classification.py
deleted file mode 100644
index 41ca49c540fd82f5d714fc59e03fb1b32b59e26b..0000000000000000000000000000000000000000
--- a/spaces/jurgendn/table-extraction/models/base_model/classification.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from abc import abstractmethod
-from typing import Any, Dict, List
-
-import torch
-from pytorch_lightning import LightningModule
-from torch import Tensor
-
-
-class LightningClassification(LightningModule):
-
-    @abstractmethod
-    def __init__(self, *args, **kwargs) -> None:
-        super(LightningClassification, self).__init__(*args, **kwargs)
-        self.train_batch_output: List[Dict] = []
-        self.validation_batch_output: List[Dict] = []
-        self.log_value_list: List[str] = ['loss', 'f1', 'precision', 'recall']
-
-    @abstractmethod
-    def forward(self, *args, **kwargs) -> Any:
-        pass
-
-    @abstractmethod
-    def configure_optimizers(self):
-        pass
-
-    @abstractmethod
-    def loss(self, input: Tensor, target: Tensor, **kwargs) -> Tensor:
-        pass
-
-    @abstractmethod
-    def training_step(self, batch, batch_idx):
-        pass
-
-    def __average(self, key: str, outputs: List[Dict]) -> Tensor:
-        target_arr = torch.Tensor([val[key] for val in outputs]).float()
-        return target_arr.mean()
-
-    @torch.no_grad()
-    def on_train_epoch_start(self) -> None:
-        self.train_batch_output = []
-
-    @torch.no_grad()
-    def on_train_epoch_end(self) -> None:
-        for key in self.log_value_list:
-            val = self.__average(key=key, outputs=self.train_batch_output)
-            log_name = f"training/{key}"
-            self.log(name=log_name, value=val)
-
-    @abstractmethod
-    @torch.no_grad()
-    def validation_step(self, batch, batch_idx):
-        pass
-
-    @torch.no_grad()
-    def on_validation_epoch_start(self) -> None:
-        self.validation_batch_output = []
-
-    @torch.no_grad()
-    def on_validation_epoch_end(self) -> None:
-        for key in self.log_value_list:
-            val = self.__average(key=key, outputs=self.validation_batch_output)
-            log_name = f"val/{key}"
-            self.log(name=log_name, value=val)
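The epoch hooks above average whatever each step appended to `train_batch_output` / `validation_batch_output`, so a subclass must push a dict containing every key in `log_value_list`. A hedged sketch of a `training_step` body honoring that contract; the metric values are placeholders, not a real implementation:

```python
# Sketch of the step contract implied by the epoch hooks above: append one
# dict per batch with all keys from log_value_list, then return the loss.
def training_step(self, batch, batch_idx):
    x, y = batch
    logits = self(x)
    loss = self.loss(logits, y)
    self.train_batch_output.append({
        "loss": loss.item(),
        "f1": 0.0,         # placeholder; a real subclass computes these
        "precision": 0.0,
        "recall": 0.0,
    })
    return loss
```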
diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py
deleted file mode 100644
index c6d3b9c240c24687d432197f976ee01fbf423216..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import torch
-from torch import nn
-
-__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
-
-
-def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
-    """3x3 convolution with padding"""
-    return nn.Conv2d(in_planes,
-                     out_planes,
-                     kernel_size=3,
-                     stride=stride,
-                     padding=dilation,
-                     groups=groups,
-                     bias=False,
-                     dilation=dilation)
-
-
-def conv1x1(in_planes, out_planes, stride=1):
-    """1x1 convolution"""
-    return nn.Conv2d(in_planes,
-                     out_planes,
-                     kernel_size=1,
-                     stride=stride,
-                     bias=False)
-
-
-class IBasicBlock(nn.Module):
-    expansion = 1
-    def __init__(self, inplanes, planes, stride=1, downsample=None,
-                 groups=1, base_width=64, dilation=1):
-        super(IBasicBlock, self).__init__()
-        if groups != 1 or base_width != 64:
-            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
-        if dilation > 1:
-            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
-        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
-        self.conv1 = conv3x3(inplanes, planes)
-        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
-        self.prelu = nn.PReLU(planes)
-        self.conv2 = conv3x3(planes, planes, stride)
-        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
-        self.downsample = downsample
-        self.stride = stride
-
-    def forward(self, x):
-        identity = x
-        out = self.bn1(x)
-        out = self.conv1(out)
-        out = self.bn2(out)
-        out = self.prelu(out)
-        out = self.conv2(out)
-        out = self.bn3(out)
-        if self.downsample is not None:
-            identity = self.downsample(x)
-        out += identity
-        return out
-
-
-class IResNet(nn.Module):
-    fc_scale = 7 * 7
-    def __init__(self,
-                 block, layers, dropout=0, num_features=512, zero_init_residual=False,
-                 groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
-        super(IResNet, self).__init__()
-        self.fp16 = fp16
-        self.inplanes = 64
-        self.dilation = 1
-        if replace_stride_with_dilation is None:
-            replace_stride_with_dilation = [False, False, False]
-        if len(replace_stride_with_dilation) != 3:
-            raise ValueError("replace_stride_with_dilation should be None "
-                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
-        self.groups = groups
-        self.base_width = width_per_group
-        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
-        self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
-        self.prelu = nn.PReLU(self.inplanes)
-        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
-        self.layer2 = self._make_layer(block,
-                                       128,
-                                       layers[1],
-                                       stride=2,
-                                       dilate=replace_stride_with_dilation[0])
-        self.layer3 = self._make_layer(block,
-                                       256,
-                                       layers[2],
-                                       stride=2,
-                                       dilate=replace_stride_with_dilation[1])
-        self.layer4 = self._make_layer(block,
-                                       512,
-                                       layers[3],
-                                       stride=2,
-                                       dilate=replace_stride_with_dilation[2])
-        self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
-        self.dropout = nn.Dropout(p=dropout, inplace=True)
-        self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
-        self.features = nn.BatchNorm1d(num_features, eps=1e-05)
-        nn.init.constant_(self.features.weight, 1.0)
-        self.features.weight.requires_grad = False
-
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                nn.init.normal_(m.weight, 0, 0.1)
-            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
-                nn.init.constant_(m.weight, 1)
-                nn.init.constant_(m.bias, 0)
-
-        if zero_init_residual:
-            for m in self.modules():
-                if isinstance(m, IBasicBlock):
-                    nn.init.constant_(m.bn2.weight, 0)
-
-    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
-        downsample = None
-        previous_dilation = self.dilation
-        if dilate:
-            self.dilation *= stride
-            stride = 1
-        if stride != 1 or self.inplanes != planes * block.expansion:
-            downsample = nn.Sequential(
-                conv1x1(self.inplanes, planes * block.expansion, stride),
-                nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
-            )
-        layers = []
-        layers.append(
-            block(self.inplanes, planes, stride, downsample, self.groups,
-                  self.base_width, previous_dilation))
-        self.inplanes = planes * block.expansion
-        for _ in range(1, blocks):
-            layers.append(
-                block(self.inplanes,
-                      planes,
-                      groups=self.groups,
-                      base_width=self.base_width,
-                      dilation=self.dilation))
-
-        return nn.Sequential(*layers)
-
-    def forward(self, x):
-        with torch.cuda.amp.autocast(self.fp16):
-            x = self.conv1(x)
-            x = self.bn1(x)
-            x = self.prelu(x)
-            x = self.layer1(x)
-            x = self.layer2(x)
-            x = self.layer3(x)
-            x = self.layer4(x)
-            x = self.bn2(x)
-            x = torch.flatten(x, 1)
-            x = self.dropout(x)
-        x = self.fc(x.float() if self.fp16 else x)
-        x = self.features(x)
-        return x
-
-
-def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
-    model = IResNet(block, layers, **kwargs)
-    if pretrained:
-        raise ValueError("pretrained weights are not available for IResNet models")
-    return model
-
-
-def iresnet18(pretrained=False, progress=True, **kwargs):
-    return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
-                    progress, **kwargs)
-
-
-def iresnet34(pretrained=False, progress=True, **kwargs):
-    return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
-                    progress, **kwargs)
-
-
-def iresnet50(pretrained=False, progress=True, **kwargs):
-    return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
-                    progress, **kwargs)
-
-
-def iresnet100(pretrained=False, progress=True, **kwargs):
-    return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
-                    progress, **kwargs)
-
-
-def iresnet200(pretrained=False, progress=True, **kwargs):
-    return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
-                    progress, **kwargs)
-
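For reference, a minimal usage sketch of the backbone above: build an iresnet50 and embed a batch of face crops. The 112x112 input size is an assumption implied by fc_scale = 7 * 7 together with the four stride-2 stages (112 / 16 = 7).

import torch

# a minimal sketch: embed a batch of (assumed) 112x112 aligned face crops
model = iresnet50(num_features=512)
model.eval()
with torch.no_grad():
    faces = torch.randn(4, 3, 112, 112)   # 112 / 16 = 7 matches fc_scale = 7 * 7
    embeddings = model(faces)
print(embeddings.shape)  # torch.Size([4, 512])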
diff --git a/spaces/kevinwang676/M4Singer/modules/hifigan/mel_utils.py b/spaces/kevinwang676/M4Singer/modules/hifigan/mel_utils.py
deleted file mode 100644
index 04c1e3ea5de2cd24bbb14ab72206539a8d37d9c0..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/M4Singer/modules/hifigan/mel_utils.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import numpy as np
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-from scipy.io.wavfile import read
-
-MAX_WAV_VALUE = 32768.0
-
-
-def load_wav(full_path):
-    sampling_rate, data = read(full_path)
-    return data, sampling_rate
-
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
-    return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
-
-
-def dynamic_range_decompression(x, C=1):
-    return np.exp(x) / C
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def mel_spectrogram(y, hparams, center=False, complex=False):
-    # hop_size: 512  # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)
-    # win_size: 2048  # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate)
-    # fmin: 55  # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
-    # fmax: 10000  # To be increased/reduced depending on data.
-    # fft_size: 2048  # Extra window size is filled with 0 paddings to match this parameter
-    # n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax,
-    n_fft = hparams['fft_size']
-    num_mels = hparams['audio_num_mel_bins']
-    sampling_rate = hparams['audio_sample_rate']
-    hop_size = hparams['hop_size']
-    win_size = hparams['win_size']
-    fmin = hparams['fmin']
-    fmax = hparams['fmax']
-    y = y.clamp(min=-1., max=1.)
-    global mel_basis, hann_window
-    # cache the mel filterbank and Hann window per (fmax, device) pair; the
-    # lookup must use the same composite key under which the entry is stored
-    mel_key = str(fmax) + '_' + str(y.device)
-    if mel_key not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[mel_key] = torch.from_numpy(mel).float().to(y.device)
-        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
-                                mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-    if not complex:
-        spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
-        spec = torch.matmul(mel_basis[mel_key], spec)
-        spec = spectral_normalize_torch(spec)
-    else:
-        B, C, T, _ = spec.shape
-        spec = spec.transpose(1, 2)  # [B, T, n_fft, 2]
-    return spec
-
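A minimal sketch of calling mel_spectrogram above. Only the hparams key names are dictated by the function body; the concrete values here are illustrative assumptions, and the real-valued torch.stft output assumes the older PyTorch API this file was written against.

import torch

hparams = {
    'fft_size': 1024,           # n_fft
    'audio_num_mel_bins': 80,   # number of mel bands
    'audio_sample_rate': 22050,
    'hop_size': 256,
    'win_size': 1024,
    'fmin': 55,
    'fmax': 10000,
}
wav = torch.randn(1, 22050)         # [B, T]: one second of placeholder audio
mel = mel_spectrogram(wav, hparams)
print(mel.shape)                    # [B, num_mels, frames], roughly [1, 80, 86]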
diff --git a/spaces/kevinwang676/VoiceChangers/src/utils/face_enhancer.py b/spaces/kevinwang676/VoiceChangers/src/utils/face_enhancer.py
deleted file mode 100644
index 15851a15966c963d7bd04f35eebdaa6b22a3d966..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChangers/src/utils/face_enhancer.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import os
-import torch 
-
-from gfpgan import GFPGANer
-
-from tqdm import tqdm
-
-from src.utils.videoio import load_video_to_cv2
-
-import cv2
-
-
-class GeneratorWithLen(object):
-    """ From https://stackoverflow.com/a/7460929 """
-
-    def __init__(self, gen, length):
-        self.gen = gen
-        self.length = length
-
-    def __len__(self):
-        return self.length
-
-    def __iter__(self):
-        return self.gen
-
-def enhancer_list(images, method='gfpgan', bg_upsampler='realesrgan'):
-    gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)
-    return list(gen)
-
-def enhancer_generator_with_len(images, method='gfpgan', bg_upsampler='realesrgan'):
-    """ Provide a generator with a __len__ method so that it can passed to functions that
-    call len()"""
-
-    if os.path.isfile(images): # handle video to images
-        # TODO: Create a generator version of load_video_to_cv2
-        images = load_video_to_cv2(images)
-
-    gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)
-    gen_with_len = GeneratorWithLen(gen, len(images))
-    return gen_with_len
-
-def enhancer_generator_no_len(images, method='gfpgan', bg_upsampler='realesrgan'):
-    """ Provide a generator function so that all of the enhanced images don't need
-    to be stored in memory at the same time. This can save tons of RAM compared to
-    the enhancer function. """
-
-    print('face enhancer....')
-    if not isinstance(images, list) and os.path.isfile(images): # handle video to images
-        images = load_video_to_cv2(images)
-
-    # ------------------------ set up GFPGAN restorer ------------------------
-    if  method == 'gfpgan':
-        arch = 'clean'
-        channel_multiplier = 2
-        model_name = 'GFPGANv1.4'
-        url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth'
-    elif method == 'RestoreFormer':
-        arch = 'RestoreFormer'
-        channel_multiplier = 2
-        model_name = 'RestoreFormer'
-        url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth'
-    elif method == 'codeformer': # TODO:
-        arch = 'CodeFormer'
-        channel_multiplier = 2
-        model_name = 'CodeFormer'
-        url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
-    else:
-        raise ValueError(f'Wrong model version {method}.')
-
-
-    # ------------------------ set up background upsampler ------------------------
-    if bg_upsampler == 'realesrgan':
-        if not torch.cuda.is_available():  # CPU
-            import warnings
-            warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
-                          'If you really want to use it, please modify the corresponding codes.')
-            bg_upsampler = None
-        else:
-            from basicsr.archs.rrdbnet_arch import RRDBNet
-            from realesrgan import RealESRGANer
-            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-            bg_upsampler = RealESRGANer(
-                scale=2,
-                model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
-                model=model,
-                tile=400,
-                tile_pad=10,
-                pre_pad=0,
-                half=True)  # need to set False in CPU mode
-    else:
-        bg_upsampler = None
-
-    # determine model paths
-    model_path = os.path.join('gfpgan/weights', model_name + '.pth')
-    
-    if not os.path.isfile(model_path):
-        model_path = os.path.join('checkpoints', model_name + '.pth')
-    
-    if not os.path.isfile(model_path):
-        # download pre-trained models from url
-        model_path = url
-
-    restorer = GFPGANer(
-        model_path=model_path,
-        upscale=2,
-        arch=arch,
-        channel_multiplier=channel_multiplier,
-        bg_upsampler=bg_upsampler)
-
-    # ------------------------ restore ------------------------
-    for idx in tqdm(range(len(images)), 'Face Enhancer:'):
-        
-        img = cv2.cvtColor(images[idx], cv2.COLOR_RGB2BGR)
-        
-        # restore faces and background if necessary
-        cropped_faces, restored_faces, r_img = restorer.enhance(
-            img,
-            has_aligned=False,
-            only_center_face=False,
-            paste_back=True)
-        
-        r_img = cv2.cvtColor(r_img, cv2.COLOR_BGR2RGB)
-        yield r_img
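A minimal sketch of consuming the generator above; 'input.mp4' is a placeholder path, and tqdm can show a proper progress bar because the wrapped generator reports a length.

from tqdm import tqdm

gen = enhancer_generator_with_len('input.mp4', method='gfpgan',
                                  bg_upsampler='realesrgan')
enhanced = [frame for frame in tqdm(gen, desc='enhancing')]  # RGB frames, 2x upscaled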
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/memory.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/memory.py
deleted file mode 100644
index 70cf9a838fb314e3bd3c07aadbc00921a81e83ed..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/runner/hooks/memory.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-
-from .hook import HOOKS, Hook
-
-
-@HOOKS.register_module()
-class EmptyCacheHook(Hook):
-    """Call torch.cuda.empty_cache() before/after each epoch or after each
-    iteration, as selected by the constructor flags."""
-
-    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
-        self._before_epoch = before_epoch
-        self._after_epoch = after_epoch
-        self._after_iter = after_iter
-
-    def after_iter(self, runner):
-        if self._after_iter:
-            torch.cuda.empty_cache()
-
-    def before_epoch(self, runner):
-        if self._before_epoch:
-            torch.cuda.empty_cache()
-
-    def after_epoch(self, runner):
-        if self._after_epoch:
-            torch.cuda.empty_cache()
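A minimal sketch of enabling the hook from an mmcv-style config fragment, assuming the usual custom_hooks mechanism; by default only after_epoch is active.

custom_hooks = [
    dict(type='EmptyCacheHook', before_epoch=True, after_epoch=True, after_iter=False)
]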
diff --git a/spaces/koajoel/PolyFormer/bert/configuration_bert.py b/spaces/koajoel/PolyFormer/bert/configuration_bert.py
deleted file mode 100644
index 8e815837bc4dbc5fc8eec7ee37547b5d41519af5..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/bert/configuration_bert.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
-# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" BERT model configuration """
-
-
-import logging
-
-from .configuration_utils import PretrainedConfig
-
-
-logger = logging.getLogger(__name__)
-
-BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
-    "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
-    "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
-    "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
-    "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
-    "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
-    "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
-    "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
-    "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
-    "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
-    "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
-    "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
-    "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
-    "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
-    "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
-    "cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/config.json",
-    "cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/config.json",
-    "cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/config.json",
-    "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/config.json",
-    "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
-    "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
-    "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
-    # See all BERT models at https://huggingface.co/models?filter=bert
-}
-
-
-class BertConfig(PretrainedConfig):
-    r"""
-        This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
-        It is used to instantiate a BERT model according to the specified arguments, defining the model
-        architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
-        the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
-
-        Configuration objects inherit from  :class:`~transformers.PretrainedConfig` and can be used
-        to control the model outputs. Read the documentation from  :class:`~transformers.PretrainedConfig`
-        for more information.
-
-
-        Args:
-            vocab_size (:obj:`int`, optional, defaults to 30522):
-                Vocabulary size of the BERT model. Defines the number of different tokens that
-                can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.
-            hidden_size (:obj:`int`, optional, defaults to 768):
-                Dimensionality of the encoder layers and the pooler layer.
-            num_hidden_layers (:obj:`int`, optional, defaults to 12):
-                Number of hidden layers in the Transformer encoder.
-            num_attention_heads (:obj:`int`, optional, defaults to 12):
-                Number of attention heads for each attention layer in the Transformer encoder.
-            intermediate_size (:obj:`int`, optional, defaults to 3072):
-                Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-            hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
-                The non-linear activation function (function or string) in the encoder and pooler.
-                If string, "gelu", "relu", "swish" and "gelu_new" are supported.
-            hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
-                The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-            attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
-                The dropout ratio for the attention probabilities.
-            max_position_embeddings (:obj:`int`, optional, defaults to 512):
-                The maximum sequence length that this model might ever be used with.
-                Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
-            type_vocab_size (:obj:`int`, optional, defaults to 2):
-                The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
-            initializer_range (:obj:`float`, optional, defaults to 0.02):
-                The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-            layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
-                The epsilon used by the layer normalization layers.
-            gradient_checkpointing (:obj:`bool`, optional, defaults to False):
-                If True, use gradient checkpointing to save memory at the expense of slower backward pass.
-
-        Example::
-
-            >>> from transformers import BertModel, BertConfig
-
-            >>> # Initializing a BERT bert-base-uncased style configuration
-            >>> configuration = BertConfig()
-
-            >>> # Initializing a model from the bert-base-uncased style configuration
-            >>> model = BertModel(configuration)
-
-            >>> # Accessing the model configuration
-            >>> configuration = model.config
-    """
-    model_type = "bert"
-
-    def __init__(
-        self,
-        vocab_size=30522,
-        hidden_size=768,
-        num_hidden_layers=12,
-        num_attention_heads=12,
-        intermediate_size=3072,
-        hidden_act="gelu",
-        hidden_dropout_prob=0.1,
-        attention_probs_dropout_prob=0.1,
-        max_position_embeddings=512,
-        type_vocab_size=2,
-        initializer_range=0.02,
-        layer_norm_eps=1e-12,
-        pad_token_id=0,
-        gradient_checkpointing=False,
-        **kwargs
-    ):
-        super().__init__(pad_token_id=pad_token_id, **kwargs)
-
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.hidden_act = hidden_act
-        self.intermediate_size = intermediate_size
-        self.hidden_dropout_prob = hidden_dropout_prob
-        self.attention_probs_dropout_prob = attention_probs_dropout_prob
-        self.max_position_embeddings = max_position_embeddings
-        self.type_vocab_size = type_vocab_size
-        self.initializer_range = initializer_range
-        self.layer_norm_eps = layer_norm_eps
-        self.gradient_checkpointing = gradient_checkpointing
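Beyond the default construction shown in the docstring, a minimal sketch of a deliberately small configuration; the values are illustrative, and the argument names come from the __init__ signature above.

small_config = BertConfig(
    hidden_size=256,          # smaller encoder width for quick tests
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=1024,
)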
diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/__init__.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/train_standard.sh b/spaces/kukuhtw/VToonify/vtoonify/model/raft/train_standard.sh
deleted file mode 100644
index 7f559b386b6b596ec14a94f0d8c13974309b7d80..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/VToonify/vtoonify/model/raft/train_standard.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-mkdir -p checkpoints
-python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001
-python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001
-python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85
-python -u train.py --name raft-kitti  --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/utils/plugin_registry.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/utils/plugin_registry.py
deleted file mode 100644
index 37d3db222ef2c7920628971a92e863d9915514c6..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/utils/plugin_registry.py
+++ /dev/null
@@ -1,228 +0,0 @@
-import sys
-from typing import Any, Dict, List, Optional, Generic, TypeVar, cast
-from types import TracebackType
-
-if sys.version_info >= (3, 8):
-    from importlib.metadata import entry_points
-else:
-    from importlib_metadata import entry_points
-
-from toolz import curry
-
-
-PluginType = TypeVar("PluginType")
-
-
-class NoSuchEntryPoint(Exception):
-    def __init__(self, group, name):
-        self.group = group
-        self.name = name
-
-    def __str__(self):
-        return f"No {self.name!r} entry point found in group {self.group!r}"
-
-
-class PluginEnabler:
-    """Context manager for enabling plugins
-
-    This object lets you use enable() as a context manager to
-    temporarily enable a given plugin::
-
-        with plugins.enable('name'):
-            do_something()  # 'name' plugin temporarily enabled
-        # plugins back to original state
-    """
-
-    def __init__(self, registry: "PluginRegistry", name: str, **options):
-        self.registry = registry  # type: PluginRegistry
-        self.name = name  # type: str
-        self.options = options  # type: Dict[str, Any]
-        self.original_state = registry._get_state()  # type: Dict[str, Any]
-        self.registry._enable(name, **options)
-
-    def __enter__(self) -> "PluginEnabler":
-        return self
-
-    def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None:
-        self.registry._set_state(self.original_state)
-
-    def __repr__(self) -> str:
-        return "{}.enable({!r})".format(self.registry.__class__.__name__, self.name)
-
-
-class PluginRegistry(Generic[PluginType]):
-    """A registry for plugins.
-
-    This is a plugin registry that allows plugins to be loaded/registered
-    in two ways:
-
-    1. Through an explicit call to ``.register(name, value)``.
-    2. By looking for other Python packages that are installed and provide
-       a setuptools entry point group.
-
-    When you create an instance of this class, provide the name of the
-    entry point group to use::
-
-        reg = PluginRegistry('my_entrypoint_group')
-
-    """
-
-    # this is a mapping of name to error message to allow custom error messages
-    # in case an entrypoint is not found
-    entrypoint_err_messages = {}  # type: Dict[str, str]
-
-    # global settings is a key-value mapping of settings that are stored globally
-    # in the registry rather than passed to the plugins
-    _global_settings = {}  # type: Dict[str, Any]
-
-    def __init__(self, entry_point_group: str = "", plugin_type: type = object):
-        """Create a PluginRegistry for a named entry point group.
-
-        Parameters
-        ==========
-        entry_point_group: str
-            The name of the entry point group.
-        plugin_type: object
-            A type that will optionally be used for runtime type checking of
-            loaded plugins using isinstance.
-        """
-        self.entry_point_group = entry_point_group  # type: str
-        self.plugin_type = plugin_type  # type: Optional[type]
-        self._active = None  # type: Optional[PluginType]
-        self._active_name = ""  # type: str
-        self._plugins = {}  # type: Dict[str, PluginType]
-        self._options = {}  # type: Dict[str, Any]
-        self._global_settings = self.__class__._global_settings.copy()  # type: dict
-
-    def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]:
-        """Register a plugin by name and value.
-
-        This method is used for explicit registration of a plugin and shouldn't be
-        used to manage entry point managed plugins, which are auto-loaded.
-
-        Parameters
-        ==========
-        name: str
-            The name of the plugin.
-        value: PluginType or None
-            The actual plugin object to register or None to unregister that plugin.
-
-        Returns
-        =======
-        plugin: PluginType or None
-            The plugin that was registered or unregistered.
-        """
-        if value is None:
-            return self._plugins.pop(name, None)
-        else:
-            assert isinstance(value, self.plugin_type)  # type: ignore[arg-type]  # Should ideally be fixed by better annotating plugin_type
-            self._plugins[name] = value
-            return value
-
-    def names(self) -> List[str]:
-        """List the names of the registered and entry points plugins."""
-        exts = list(self._plugins.keys())
-        e_points = importlib_metadata_get(self.entry_point_group)
-        more_exts = [ep.name for ep in e_points]
-        exts.extend(more_exts)
-        return sorted(set(exts))
-
-    def _get_state(self) -> Dict[str, Any]:
-        """Return a dictionary representing the current state of the registry"""
-        return {
-            "_active": self._active,
-            "_active_name": self._active_name,
-            "_plugins": self._plugins.copy(),
-            "_options": self._options.copy(),
-            "_global_settings": self._global_settings.copy(),
-        }
-
-    def _set_state(self, state: Dict[str, Any]) -> None:
-        """Reset the state of the registry"""
-        assert set(state.keys()) == {
-            "_active",
-            "_active_name",
-            "_plugins",
-            "_options",
-            "_global_settings",
-        }
-        for key, val in state.items():
-            setattr(self, key, val)
-
-    def _enable(self, name: str, **options) -> None:
-        if name not in self._plugins:
-            try:
-                (ep,) = [
-                    ep
-                    for ep in importlib_metadata_get(self.entry_point_group)
-                    if ep.name == name
-                ]
-            except ValueError as err:
-                if name in self.entrypoint_err_messages:
-                    raise ValueError(self.entrypoint_err_messages[name]) from err
-                else:
-                    raise NoSuchEntryPoint(self.entry_point_group, name) from err
-            value = cast(PluginType, ep.load())
-            self.register(name, value)
-        self._active_name = name
-        self._active = self._plugins[name]
-        for key in set(options.keys()) & set(self._global_settings.keys()):
-            self._global_settings[key] = options.pop(key)
-        self._options = options
-
-    def enable(self, name: Optional[str] = None, **options) -> PluginEnabler:
-        """Enable a plugin by name.
-
-        This can be either called directly, or used as a context manager.
-
-        Parameters
-        ----------
-        name : string (optional)
-            The name of the plugin to enable. If not specified, then use the
-            current active name.
-        **options :
-            Any additional parameters will be passed to the plugin as keyword
-            arguments
-
-        Returns
-        -------
-        PluginEnabler:
-            An object that allows enable() to be used as a context manager
-        """
-        if name is None:
-            name = self.active
-        return PluginEnabler(self, name, **options)
-
-    @property
-    def active(self) -> str:
-        """Return the name of the currently active plugin"""
-        return self._active_name
-
-    @property
-    def options(self) -> Dict[str, Any]:
-        """Return the current options dictionary"""
-        return self._options
-
-    def get(self) -> Optional[PluginType]:
-        """Return the currently active plugin."""
-        if self._options:
-            return curry(self._active, **self._options)
-        else:
-            return self._active
-
-    def __repr__(self) -> str:
-        return "{}(active={!r}, registered={!r})" "".format(
-            self.__class__.__name__, self._active_name, list(self.names())
-        )
-
-
-def importlib_metadata_get(group):
-    ep = entry_points()
-    # 'select' was introduced in Python 3.10 and 'get' got deprecated
-    # We don't check for Python version here as by checking with hasattr we
-    # also get compatibility with the importlib_metadata package which had a different
-    # deprecation cycle for 'get'
-    if hasattr(ep, "select"):
-        return ep.select(group=group)
-    else:
-        return ep.get(group, [])
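A minimal sketch of the registry above; the entry point group and plugin names are made up for illustration.

renderers = PluginRegistry(entry_point_group='my_renderers')
renderers.register('plain', lambda chart: str(chart))

with renderers.enable('plain'):
    plugin = renderers.get()   # the active plugin, curried with any options
print(renderers.active)        # previous active name restored on exit, here ''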
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/ModifyUpload-00319b5e.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/ModifyUpload-00319b5e.js
deleted file mode 100644
index 0864772e60373453bb453af15cec6832f1f7ee8f..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/ModifyUpload-00319b5e.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as h,i as b,s as _,G as k,e as x,C as r,g as v,E as p,m as I,J as E,p as d,t as g,q as w,n as C,y as L,B as m,D as c,F as f,H as z,M,l as j,o as q,r as D}from"./index-8c3da1d9.js";import"./Button-62634b34.js";/* empty css                                                    */import"./ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js";function S(a){let e,l,t,s,n,o;return t=new a[0]({}),{c(){e=k("button"),l=k("div"),x(t.$$.fragment),r(l,"class","svelte-1p4r00v"),r(e,"aria-label",a[1]),r(e,"class","svelte-1p4r00v")},m(i,u){v(i,e,u),p(e,l),I(t,l,null),s=!0,n||(o=E(e,"click",a[2]),n=!0)},p(i,[u]){(!s||u&2)&&r(e,"aria-label",i[1])},i(i){s||(d(t.$$.fragment,i),s=!0)},o(i){g(t.$$.fragment,i),s=!1},d(i){i&&w(e),C(t),n=!1,o()}}}function y(a,e,l){let{Icon:t}=e,{label:s=""}=e;function n(o){L.call(this,a,o)}return a.$$set=o=>{"Icon"in o&&l(0,t=o.Icon),"label"in o&&l(1,s=o.label)},[t,s,n]}class B extends h{constructor(e){super(),b(this,e,y,S,_,{Icon:0,label:1})}}function F(a){let e,l,t,s;return{c(){e=m("svg"),l=m("g"),t=m("path"),s=m("path"),r(t,"d","M18,6L6.087,17.913"),c(t,"fill","none"),c(t,"fill-rule","nonzero"),c(t,"stroke-width","2px"),r(l,"transform","matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"),r(s,"d","M4.364,4.364L19.636,19.636"),c(s,"fill","none"),c(s,"fill-rule","nonzero"),c(s,"stroke-width","2px"),r(e,"width","100%"),r(e,"height","100%"),r(e,"viewBox","0 0 24 24"),r(e,"version","1.1"),r(e,"xmlns","http://www.w3.org/2000/svg"),r(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),r(e,"xml:space","preserve"),r(e,"stroke","currentColor"),c(e,"fill-rule","evenodd"),c(e,"clip-rule","evenodd"),c(e,"stroke-linecap","round"),c(e,"stroke-linejoin","round")},m(n,o){v(n,e,o),p(e,l),p(l,t),p(e,s)},p:f,i:f,o:f,d(n){n&&w(e)}}}class G extends h{constructor(e){super(),b(this,e,null,F,_,{})}}function H(a){let e,l;return{c(){e=m("svg"),l=m("path"),r(l,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),r(e,"xmlns","http://www.w3.org/2000/svg"),r(e,"width","100%"),r(e,"height","100%"),r(e,"viewBox","0 0 24 24"),r(e,"fill","none"),r(e,"stroke","currentColor"),r(e,"stroke-width","1.5"),r(e,"stroke-linecap","round"),r(e,"stroke-linejoin","round"),r(e,"class","feather feather-edit-2")},m(t,s){v(t,e,s),p(e,l)},p:f,i:f,o:f,d(t){t&&w(e)}}}class J extends h{constructor(e){super(),b(this,e,null,H,_,{})}}function $(a){let e,l;return e=new B({props:{Icon:J,label:"Edit"}}),e.$on("click",a[3]),{c(){x(e.$$.fragment)},m(t,s){I(e,t,s),l=!0},p:f,i(t){l||(d(e.$$.fragment,t),l=!0)},o(t){g(e.$$.fragment,t),l=!1},d(t){C(e,t)}}}function P(a){let e,l,t,s,n=a[0]&&$(a);return t=new B({props:{Icon:G,label:"Clear"}}),t.$on("click",a[4]),{c(){e=k("div"),n&&n.c(),l=z(),x(t.$$.fragment),r(e,"class","svelte-19sk1im"),M(e,"not-absolute",!a[1]),c(e,"position",a[1]?"absolute":"static")},m(o,i){v(o,e,i),n&&n.m(e,null),p(e,l),I(t,e,null),s=!0},p(o,[i]){o[0]?n?(n.p(o,i),i&1&&d(n,1)):(n=$(o),n.c(),d(n,1),n.m(e,l)):n&&(j(),g(n,1,1,()=>{n=null}),q()),(!s||i&2)&&M(e,"not-absolute",!o[1]),i&2&&c(e,"position",o[1]?"absolute":"static")},i(o){s||(d(n),d(t.$$.fragment,o),s=!0)},o(o){g(n),g(t.$$.fragment,o),s=!1},d(o){o&&w(e),n&&n.d(),C(t)}}}function U(a,e,l){let{editable:t=!1}=e,{absolute:s=!0}=e;const n=D(),o=()=>n("edit"),i=u=>{n("clear"),u.stopPropagation()};return a.$$set=u=>{"editable"in u&&l(0,t=u.editable),"absolute"in u&&l(1,s=u.absolute)},[t,s,n,o,i]}class Q extends h{constructor(e){super(),b(this,e,U,P,_,{editable:0,absolute:1})}}export{G as C,B as I,Q as M};
-//# sourceMappingURL=ModifyUpload-00319b5e.js.map
diff --git a/spaces/leogabraneth/text-generation-webui-main/modules/RWKV.py b/spaces/leogabraneth/text-generation-webui-main/modules/RWKV.py
deleted file mode 100644
index 8a15e5406f84b2f2c0ee9838b41dfed2c8c6b966..0000000000000000000000000000000000000000
--- a/spaces/leogabraneth/text-generation-webui-main/modules/RWKV.py
+++ /dev/null
@@ -1,154 +0,0 @@
-'''
-This loader is not currently maintained as RWKV can now be loaded
-through the transformers library.
-'''
-
-import copy
-import os
-from pathlib import Path
-
-import numpy as np
-from tokenizers import Tokenizer
-from transformers import is_torch_xpu_available
-
-import modules.shared as shared
-from modules.callbacks import Iteratorize
-
-np.set_printoptions(precision=4, suppress=True, linewidth=200)
-
-os.environ['RWKV_JIT_ON'] = '1'
-os.environ["RWKV_CUDA_ON"] = '1' if shared.args.rwkv_cuda_on else '0'  # use CUDA kernel for seq mode (much faster)
-
-from rwkv.model import RWKV
-from rwkv.utils import PIPELINE, PIPELINE_ARGS
-
-
-class RWKVModel:
-    def __init__(self):
-        pass
-
-    @classmethod
-    def from_pretrained(cls, path, dtype="bf16" if is_torch_xpu_available() else "fp16", device="xpu" if is_torch_xpu_available() else "cuda"):
-        tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
-        if shared.args.rwkv_strategy is None:
-            model = RWKV(model=str(path), strategy=f'{device} {dtype}')
-        else:
-            model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
-
-        pipeline = PIPELINE(model, str(tokenizer_path))
-        result = cls()
-        result.pipeline = pipeline
-        result.model = model
-        result.cached_context = ""
-        result.cached_model_state = None
-        result.cached_output_logits = None
-        return result
-
-    def generate(self, prompt, state, callback=None):
-        args = PIPELINE_ARGS(
-            temperature=state['temperature'],
-            top_p=state['top_p'],
-            top_k=state['top_k'],
-            alpha_frequency=0.1,  # Frequency Penalty (as in GPT-3)
-            alpha_presence=0.1,  # Presence Penalty (as in GPT-3)
-            token_ban=[0],  # ban the generation of some tokens
-            token_stop=[]
-        )
-
-        if self.cached_context != "":
-            if prompt.startswith(self.cached_context):
-                prompt = prompt[len(self.cached_context):]
-            else:
-                self.cached_context = ""
-                self.cached_model_state = None
-                self.cached_output_logits = None
-
-        # out = self.pipeline.generate(prompt, token_count=state['max_new_tokens'], args=args, callback=callback)
-        out = self.generate_from_cached_state(prompt, token_count=state['max_new_tokens'], args=args, callback=callback)
-        return out
-
-    def generate_with_streaming(self, *args, **kwargs):
-        with Iteratorize(self.generate, args, kwargs, callback=None) as generator:
-            reply = ''
-            for token in generator:
-                reply += token
-                yield reply
-
-    # Similar to the PIPELINE.generate, but lets us maintain the cached_model_state
-    def generate_from_cached_state(self, ctx="", token_count=20, args=None, callback=None):
-        all_tokens = []
-        out_str = ''
-        occurrence = {}
-        state = copy.deepcopy(self.cached_model_state) if self.cached_model_state is not None else None
-
-        # if we ended up with an empty context, just reuse the cached logits
-        # this can happen if a user undoes a message and then sends the exact message again
-        # in that case the full context ends up being the same as the cached_context, so the remaining context is empty.
-        if ctx == "":
-            out = self.cached_output_logits
-
-        token = None
-        for i in range(token_count):
-            # forward
-            tokens = self.pipeline.encode(ctx) if i == 0 else [token]
-            while len(tokens) > 0:
-                out, state = self.model.forward(tokens[:args.chunk_len], state)
-                tokens = tokens[args.chunk_len:]
-            if i == 0:
-                begin_token = len(all_tokens)
-                last_token_posi = begin_token
-            # cache the model state after scanning the context
-            # we don't cache the state after processing our own generated tokens because
-            # the output string might be post-processed arbitrarily. Therefore, what's fed into the model
-            # on the next round of chat might be slightly different from what it output on the previous round
-            if i == 0:
-                self.cached_context += ctx
-                self.cached_model_state = copy.deepcopy(state)
-                self.cached_output_logits = copy.deepcopy(out)
-
-            # adjust probabilities
-            for n in args.token_ban:
-                out[n] = -float('inf')
-
-            for n in occurrence:
-                out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
-
-            # sampler
-            token = self.pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k)
-            if token in args.token_stop:
-                break
-
-            all_tokens += [token]
-            if token not in occurrence:
-                occurrence[token] = 1
-            else:
-                occurrence[token] += 1
-
-            # output
-            tmp = self.pipeline.decode(all_tokens[last_token_posi:])
-            if '\ufffd' not in tmp:  # is valid utf-8 string?
-                if callback:
-                    callback(tmp)
-
-                out_str += tmp
-                last_token_posi = begin_token + i + 1
-        return out_str
-
-
-class RWKVTokenizer:
-    def __init__(self):
-        pass
-
-    @classmethod
-    def from_pretrained(cls, path):
-        tokenizer_path = path / "20B_tokenizer.json"
-        tokenizer = Tokenizer.from_file(str(tokenizer_path))
-        result = cls()
-        result.tokenizer = tokenizer
-        return result
-
-    def encode(self, prompt):
-        return self.tokenizer.encode(prompt).ids
-
-    def decode(self, ids):
-        return self.tokenizer.decode(ids)
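A minimal sketch of the loader above, assuming an RWKV checkpoint plus its 20B_tokenizer.json sit in a local models directory and that modules.shared has been configured by the web UI; the checkpoint file name is a placeholder.

from pathlib import Path

model = RWKVModel.from_pretrained(Path('models/rwkv-4-pile-169m.pth'))
state = {'temperature': 0.7, 'top_p': 0.9, 'top_k': 40, 'max_new_tokens': 64}
for partial in model.generate_with_streaming('Hello, my name is', state):
    pass
print(partial)  # the final streamed reply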
diff --git a/spaces/lewisliuX123/wechatgpt3/bot/chatgpt/chat_gpt_bot.py b/spaces/lewisliuX123/wechatgpt3/bot/chatgpt/chat_gpt_bot.py
deleted file mode 100644
index 94127104e0e92da5149ae5aa1e06885f0f8a4682..0000000000000000000000000000000000000000
--- a/spaces/lewisliuX123/wechatgpt3/bot/chatgpt/chat_gpt_bot.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# encoding:utf-8
-
-from bot.bot import Bot
-from config import conf
-from common.log import logger
-import openai
-import time
-
-user_session = dict()
-
-# OpenAI chat completion API (working)
-class ChatGPTBot(Bot):
-    def __init__(self):
-        openai.api_key = conf().get('open_ai_api_key')
-        openai.api_base="https://apai.zyai.online/v1"
-
-    def reply(self, query, context=None):
-        # acquire reply content
-        if not context or not context.get('type') or context.get('type') == 'TEXT':
-            logger.info("[OPEN_AI] query={}".format(query))
-            from_user_id = context['from_user_id']
-            if query == '#清除记忆':  # user command: "clear memory"
-                Session.clear_session(from_user_id)
-                return '记忆已清除'  # reply: "memory cleared"
-
-            new_query = Session.build_session_query(query, from_user_id)
-            logger.debug("[OPEN_AI] session query={}".format(new_query))
-
-            # if context.get('stream'):
-            #     # reply in stream
-            #     return self.reply_text_stream(query, new_query, from_user_id)
-
-            reply_content = self.reply_text(new_query, from_user_id, 0)
-            logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
-            if reply_content:
-                Session.save_session(query, reply_content, from_user_id)
-            return reply_content
-
-        elif context.get('type', None) == 'IMAGE_CREATE':
-            return self.create_img(query, 0)
-
-    def reply_text(self, query, user_id, retry_count=0):
-        try:
-            response = openai.ChatCompletion.create(
-                model="gpt-3.5-turbo",  # name of the chat model
-                messages=query,
-                temperature=1,  # in [0, 1]; higher values make replies less deterministic
-                max_tokens=600,  # maximum number of tokens in the reply
-                top_p=1,
-                frequency_penalty=0,  # in [-2, 2]; higher values encourage more varied content
-                presence_penalty=0,  # in [-2, 2]; higher values encourage more varied content
-            )
-            # res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
-            logger.info(response.choices[0]['message']['content'])
-            # log.info("[OPEN_AI] reply={}".format(res_content))
-            return response.choices[0]['message']['content']
-        except openai.error.RateLimitError as e:
-            # rate limit exception
-            logger.warn(e)
-            if retry_count < 3:
-                time.sleep(5)
-                logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
-                return self.reply_text(query, user_id, retry_count+1)
-            else:
-                return "问太快了,慢点行不行"
-        except Exception as e:
-            # unknown exception
-            logger.exception(e)
-            Session.clear_session(user_id)
-            return "没听懂"
-
-    def create_img(self, query, retry_count=0):
-        try:
-            logger.info("[OPEN_AI] image_query={}".format(query))
-            response = openai.Image.create(
-                prompt=query,      # image description
-                n=1,               # number of images to generate per request
-                size="1024x1024"   # image size; options: 256x256, 512x512, 1024x1024
-            )
-            image_url = response['data'][0]['url']
-            logger.info("[OPEN_AI] image_url={}".format(image_url))
-            return image_url
-        except openai.error.RateLimitError as e:
-            logger.warn(e)
-            if retry_count < 3:
-                time.sleep(5)
-                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
-                return self.reply_text(query, retry_count+1)
-            else:
-                return "问太快了,慢点行不行"
-        except Exception as e:
-            logger.exception(e)
-            return None
-
-class Session(object):
-    @staticmethod
-    def build_session_query(query, user_id):
-        '''
-        build query with conversation history
-        e.g.  [
-            {"role": "system", "content": "You are a helpful assistant,let's think step by step in multiple different ways."},
-            {"role": "user", "content": "Who won the world series in 2020?"},
-            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
-            {"role": "user", "content": "Where was it played?"}
-        ]
-        :param query: query content
-        :param user_id: from user id
-        :return: query content with conversation history
-        '''
-        session = user_session.get(user_id, [])
-        if len(session) == 0:
-            system_prompt = conf().get("character_desc", "")
-            system_item = {'role': 'system', 'content': system_prompt}
-            session.append(system_item)
-            user_session[user_id] = session
-        user_item = {'role': 'user', 'content': query}
-        session.append(user_item)
-        return session
-
-    @staticmethod
-    def save_session(query, answer, user_id):
-        session = user_session.get(user_id)
-        if session:
-            # append conversation
-            gpt_item = {'role': 'assistant', 'content': answer}
-            session.append(gpt_item)
-
-    @staticmethod
-    def clear_session(user_id):
-        user_session[user_id] = []
-
diff --git a/spaces/lewisliuX123/wechatllama2/bridge/bridge.py b/spaces/lewisliuX123/wechatllama2/bridge/bridge.py
deleted file mode 100644
index 6c164e87bb9f1623c70180e55d689c588f6509f4..0000000000000000000000000000000000000000
--- a/spaces/lewisliuX123/wechatllama2/bridge/bridge.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from bot import bot_factory
-
-
-class Bridge(object):
-    def __init__(self):
-        pass
-
-    def fetch_reply_content(self, query, context):
-        return bot_factory.create_bot("chatGPT").reply(query, context)
diff --git a/spaces/lewiswu1209/MockingBird/utils/util.py b/spaces/lewiswu1209/MockingBird/utils/util.py
deleted file mode 100644
index 34bcffd6c0975377a54ae1ce89002be1dae8151d..0000000000000000000000000000000000000000
--- a/spaces/lewiswu1209/MockingBird/utils/util.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-import time
-
-class Timer():
-    ''' Timer for recording training time distribution. '''
-    def __init__(self):
-        self.prev_t = time.time()
-        self.clear()
-
-    def set(self):
-        self.prev_t = time.time()
-
-    def cnt(self, mode):
-        self.time_table[mode] += time.time()-self.prev_t
-        self.set()
-        if mode == 'bw':
-            self.click += 1
-
-    def show(self):
-        total_time = sum(self.time_table.values())
-        self.time_table['avg'] = total_time/self.click
-        self.time_table['rd'] = 100*self.time_table['rd']/total_time
-        self.time_table['fw'] = 100*self.time_table['fw']/total_time
-        self.time_table['bw'] = 100*self.time_table['bw']/total_time
-        msg = '{avg:.3f} sec/step (rd {rd:.1f}% | fw {fw:.1f}% | bw {bw:.1f}%)'.format(
-            **self.time_table)
-        self.clear()
-        return msg
-
-    def clear(self):
-        self.time_table = {'rd': 0, 'fw': 0, 'bw': 0}
-        self.click = 0
-
-# Reference : https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/e2e_asr.py#L168
-
-def human_format(num):
-    magnitude = 0
-    while num >= 1000:
-        magnitude += 1
-        num /= 1000.0
-    # add more suffixes if you need them
-    return '{:3.1f}{}'.format(num, [' ', 'K', 'M', 'G', 'T', 'P'][magnitude])
-
-
-# provide attribute-style access to dict keys, e.g. d.key instead of d['key']
-class AttrDict(dict):
-    def __init__(self, *args, **kwargs):
-        super(AttrDict, self).__init__(*args, **kwargs)
-        self.__dict__ = self
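A minimal sketch exercising the helpers above:

timer = Timer()
timer.cnt('rd'); timer.cnt('fw'); timer.cnt('bw')   # attribute elapsed time to each bucket
print(timer.show())           # e.g. '0.000 sec/step (rd 33.3% | fw 33.3% | bw 33.3%)'
print(human_format(1234000))  # '1.2M'
d = AttrDict({'lr': 1e-4})
print(d.lr)                   # attribute-style access: 0.0001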
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Acer A200 Simple Tool V2.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Acer A200 Simple Tool V2.md
deleted file mode 100644
index 74d45bf52da629be3a1b5a5887ee947a88dd7785..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Acer A200 Simple Tool V2.md	
+++ /dev/null
@@ -1,44 +0,0 @@
-<h2>Acer A200 Simple Tool V2</h2><br /><p><b><b>DOWNLOAD</b> &bull; <a href="https://bytlly.com/2uGwcX">https://bytlly.com/2uGwcX</a></b></p><br /><br />
-<br />
-.5
-
-password protected b200 as 200 pwd protected files
-
-download backup-files b200 backuped as 200 b200 files
-
-download-protected-photos b200 download protected photos as b200 files
-
-download-protected-video-files b200 download protected video as 200 b200 files
-
-download-torrent-video b200 download protected video as 200 b200 files
-
-download-torrent-video-files b200 download protected video as 200 b200 files
-
-download-video-from-url b200 download protected video from url as b200 files
-
-download-video-from-url-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-download-restricted-files b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-download-restricted-files-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-download-restricted-files-http-bluestacks b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-download-restricted-files-http-bluestacks-nopass b200 download protected video from url as 200 b200 files
-
-download-video-from-url-pass-http-a-movie-site-download-restricted-files 4fefd39f24<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download !LINK! Dhoom Movie In Hindi Hd.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download !LINK! Dhoom Movie In Hindi Hd.md
deleted file mode 100644
index aad60a7222baf5f7d089f6aa986bcaf60e8d1e5a..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download !LINK! Dhoom Movie In Hindi Hd.md	
+++ /dev/null
@@ -1,11 +0,0 @@
-<h2>free download Dhoom movie in hindi hd</h2><br /><p><b><b>Download</b> &#9989; <a href="https://bytlly.com/2uGxg4">https://bytlly.com/2uGxg4</a></b></p><br /><br />
-<br />
-amazon.com: Dhoom 2: Bollywood Movie: Aishwarya Rai, Abhishek Bachan ,.2 Back in Action Bollywood DVD with English subtitles Free HD movies online, Hindi. Aishwarya Rai and Akshay Kumar in "Dhoom 2: In Shock" (Korean Version), Directed by Anmol Raj.
-Bollywood, Dhoom, Aishwarya Rai and Akshay Kumar, Movie, Back in Action, Bollywood Movie, Aishwarya Rai, Bollywood Movies, Dhoom, Full Movie, Back In Action, Full Movie.
-Aishwarya Rai and Akshay Kumar Film Dhoom 2 (Movie) with English Subtitles.
-Check Out The Full Screen for the Full Movie
-Bollywood Movie.
-Aishwarya Rai and Akshay Kumar In "Dhoom 2: In Shock" (Korean Version), Directed by Anmol Raj. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Grammarly Cracked Version Torrent Download HOT Torrent Download HOT.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Grammarly Cracked Version Torrent Download HOT Torrent Download HOT.md
deleted file mode 100644
index f1850fd5b7cd3c094108d6419a1b251f31fdd684..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Grammarly Cracked Version Torrent Download HOT Torrent Download HOT.md	
+++ /dev/null
@@ -1,42 +0,0 @@
-<br />
-<h1>Grammarly Cracked Version Torrent Download - How to Get Grammarly Premium for Free</h1>
-<p>Grammarly is a popular online writing tool that helps you improve your grammar, spelling, punctuation, and style. Grammarly can also detect plagiarism and suggest ways to enhance your vocabulary and tone. Grammarly is used by millions of writers, students, professionals, and educators around the world.</p>
-<h2>Grammarly Cracked Version Torrent Download Torrent Download</h2><br /><p><b><b>Download Zip</b> &#10084; <a href="https://bytlly.com/2uGy7v">https://bytlly.com/2uGy7v</a></b></p><br /><br />
-<p>Grammarly offers two versions: a free version and a premium version. The free version provides basic writing feedback and corrections, while the premium version offers more advanced features and insights, such as:</p>
-<ul>
-<li>Writing goals and performance reports</li>
-<li>Word choice and clarity suggestions</li>
-<li>Genre-specific writing style checks</li>
-<li>Human proofreading service</li>
-<li>Integration with Google Docs and other platforms</li>
-</ul>
-<p>The premium version of Grammarly costs $29.95 per month, $59.95 per quarter, or $139.95 per year. However, some people may not be able to afford or justify paying for Grammarly premium, especially if they only use it occasionally or for personal purposes. That's why some people look for ways to get Grammarly cracked version torrent download.</p>
-<h2>What is Grammarly Cracked Version Torrent Download?</h2>
-<p>Grammarly cracked version torrent download is a way of getting Grammarly premium for free by downloading a modified or hacked version of the software from a torrent site. A torrent site is a platform that allows users to share files over the internet using a peer-to-peer network. A cracked version of a software is a version that has been altered or bypassed to remove or disable its security features, such as license keys or activation codes.</p>
-<p></p>
-<p>Some people may think that getting Grammarly cracked version torrent download is a good idea because they can enjoy all the benefits of Grammarly premium without paying anything. However, this is not true. There are many risks and disadvantages of getting Grammarly cracked version torrent download that you should be aware of before you decide to do it.</p>
-<h2>What are the Risks and Disadvantages of Getting Grammarly Cracked Version Torrent Download?</h2>
-<p>Getting Grammarly cracked version torrent download is not only illegal but also dangerous and unethical. Here are some of the risks and disadvantages of getting Grammarly cracked version torrent download:</p>
-<h3>Risks</h3>
-<ul>
-<li>You may infect your computer or device with viruses, malware, spyware, or ransomware that can damage your system, steal your data, or extort you for money.</li>
-<li>You may expose your personal or professional information to hackers or cybercriminals who can use it for identity theft, fraud, blackmail, or other malicious purposes.</li>
-<li>You may face legal consequences or penalties for violating the intellectual property rights or terms of service of Grammarly or the torrent site.</li>
-<li>You may lose access to Grammarly updates, support, or features that may improve your writing experience or fix any bugs or errors.</li>
-<li>You may compromise the quality or accuracy of your writing feedback or corrections because the cracked version of Grammarly may not work properly or have all the features of the original version.</li>
-</ul>
-<h3>Disadvantages</h3>
-<ul>
-<li>You may waste your time or bandwidth downloading a fake or corrupted file that does not work or contains harmful content.</li>
-<li>You may miss out on the opportunity to support the developers and creators of Grammarly who work hard to provide you with a valuable and reliable writing tool.</li>
-<li>You may lose your credibility or reputation as a writer or professional if you use a cracked version of Grammarly that produces poor or inaccurate results.</li>
-<li>You may violate the academic or ethical standards of your institution or organization if you use a cracked version of Grammarly that contains plagiarism or other errors.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Grammarly is a powerful and useful online writing tool that can help you improve your grammar, spelling, punctuation, and style. Grammarly offers a free version and a premium version that has more features and benefits. However, some people may try to get Grammarly cracked version torrent download to get Grammarly premium for free.</p>
-<p>This is not a good idea because getting Grammarly cracked version torrent download is illegal, dangerous, and unethical. There are many risks and disadvantages of getting Grammarly cracked version torrent download that can harm your computer, device, data, writing, and reputation. Therefore, you should avoid getting Grammarly cracked version torrent download and instead use the legitimate and safe ways to get Grammarly premium.</p> 3cee63e6c2<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/lithiumice/SadTalker/src/audio2pose_models/networks.py b/spaces/lithiumice/SadTalker/src/audio2pose_models/networks.py
deleted file mode 100644
index 8aa0b1390e7b4bb0e16057ac94d2fe84f48421af..0000000000000000000000000000000000000000
--- a/spaces/lithiumice/SadTalker/src/audio2pose_models/networks.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import torch.nn as nn
-import torch
-
-
-class ResidualConv(nn.Module):
-    def __init__(self, input_dim, output_dim, stride, padding):
-        super(ResidualConv, self).__init__()
-
-        self.conv_block = nn.Sequential(
-            nn.BatchNorm2d(input_dim),
-            nn.ReLU(),
-            nn.Conv2d(
-                input_dim, output_dim, kernel_size=3, stride=stride, padding=padding
-            ),
-            nn.BatchNorm2d(output_dim),
-            nn.ReLU(),
-            nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),
-        )
-        self.conv_skip = nn.Sequential(
-            nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1),
-            nn.BatchNorm2d(output_dim),
-        )
-
-    def forward(self, x):
-
-        return self.conv_block(x) + self.conv_skip(x)
-
-
-class Upsample(nn.Module):
-    def __init__(self, input_dim, output_dim, kernel, stride):
-        super(Upsample, self).__init__()
-
-        self.upsample = nn.ConvTranspose2d(
-            input_dim, output_dim, kernel_size=kernel, stride=stride
-        )
-
-    def forward(self, x):
-        return self.upsample(x)
-
-
-class Squeeze_Excite_Block(nn.Module):
-    def __init__(self, channel, reduction=16):
-        super(Squeeze_Excite_Block, self).__init__()
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.fc = nn.Sequential(
-            nn.Linear(channel, channel // reduction, bias=False),
-            nn.ReLU(inplace=True),
-            nn.Linear(channel // reduction, channel, bias=False),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, x):
-        b, c, _, _ = x.size()
-        y = self.avg_pool(x).view(b, c)
-        y = self.fc(y).view(b, c, 1, 1)
-        return x * y.expand_as(x)
-
-
-class ASPP(nn.Module):
-    def __init__(self, in_dims, out_dims, rate=[6, 12, 18]):
-        super(ASPP, self).__init__()
-
-        self.aspp_block1 = nn.Sequential(
-            nn.Conv2d(
-                in_dims, out_dims, 3, stride=1, padding=rate[0], dilation=rate[0]
-            ),
-            nn.ReLU(inplace=True),
-            nn.BatchNorm2d(out_dims),
-        )
-        self.aspp_block2 = nn.Sequential(
-            nn.Conv2d(
-                in_dims, out_dims, 3, stride=1, padding=rate[1], dilation=rate[1]
-            ),
-            nn.ReLU(inplace=True),
-            nn.BatchNorm2d(out_dims),
-        )
-        self.aspp_block3 = nn.Sequential(
-            nn.Conv2d(
-                in_dims, out_dims, 3, stride=1, padding=rate[2], dilation=rate[2]
-            ),
-            nn.ReLU(inplace=True),
-            nn.BatchNorm2d(out_dims),
-        )
-
-        self.output = nn.Conv2d(len(rate) * out_dims, out_dims, 1)
-        self._init_weights()
-
-    def forward(self, x):
-        x1 = self.aspp_block1(x)
-        x2 = self.aspp_block2(x)
-        x3 = self.aspp_block3(x)
-        out = torch.cat([x1, x2, x3], dim=1)
-        return self.output(out)
-
-    def _init_weights(self):
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                nn.init.kaiming_normal_(m.weight)
-            elif isinstance(m, nn.BatchNorm2d):
-                m.weight.data.fill_(1)
-                m.bias.data.zero_()
-
-
-class Upsample_(nn.Module):
-    def __init__(self, scale=2):
-        super(Upsample_, self).__init__()
-
-        self.upsample = nn.Upsample(mode="bilinear", scale_factor=scale)
-
-    def forward(self, x):
-        return self.upsample(x)
-
-
-class AttentionBlock(nn.Module):
-    def __init__(self, input_encoder, input_decoder, output_dim):
-        super(AttentionBlock, self).__init__()
-
-        self.conv_encoder = nn.Sequential(
-            nn.BatchNorm2d(input_encoder),
-            nn.ReLU(),
-            nn.Conv2d(input_encoder, output_dim, 3, padding=1),
-            nn.MaxPool2d(2, 2),
-        )
-
-        self.conv_decoder = nn.Sequential(
-            nn.BatchNorm2d(input_decoder),
-            nn.ReLU(),
-            nn.Conv2d(input_decoder, output_dim, 3, padding=1),
-        )
-
-        self.conv_attn = nn.Sequential(
-            nn.BatchNorm2d(output_dim),
-            nn.ReLU(),
-            nn.Conv2d(output_dim, 1, 1),
-        )
-
-    def forward(self, x1, x2):
-        out = self.conv_encoder(x1) + self.conv_decoder(x2)
-        out = self.conv_attn(out)
-        return out * x2
\ No newline at end of file
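The blocks above are generic 2-D building pieces, and a quick shape check makes the strides and channel widths concrete. A minimal sketch, assuming the module is importable at the path that res_unet.py below uses (src.audio2pose_models.networks):

```python
# Minimal shape check for the blocks above; assumes the repo layout that
# res_unet.py uses (src.audio2pose_models.networks).
import torch
from src.audio2pose_models.networks import ResidualConv, ASPP

x = torch.randn(2, 32, 64, 64)                     # (batch, channels, H, W)
block = ResidualConv(32, 64, stride=2, padding=1)  # conv path + 1x skip conv, summed
y = block(x)
print(y.shape)                                     # torch.Size([2, 64, 32, 32])

aspp = ASPP(64, 64)                                # three dilated branches, concatenated then fused 3*64 -> 64
print(aspp(y).shape)                               # torch.Size([2, 64, 32, 32])
```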
diff --git a/spaces/lithiumice/SadTalker/src/audio2pose_models/res_unet.py b/spaces/lithiumice/SadTalker/src/audio2pose_models/res_unet.py
deleted file mode 100644
index f2611e1d1a9bf233507427b34928fca60e094224..0000000000000000000000000000000000000000
--- a/spaces/lithiumice/SadTalker/src/audio2pose_models/res_unet.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-from src.audio2pose_models.networks import ResidualConv, Upsample
-
-
-class ResUnet(nn.Module):
-    def __init__(self, channel=1, filters=[32, 64, 128, 256]):
-        super(ResUnet, self).__init__()
-
-        self.input_layer = nn.Sequential(
-            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1),
-            nn.BatchNorm2d(filters[0]),
-            nn.ReLU(),
-            nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),
-        )
-        self.input_skip = nn.Sequential(
-            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1)
-        )
-
-        self.residual_conv_1 = ResidualConv(filters[0], filters[1], stride=(2,1), padding=1)
-        self.residual_conv_2 = ResidualConv(filters[1], filters[2], stride=(2,1), padding=1)
-
-        self.bridge = ResidualConv(filters[2], filters[3], stride=(2,1), padding=1)
-
-        self.upsample_1 = Upsample(filters[3], filters[3], kernel=(2,1), stride=(2,1))
-        self.up_residual_conv1 = ResidualConv(filters[3] + filters[2], filters[2], stride=1, padding=1)
-
-        self.upsample_2 = Upsample(filters[2], filters[2], kernel=(2,1), stride=(2,1))
-        self.up_residual_conv2 = ResidualConv(filters[2] + filters[1], filters[1], stride=1, padding=1)
-
-        self.upsample_3 = Upsample(filters[1], filters[1], kernel=(2,1), stride=(2,1))
-        self.up_residual_conv3 = ResidualConv(filters[1] + filters[0], filters[0], stride=1, padding=1)
-
-        self.output_layer = nn.Sequential(
-            nn.Conv2d(filters[0], 1, 1, 1),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, x):
-        # Encode
-        x1 = self.input_layer(x) + self.input_skip(x)
-        x2 = self.residual_conv_1(x1)
-        x3 = self.residual_conv_2(x2)
-        # Bridge
-        x4 = self.bridge(x3)
-
-        # Decode
-        x4 = self.upsample_1(x4)
-        x5 = torch.cat([x4, x3], dim=1)
-
-        x6 = self.up_residual_conv1(x5)
-
-        x6 = self.upsample_2(x6)
-        x7 = torch.cat([x6, x2], dim=1)
-
-        x8 = self.up_residual_conv2(x7)
-
-        x8 = self.upsample_3(x8)
-        x9 = torch.cat([x8, x1], dim=1)
-
-        x10 = self.up_residual_conv3(x9)
-
-        output = self.output_layer(x10)
-
-        return output
\ No newline at end of file
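A smoke test for the network above follows; note that the (2,1) strides only downsample the height, so the input height must be divisible by 8 while the width passes through unchanged. A sketch under the same import assumption as before:

```python
# Smoke test for ResUnet: three stride-(2,1) stages halve the height, so it
# must be divisible by 8; the width is never downsampled.
import torch
from src.audio2pose_models.res_unet import ResUnet

net = ResUnet(channel=1)
x = torch.randn(1, 1, 32, 10)   # (batch, channel, H, W)
y = net(x)
print(y.shape)                  # torch.Size([1, 1, 32, 10]), sigmoid output in (0, 1)
```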
diff --git a/spaces/livekhh/formal_project/README.md b/spaces/livekhh/formal_project/README.md
deleted file mode 100644
index 76784ec336d5f2721b7dde4aadee7a1737e48fd1..0000000000000000000000000000000000000000
--- a/spaces/livekhh/formal_project/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatAllFree
-emoji: 🐨
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.50.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py b/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
deleted file mode 100644
index 18e5f0720c568db4ef0c97b59688b5e7866df606..0000000000000000000000000000000000000000
--- a/spaces/lkeab/transfiner/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from .mask_rcnn_R_101_FPN_100ep_LSJ import (
-    dataloader,
-    lr_multiplier,
-    model,
-    optimizer,
-    train,
-)
-
-train.max_iter *= 2  # 100ep -> 200ep
-
-lr_multiplier.scheduler.milestones = [
-    milestone * 2 for milestone in lr_multiplier.scheduler.milestones
-]
-lr_multiplier.scheduler.num_updates = train.max_iter
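This config stretches the 100-epoch recipe to 200 epochs by doubling both the iteration budget and the learning-rate-drop milestones, so the drops land at the same fractions of training. A toy illustration of that scaling, with hypothetical milestone values not taken from this repo:

```python
# Hypothetical 100ep values; only the doubling logic mirrors the config above.
max_iter = 184375
milestones = [163889, 177546]

max_iter *= 2                             # 100ep -> 200ep
milestones = [m * 2 for m in milestones]  # LR drops stay at the same epoch fractions

print(max_iter, milestones)               # 368750 [327778, 355092]
```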
diff --git a/spaces/lmz/candle-whisper/README.md b/spaces/lmz/candle-whisper/README.md
deleted file mode 100644
index 37b32f8bf2a5544c04048f926c36453ab3d6d5ae..0000000000000000000000000000000000000000
--- a/spaces/lmz/candle-whisper/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Candle Whisper
-emoji: 👀
-colorFrom: green
-colorTo: green
-sdk: static
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/loralora/sovits_aishell3/inference.py b/spaces/loralora/sovits_aishell3/inference.py
deleted file mode 100644
index 8e055847f22348b1f25d3e5c89f6b8ac3e1e57de..0000000000000000000000000000000000000000
--- a/spaces/loralora/sovits_aishell3/inference.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import onnxruntime
-import numpy as np
-import pyworld as pw
-import librosa
-import soundfile as sf
-
-def resize2d(source, target_len):
-    source[source<0.001] = np.nan
-    target = np.interp(np.linspace(0, len(source)-1, num=target_len,endpoint=True), np.arange(0, len(source)), source)
-    return np.nan_to_num(target)
-
-def _calculate_f0(input: np.ndarray,length,sr,f0min,f0max,
-                      use_continuous_f0: bool=True,
-                      use_log_f0: bool=True) -> np.ndarray:
-        input = input.astype(float)
-        frame_period = len(input)/sr/(length)*1000
-        f0, timeaxis = pw.dio(
-            input,
-            fs=sr,
-            f0_floor=f0min,
-            f0_ceil=f0max,
-            frame_period=frame_period)
-        f0 = pw.stonemask(input, f0, timeaxis, sr)
-        if use_log_f0:
-            nonzero_idxs = np.where(f0 != 0)[0]
-            f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
-        return f0.reshape(-1)
-
-
-def get_text(wav,sr,transform=1.0):
-
-    #wav, sr = librosa.load(file,sr=None)
-    if len(wav.shape) > 1:
-        wav = librosa.to_mono(wav.transpose(1, 0)) 
-    if sr!=16000:  
-        wav16 = librosa.resample(wav, orig_sr=sr, target_sr=16000)
-    else:
-        wav16=wav
-    
-    source = {"source":np.expand_dims(np.expand_dims(wav16,0),0)}
-    hubertsession = onnxruntime.InferenceSession("hubert.onnx",providers=['CUDAExecutionProvider'])
-    units = np.array(hubertsession.run(['embed'], source)[0])
-    f0=_calculate_f0(wav,units.shape[1],sr,
-            f0min=librosa.note_to_hz('C2'),
-            f0max=librosa.note_to_hz('C7'))
-    f0=resize2d(f0,units.shape[1])
-    f0[f0!=0]=f0[f0!=0]+np.log(transform)
-    expf0 = np.expand_dims(f0,(0,2))
-    output=np.concatenate((units,expf0,expf0),axis=2)
-    return output.astype(np.float32),f0
-
-def getkey(key):
-    return np.power(2,key/12.0)
-
-def infer(f,r,speaker,key,reqf0=False):
-    speaker=int(speaker[7:])
-    if f is not None:
-        file=f
-    elif r is not None:
-        file=r
-    else:
-        return "Please upload an audio file", None
-    audio,sr = librosa.load(file,sr=None)
-    if sr<16000:
-        return "Sample rate too low; please upload audio with at least a 16000 Hz sample rate", None
-    duration = audio.shape[0] / sr
-    print(audio,sr,duration)
-    #if duration > 120:
-        #return "请上传小于2min的音频", None
-    #audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-    x,sourcef0 = get_text(audio,sr,getkey(key))
-    x_lengths = [np.size(x,1)]
-    print(x_lengths[0],sr,speaker,key)
-    sid = [speaker]
-    ort_inputs = {'x':x,'x_lengths':x_lengths,'sid':sid,"noise_scale":[0.667],"length_scale":[1.0],"noise_scale_w":[0.8]} 
-    infersession = onnxruntime.InferenceSession("onnxmodel334.onnx",providers=['CUDAExecutionProvider'])
-    ort_output = infersession.run(['audio'], ort_inputs)
-    genf0=np.array([])
-    if reqf0:
-        # compute f0 directly from the generated audio; the original code
-        # reloaded it from `o`, a file handle that was never defined (a
-        # leftover from a commented-out sf.write call)
-        wav = ort_output[0][0][0]
-        genf0=_calculate_f0(wav,x_lengths[0],22050,
-            f0min=librosa.note_to_hz('C2'),
-            f0max=librosa.note_to_hz('C7'))
-        genf0=resize2d(genf0,x_lengths[0])
-    return 'success',(22050,ort_output[0][0][0])#sourcef0.tolist(),genf0.tolist()
\ No newline at end of file
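A hedged usage sketch for infer() above: it assumes hubert.onnx and onnxmodel334.onnx are present in the working directory (as the hard-coded paths require) and that speaker labels carry the integer id after seven characters, matching the speaker[7:] slice; the file name below is a placeholder.

```python
# Hypothetical call; the file name and speaker label are placeholders.
from inference import infer

status, result = infer("input.wav", None, "speaker5", key=2)  # key=2 shifts pitch up two semitones (2**(2/12))
if result is not None:
    sr, audio = result       # 22050 Hz waveform from the ONNX decoder
    print(status, sr, audio.shape)
else:
    print(status)            # an error message string
```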
diff --git a/spaces/luxuedong/lxd/src/components/turn-counter.tsx b/spaces/luxuedong/lxd/src/components/turn-counter.tsx
deleted file mode 100644
index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000
--- a/spaces/luxuedong/lxd/src/components/turn-counter.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import React from 'react'
-import { Throttling } from '@/lib/bots/bing/types'
-
-export interface TurnCounterProps {
-  throttling?: Throttling
-}
-
-export function TurnCounter({ throttling }: TurnCounterProps) {
-  if (!throttling) {
-    return null
-  }
-
-  return (
-    <div className="turn-counter">
-      <div className="text">
-        <span>{throttling.numUserMessagesInConversation}</span>
-        <span> of </span>
-        <span>{throttling.maxNumUserMessagesInConversation}</span>
-      </div>
-      <div className="indicator"></div>
-    </div>
-  )
-}
diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_vectorize.py b/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_vectorize.py
deleted file mode 100644
index 54e44cd8d3f9630a4d76c419e449c8ce1e7cee59..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_vectorize.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# -*- coding: utf-8 -*-
-import pytest
-from pybind11_tests import numpy_vectorize as m
-
-np = pytest.importorskip("numpy")
-
-
-def test_vectorize(capture):
-    assert np.isclose(m.vectorized_func3(np.array(3 + 7j)), [6 + 14j])
-
-    for f in [m.vectorized_func, m.vectorized_func2]:
-        with capture:
-            assert np.isclose(f(1, 2, 3), 6)
-        assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
-        with capture:
-            assert np.isclose(f(np.array(1), np.array(2), 3), 6)
-        assert capture == "my_func(x:int=1, y:float=2, z:float=3)"
-        with capture:
-            assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36])
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=3)
-            my_func(x:int=3, y:float=4, z:float=3)
-        """
-        with capture:
-            a = np.array([[1, 2], [3, 4]], order='F')
-            b = np.array([[10, 20], [30, 40]], order='F')
-            c = 3
-            result = f(a, b, c)
-            assert np.allclose(result, a * b * c)
-            assert result.flags.f_contiguous
-        # All inputs are F order and full or singletons, so the result is in col-major order:
-        assert capture == """
-            my_func(x:int=1, y:float=10, z:float=3)
-            my_func(x:int=3, y:float=30, z:float=3)
-            my_func(x:int=2, y:float=20, z:float=3)
-            my_func(x:int=4, y:float=40, z:float=3)
-        """
-        with capture:
-            a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=3)
-            my_func(x:int=3, y:float=4, z:float=3)
-            my_func(x:int=5, y:float=6, z:float=3)
-            my_func(x:int=7, y:float=8, z:float=3)
-            my_func(x:int=9, y:float=10, z:float=3)
-            my_func(x:int=11, y:float=12, z:float=3)
-        """
-        with capture:
-            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=2)
-            my_func(x:int=2, y:float=3, z:float=2)
-            my_func(x:int=3, y:float=4, z:float=2)
-            my_func(x:int=4, y:float=2, z:float=2)
-            my_func(x:int=5, y:float=3, z:float=2)
-            my_func(x:int=6, y:float=4, z:float=2)
-        """
-        with capture:
-            a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=2)
-            my_func(x:int=2, y:float=2, z:float=2)
-            my_func(x:int=3, y:float=2, z:float=2)
-            my_func(x:int=4, y:float=3, z:float=2)
-            my_func(x:int=5, y:float=3, z:float=2)
-            my_func(x:int=6, y:float=3, z:float=2)
-        """
-        with capture:
-            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=2)
-            my_func(x:int=2, y:float=2, z:float=2)
-            my_func(x:int=3, y:float=2, z:float=2)
-            my_func(x:int=4, y:float=3, z:float=2)
-            my_func(x:int=5, y:float=3, z:float=2)
-            my_func(x:int=6, y:float=3, z:float=2)
-        """
-        with capture:
-            a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=2)
-            my_func(x:int=3, y:float=2, z:float=2)
-            my_func(x:int=4, y:float=3, z:float=2)
-            my_func(x:int=6, y:float=3, z:float=2)
-        """
-        with capture:
-            a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2
-            assert np.allclose(f(a, b, c), a * b * c)
-        assert capture == """
-            my_func(x:int=1, y:float=2, z:float=2)
-            my_func(x:int=3, y:float=2, z:float=2)
-            my_func(x:int=4, y:float=3, z:float=2)
-            my_func(x:int=6, y:float=3, z:float=2)
-        """
-
-
-def test_type_selection():
-    assert m.selective_func(np.array([1], dtype=np.int32)) == "Int branch taken."
-    assert m.selective_func(np.array([1.0], dtype=np.float32)) == "Float branch taken."
-    assert m.selective_func(np.array([1.0j], dtype=np.complex64)) == "Complex float branch taken."
-
-
-def test_docs(doc):
-    assert doc(m.vectorized_func) == """
-        vectorized_func(arg0: numpy.ndarray[numpy.int32], arg1: numpy.ndarray[numpy.float32], arg2: numpy.ndarray[numpy.float64]) -> object
-    """  # noqa: E501 line too long
-
-
-def test_trivial_broadcasting():
-    trivial, vectorized_is_trivial = m.trivial, m.vectorized_is_trivial
-
-    assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial
-    assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial
-    assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial
-    assert trivial.c_trivial == vectorized_is_trivial(
-        np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3)
-    assert vectorized_is_trivial(
-        np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial
-    assert vectorized_is_trivial(
-        np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial
-    z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32')
-    z2 = np.array(z1, dtype='float32')
-    z3 = np.array(z1, dtype='float64')
-    assert vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial
-    assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial
-    assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial
-    assert vectorized_is_trivial(z1, z2, 1) == trivial.c_trivial
-    assert vectorized_is_trivial(z1[::2, ::2], 1, 1) == trivial.non_trivial
-    assert vectorized_is_trivial(1, 1, z1[::2, ::2]) == trivial.c_trivial
-    assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial
-    assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial
-
-    y1 = np.array(z1, order='F')
-    y2 = np.array(y1)
-    y3 = np.array(y1)
-    assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial
-    assert vectorized_is_trivial(y1, 1, 1) == trivial.f_trivial
-    assert vectorized_is_trivial(1, y2, 1) == trivial.f_trivial
-    assert vectorized_is_trivial(1, 1, y3) == trivial.f_trivial
-    assert vectorized_is_trivial(y1, z2, 1) == trivial.non_trivial
-    assert vectorized_is_trivial(z1[1::4, 1::4], y2, 1) == trivial.f_trivial
-    assert vectorized_is_trivial(y1[1::4, 1::4], z2, 1) == trivial.c_trivial
-
-    assert m.vectorized_func(z1, z2, z3).flags.c_contiguous
-    assert m.vectorized_func(y1, y2, y3).flags.f_contiguous
-    assert m.vectorized_func(z1, 1, 1).flags.c_contiguous
-    assert m.vectorized_func(1, y2, 1).flags.f_contiguous
-    assert m.vectorized_func(z1[1::4, 1::4], y2, 1).flags.f_contiguous
-    assert m.vectorized_func(y1[1::4, 1::4], z2, 1).flags.c_contiguous
-
-
-def test_passthrough_arguments(doc):
-    assert doc(m.vec_passthrough) == (
-        "vec_passthrough(" + ", ".join([
-            "arg0: float",
-            "arg1: numpy.ndarray[numpy.float64]",
-            "arg2: numpy.ndarray[numpy.float64]",
-            "arg3: numpy.ndarray[numpy.int32]",
-            "arg4: int",
-            "arg5: m.numpy_vectorize.NonPODClass",
-            "arg6: numpy.ndarray[numpy.float64]"]) + ") -> object")
-
-    b = np.array([[10, 20, 30]], dtype='float64')
-    c = np.array([100, 200])  # NOT a vectorized argument
-    d = np.array([[1000], [2000], [3000]], dtype='int')
-    g = np.array([[1000000, 2000000, 3000000]], dtype='int')  # requires casting
-    assert np.all(
-        m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) ==
-        np.array([[1111111, 2111121, 3111131],
-                  [1112111, 2112121, 3112131],
-                  [1113111, 2113121, 3113131]]))
-
-
-def test_method_vectorization():
-    o = m.VectorizeTestClass(3)
-    x = np.array([1, 2], dtype='int')
-    y = np.array([[10], [20]], dtype='float32')
-    assert np.all(o.method(x, y) == [[14, 15], [24, 25]])
-
-
-def test_array_collapse():
-    assert not isinstance(m.vectorized_func(1, 2, 3), np.ndarray)
-    assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray)
-    z = m.vectorized_func([1], 2, 3)
-    assert isinstance(z, np.ndarray)
-    assert z.shape == (1, )
-    z = m.vectorized_func(1, [[[2]]], 3)
-    assert isinstance(z, np.ndarray)
-    assert z.shape == (1, 1, 1)
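The F-order capture in test_vectorize above is worth pinning down: when every input is Fortran-contiguous (or a scalar), pybind11's vectorizer walks the arrays in memory order rather than row-major order. A small NumPy check of that visit order:

```python
# The x values visited in the F-order case of test_vectorize above.
import numpy as np

a = np.array([[1, 2], [3, 4]], order='F')
print(a.ravel(order='K'))   # [1 3 2 4] -- memory order, i.e. column-major
```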
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/reduce.h b/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/reduce.h
deleted file mode 100644
index c058e05db4c78c8601c0bd322a7edd8f9ba16d89..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/system/omp/detail/reduce.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file reduce.h
- *  \brief OpenMP implementation of reduce algorithms.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/omp/detail/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace omp
-{
-namespace detail
-{
-
-
-template<typename DerivedPolicy,
-         typename InputIterator, 
-         typename OutputType,
-         typename BinaryFunction>
-  OutputType reduce(execution_policy<DerivedPolicy> &exec,
-                    InputIterator first,
-                    InputIterator last,
-                    OutputType init,
-                    BinaryFunction binary_op);
-
-
-} // end namespace detail
-} // end namespace omp
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/omp/detail/reduce.inl>
-
diff --git a/spaces/maheshwaranumapathy/meta-llama-Llama-2-7b-hf/app.py b/spaces/maheshwaranumapathy/meta-llama-Llama-2-7b-hf/app.py
deleted file mode 100644
index 2a93fe531fb6e43d9b64f582671e5b110c1c20f3..0000000000000000000000000000000000000000
--- a/spaces/maheshwaranumapathy/meta-llama-Llama-2-7b-hf/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/meta-llama/Llama-2-7b-hf").launch()
\ No newline at end of file
diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py
deleted file mode 100644
index 6e24cadc882caab9ee439bb3dd288e536878565a..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Face_Enhancement/models/networks/generator.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from models.networks.base_network import BaseNetwork
-from models.networks.normalization import get_nonspade_norm_layer
-from models.networks.architecture import ResnetBlock as ResnetBlock
-from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
-from models.networks.architecture import SPADEResnetBlock_non_spade as SPADEResnetBlock_non_spade
-
-
-class SPADEGenerator(BaseNetwork):
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        parser.set_defaults(norm_G="spectralspadesyncbatch3x3")
-        parser.add_argument(
-            "--num_upsampling_layers",
-            choices=("normal", "more", "most"),
-            default="normal",
-            help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator",
-        )
-
-        return parser
-
-    def __init__(self, opt):
-        super().__init__()
-        self.opt = opt
-        nf = opt.ngf
-
-        self.sw, self.sh = self.compute_latent_vector_size(opt)
-
-        print("The size of the latent vector size is [%d,%d]" % (self.sw, self.sh))
-
-        if opt.use_vae:
-            # In case of VAE, we will sample from random z vector
-            self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh)
-        else:
-            # Otherwise, we make the network deterministic by starting with
-            # downsampled segmentation map instead of random z
-            if self.opt.no_parsing_map:
-                self.fc = nn.Conv2d(3, 16 * nf, 3, padding=1)
-            else:
-                self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "1":
-            self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
-        else:
-            self.head_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "2":
-            self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
-            self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
-
-        else:
-            self.G_middle_0 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
-            self.G_middle_1 = SPADEResnetBlock_non_spade(16 * nf, 16 * nf, opt)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "3":
-            self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
-        else:
-            self.up_0 = SPADEResnetBlock_non_spade(16 * nf, 8 * nf, opt)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "4":
-            self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
-        else:
-            self.up_1 = SPADEResnetBlock_non_spade(8 * nf, 4 * nf, opt)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "5":
-            self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
-        else:
-            self.up_2 = SPADEResnetBlock_non_spade(4 * nf, 2 * nf, opt)
-
-        if self.opt.injection_layer == "all" or self.opt.injection_layer == "6":
-            self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
-        else:
-            self.up_3 = SPADEResnetBlock_non_spade(2 * nf, 1 * nf, opt)
-
-        final_nc = nf
-
-        if opt.num_upsampling_layers == "most":
-            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
-            final_nc = nf // 2
-
-        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
-
-        self.up = nn.Upsample(scale_factor=2)
-
-    def compute_latent_vector_size(self, opt):
-        if opt.num_upsampling_layers == "normal":
-            num_up_layers = 5
-        elif opt.num_upsampling_layers == "more":
-            num_up_layers = 6
-        elif opt.num_upsampling_layers == "most":
-            num_up_layers = 7
-        else:
-            raise ValueError("opt.num_upsampling_layers [%s] not recognized" % opt.num_upsampling_layers)
-
-        sw = opt.load_size // (2 ** num_up_layers)
-        sh = round(sw / opt.aspect_ratio)
-
-        return sw, sh
-
-    def forward(self, input, degraded_image, z=None):
-        seg = input
-
-        if self.opt.use_vae:
-            # we sample z from unit normal and reshape the tensor
-            if z is None:
-                # input.device also covers CPU tensors, where get_device() fails
-                z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.device)
-            x = self.fc(z)
-            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
-        else:
-            # we downsample segmap and run convolution
-            if self.opt.no_parsing_map:
-                x = F.interpolate(degraded_image, size=(self.sh, self.sw), mode="bilinear")
-            else:
-                x = F.interpolate(seg, size=(self.sh, self.sw), mode="nearest")
-            x = self.fc(x)
-
-        x = self.head_0(x, seg, degraded_image)
-
-        x = self.up(x)
-        x = self.G_middle_0(x, seg, degraded_image)
-
-        if self.opt.num_upsampling_layers == "more" or self.opt.num_upsampling_layers == "most":
-            x = self.up(x)
-
-        x = self.G_middle_1(x, seg, degraded_image)
-
-        x = self.up(x)
-        x = self.up_0(x, seg, degraded_image)
-        x = self.up(x)
-        x = self.up_1(x, seg, degraded_image)
-        x = self.up(x)
-        x = self.up_2(x, seg, degraded_image)
-        x = self.up(x)
-        x = self.up_3(x, seg, degraded_image)
-
-        if self.opt.num_upsampling_layers == "most":
-            x = self.up(x)
-            x = self.up_4(x, seg, degraded_image)
-
-        x = self.conv_img(F.leaky_relu(x, 2e-1))
-        x = torch.tanh(x)  # F.tanh is deprecated; torch.tanh is equivalent
-
-        return x
-
-
-class Pix2PixHDGenerator(BaseNetwork):
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        parser.add_argument(
-            "--resnet_n_downsample", type=int, default=4, help="number of downsampling layers in netG"
-        )
-        parser.add_argument(
-            "--resnet_n_blocks",
-            type=int,
-            default=9,
-            help="number of residual blocks in the global generator network",
-        )
-        parser.add_argument(
-            "--resnet_kernel_size", type=int, default=3, help="kernel size of the resnet block"
-        )
-        parser.add_argument(
-            "--resnet_initial_kernel_size", type=int, default=7, help="kernel size of the first convolution"
-        )
-        # parser.set_defaults(norm_G='instance')
-        return parser
-
-    def __init__(self, opt):
-        super().__init__()
-        input_nc = 3
-
-        # print("xxxxx")
-        # print(opt.norm_G)
-        norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
-        activation = nn.ReLU(False)
-
-        model = []
-
-        # initial conv
-        model += [
-            nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
-            norm_layer(nn.Conv2d(input_nc, opt.ngf, kernel_size=opt.resnet_initial_kernel_size, padding=0)),
-            activation,
-        ]
-
-        # downsample
-        mult = 1
-        for i in range(opt.resnet_n_downsample):
-            model += [
-                norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2, kernel_size=3, stride=2, padding=1)),
-                activation,
-            ]
-            mult *= 2
-
-        # resnet blocks
-        for i in range(opt.resnet_n_blocks):
-            model += [
-                ResnetBlock(
-                    opt.ngf * mult,
-                    norm_layer=norm_layer,
-                    activation=activation,
-                    kernel_size=opt.resnet_kernel_size,
-                )
-            ]
-
-        # upsample
-        for i in range(opt.resnet_n_downsample):
-            nc_in = int(opt.ngf * mult)
-            nc_out = int((opt.ngf * mult) / 2)
-            model += [
-                norm_layer(
-                    nn.ConvTranspose2d(nc_in, nc_out, kernel_size=3, stride=2, padding=1, output_padding=1)
-                ),
-                activation,
-            ]
-            mult = mult // 2
-
-        # final output conv
-        model += [
-            nn.ReflectionPad2d(3),
-            nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
-            nn.Tanh(),
-        ]
-
-        self.model = nn.Sequential(*model)
-
-    def forward(self, input, degraded_image, z=None):
-        return self.model(degraded_image)
-
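The starting resolution of SPADEGenerator follows directly from compute_latent_vector_size above: the latent grid is the crop size divided by 2 per upsampling layer. A worked example with assumed option values:

```python
# Assumed options: load_size=256, num_upsampling_layers="normal", aspect_ratio=1.0.
num_up_layers = 5                       # "normal" in compute_latent_vector_size
load_size, aspect_ratio = 256, 1.0

sw = load_size // (2 ** num_up_layers)  # 256 // 32 = 8
sh = round(sw / aspect_ratio)           # 8

print(sw, sh)                           # 8 8 -> the generator upsamples 8x8 back to 256x256
```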
diff --git a/spaces/manjunathshiva/BibleGPT/Dockerfile b/spaces/manjunathshiva/BibleGPT/Dockerfile
deleted file mode 100644
index 6989eeca9f650ab2bd514bc0a475f67e007dd909..0000000000000000000000000000000000000000
--- a/spaces/manjunathshiva/BibleGPT/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM python:3.11.4
-
-WORKDIR /usr/src/app
-
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-COPY . .
-
-RUN mkdir /.cache
-RUN chmod 777 /.cache
-
-CMD [ "chainlit", "run","app.py","-w","--host", "0.0.0.0", "--port", "7860" ]
\ No newline at end of file
diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/README.md b/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/README.md
deleted file mode 100644
index fbde5b8f0c42b5cb2d38cb6b0fb3a454f124a7d9..0000000000000000000000000000000000000000
--- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# AdaptiveWingLoss
-## [arXiv](https://arxiv.org/abs/1904.07399)
-Pytorch Implementation of Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression.
-
-<img src='images/wflw.png' width="1000px">
-
-## Update Logs:
-### October 28, 2019
-* Pretrained Model and evaluation code on WFLW dataset is released.
-
-## Installation
-#### Note: The code was originally developed under Python 2.x and PyTorch 0.4. This released version was revised from the original code and tested on Python 3.5.7 and PyTorch 1.3.0.
-
-Install system requirements:
-```
-sudo apt-get install python3-dev python3-pip python3-tk libglib2.0-0
-```
-
-Install python dependencies:
-```
-pip3 install -r requirements.txt
-```
-
-## Run Evaluation on WFLW dataset
-1. Download and process WFLW dataset
-    * Download WFLW dataset and annotation from [Here](https://wywu.github.io/projects/LAB/WFLW.html).
-    * Unzip WFLW dataset and annotations and move files into ```./dataset``` directory. Your directory should look like this:
-        ```
-        AdaptiveWingLoss
-        └───dataset
-           │
-           └───WFLW_annotations
-           │   └───list_98pt_rect_attr_train_test
-           │   │
-           │   └───list_98pt_test
-           │
-           └───WFLW_images
-               └───0--Parade
-               │
-               └───...
-        ```
-    * Inside ```./dataset``` directory, run:
-        ```
-        python convert_WFLW.py
-        ```
-        A new directory ```./dataset/WFLW_test``` should be generated with 2500 processed testing images and corresponding landmarks.
-
-2. Download pretrained model from [Google Drive](https://drive.google.com/file/d/1HZaSjLoorQ4QCEx7PRTxOmg0bBPYSqhH/view?usp=sharing) and put it in ```./ckpt``` directory.
-
-3. Within ```./Scripts``` directory, run following command:
-    ```
-    sh eval_wflw.sh
-    ```
-    
-    <img src='images/wflw_table.png' width="800px">
-    *GTBbox indicates the ground truth landmarks are used as the bounding box to crop faces.
-
-## Future Plans
-- [x] Release evaluation code and pretrained model on WFLW dataset.
-
-- [ ] Release training code on WFLW dataset.
- 
-- [ ] Release pretrained model and code on 300W, AFLW and COFW dataset.
-
-- [ ] Release facial landmark detection API
-
-
-## Citation
-If you find this useful for your research, please cite the following paper.
-
-```
-@InProceedings{Wang_2019_ICCV,
-author = {Wang, Xinyao and Bo, Liefeng and Fuxin, Li},
-title = {Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression},
-booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
-month = {October},
-year = {2019}
-}
-```
-
-## Acknowledgments
-This repository borrows or partially modifies hourglass model and data processing code from [face alignment](https://github.com/1adrianb/face-alignment) and [pose-hg-train](https://github.com/princeton-vl/pose-hg-train).
diff --git a/spaces/merve/anonymization/public/base-rate/script.js b/spaces/merve/anonymization/public/base-rate/script.js
deleted file mode 100644
index efc40861466afc2bb19cee8d3ef6cd5a98d80ddc..0000000000000000000000000000000000000000
--- a/spaces/merve/anonymization/public/base-rate/script.js
+++ /dev/null
@@ -1,317 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-
-
-
-
-console.clear()
-var ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden')
-
-window.renderFns = []
-
-window.m = (function(){
-  var rv = {b: .7, tpr: .8, fnr: .5, update, str: 'kids', titleStr: 'Children',}
-
-  function update(obj={}){
-    Object.assign(rv, obj)
-    window.renderFns.forEach(d => d())
-  }
-
-  return rv
-})()
-
-window.f = (function(){
-  var rv = {b: .3, tpr: .8, fnr: .5, update, str: 'adults', titleStr: 'Adults'}
-
-  function update(obj={}){
-    Object.assign(rv, obj) // mirror m.update so passed-in overrides take effect
-    window.renderFns.forEach(d => d())
-  }
-
-  return rv
-})()
-
-
-var wLarge = d3.clamp(0, innerWidth/2 - 30, 300)
-
-d3.select('#big-matrix').html('')
-  .appendMany('div.big-container', [{w: wLarge, s: f, isText: 1}, {w: wLarge, s: m, isText: 1}])
-  .each(drawMatrix)
-
-
-addPattern(10, `pattern-${wLarge}-`)
-addPattern(5, 'pattern-50-')
-
-function addPattern(s, str){
-  var cColors = [colors.sick, colors.sick, colors.well, colors.well, lcolors.sick, lcolors.sick, lcolors.well, lcolors.well]
-  var rColors = [lcolors.sick, lcolors.well, lcolors.sick, lcolors.well, llcolors.sick, llcolors.well, llcolors.sick, llcolors.well]
-
-  d3.select('#big-matrix')
-    .append('svg')
-    .st({height: 0, position: 'absolute'})
-    .append('defs').appendMany('pattern', d3.range(8))
-    .at({ id: i => str + i, width: s, height: s})
-    .attr('patternUnits', 'userSpaceOnUse')
-    .append('rect')
-    .at({width: s, height: s, fill: i => rColors[i]})
-    .parent().append('circle')
-    .at({r: s == 10 ? 2.5 : 1.5, cx: s/2, cy: s/2, fill: i => cColors[i]})
-}
-
-
-var scale = d3.clamp(0, ((innerWidth - 50) / 3)/280, 1)
-var isScaled = scale != 1
-
-d3.select('#metrics').html('').st({height: 350*scale + 30})
-  .appendMany('div', [0, 1, 2])
-  .st({width: 280*scale, display: 'inline-block'})
-  .append('div')
-  .st({transform: `scale(${scale})`, transformOrigin: '0% 0%'})
-  .append('div.metrics-container').st({width: 280})
-  .each(drawMetric)
-
-d3.selectAll('rect.drag')
-  .on('mouseover.style', d => d3.selectAll('rect.' + d).st({strokeWidth: 3, stroke: '#000'}))
-  .on('mouseout.style', d => d3.selectAll('rect.' + d).st({strokeWidth: 0}))
-
-function drawMetric(i){
-  var sel = d3.select(this)
-
-  var text = [
-    // 'Percentage of <span style="background: #fcf">sick people</span><br> who <span style="background: #f0f">test positive<span>',
-    'Percentage of sick people<br> who test positive',
-    'Percentage of positive tests<br> who are actually sick',
-    'Percentage of well people <br>who test negative',
-  ][i]
-
-  var percentFn = [
-    s => s.tpr,
-    s => s.b*s.tpr/(s.b*s.tpr + (1 - s.b)*(s.fnr)),
-    s => 1 - s.fnr,
-  ][i]
-
-  var colors = [
-    ['#f0f', '#fcf', '#fff', '#fff'],
-    ['#f0f', '#fff', '#fcf', '#fff'],
-    ['#fff', '#fff', '#fcf', '#f0f'],
-  ][i]
-
-  sel.append('h3').st({marginBottom: 20, fontSize: isScaled ? 30 : 20}).html(isScaled ? text.replace('<br>', '') : text)
-
-  var h = 200
-  var width = 100
-
-  var fDiv = sel.append('div').st({position: 'relative', top: -h + 7})
-    .datum({w: 50, s: f, isText: 0, colors}).each(drawMatrix)
-
-  var svg = sel.append('svg')
-    .at({width, height: h})
-    .st({fontSize: 14, fontFamily: 'monospace'})
-
-  svg.append('path').at({stroke: '#ccc', d: `M ${width/2 + .5} 0 V ${h}`})
-
-  var errorSel = svg.append('path')
-    .translate(width/2 + .5, 0)
-    .at({stroke: 'orange', strokeWidth: 3})
-
-  var fSel = svg.append('g')
-  var mSel = svg.append('g')
-
-  mSel.append('circle').at({r: 4, cx: width/2 + .5, fill: 'none', stroke: '#000'})
-  fSel.append('circle').at({r: 4, cx: width/2 + .5, fill: 'none', stroke: '#000'})
-
-  var fTextSel = fSel.append('text').text('23%')
-    .at({dy: '.33em', textAnchor: 'middle', x: width/4 - 3, fontSize: isScaled ? 20 : 16})
-  var mTextSel = mSel.append('text').text('23%')
-    .at({dy: '.33em', textAnchor: 'middle', x: width/4*3 + 5, fontSize: isScaled ? 20 : 16})
-
-  fSel.append('text').text('Adults').st({fontSize: isScaled ? 18 : 12})
-    .at({textAnchor: 'middle', x: -23, y: -30})
-  mSel.append('text').text('Children').st({fontSize: isScaled ? 18 : 12})
-    .at({textAnchor: 'middle', x: 124, y: -30})
-
-  var mDiv = sel.append('div').st({position: 'relative', top: -h + 7})
-    .datum({w: 50, s: m, isText: 0, colors}).each(drawMatrix)
-
-
-  renderFns.push(() => {
-    var fPercent = percentFn(f)
-    fSel.translate(h - h*fPercent, 1)
-    fTextSel.text(d3.format('.0%')(fPercent))
-
-    var mPercent = percentFn(m)
-    mSel.translate(h - h*mPercent, 1)
-    mTextSel.text(d3.format('.0%')(mPercent))
-
-    fDiv.translate(h - h*fPercent, 1)
-    mDiv.translate(h - h*mPercent, 1)
-
-    errorSel.at({d: 'M 0 ' + (h - h*fPercent) + ' V ' + (h - h*mPercent) })
-  })
-}
-
-function drawMatrix({s, w, isText, colors}){
-  var svg = d3.select(this).append('svg')
-    .at({width: w, height: w})
-    
-
-  svg.append('rect').at({width: w + 1, height: w + 1})
-
-  if (!colors) colors = ['#000', '#000', '#000', '#000']
-
-  var rects = [
-    {n: 'tp', x: 0, y: 0, width: _ => s.b*w, height: _ => s.tpr*w},
-    {n: 'fn', x: 0, y: _ => 1 + s.tpr*w, width: _ => s.b*w, height: _ => w - s.tpr*w},
-    {n: 'fp', x: _ => 1 + s.b*w, y: 0, width: _ => w - s.b*w, height: _ => s.fnr*w},
-    {n: 'tn', x: _ => 1 + s.b*w, y: _ => 1 + s.fnr*w, width: _ => w - s.b*w, height: _ => w - s.fnr*w},
-  ]
-  rects.forEach((d, i) => d.i = i)
-
-  var rectSel = svg.appendMany('rect', rects)
-    .at({fill: d =>  `url(#pattern-${w}-${d.i}`})
-    // .at({opacity: d => colors[d.i] == '#fff' ? .5 : 1})
-    // .at({fill: d =>  `url(#pattern-${w}-${d.i + (colors[d.i] == '#ccc' ? 4 : 0)})`})
-    // .at({fill: d =>  colors[d.i] == '#ccc' ? '#000' : `url(#pattern-${w}-${d.i + (colors[d.i] == '#ccc' ? 4 : 0)})`})
-    .each(function(d){ d.sel = d3.select(this) })
-  rectSel.filter(d => colors[d.i] == '#fff').at({fill: '#eee'})
-
-  var bh = .5
-  svg.append('rect.tpr').at({height: bh}).translate(-bh/2, 1)
-    .datum('tpr')
-
-  svg.append('rect.fnr').at({height: bh}).translate(-bh/2, 1)
-    .datum('fnr')
-
-  svg.append('rect.b').at({width: bh, height: w}).translate(-bh/2, 0)
-    .datum('b')
-
-  var bh = 20
-  svg.append('rect.drag.tpr').at({height: bh}).translate(-bh/2, 1)
-    .call(makeDrag('tpr', 1)).datum('tpr').call(d3.attachTooltip).on('mouseover', ttFormat)
-
-  svg.append('rect.drag.fnr').at({height: bh}).translate(-bh/2, 1)
-    .call(makeDrag('fnr', 1)).datum('fnr').call(d3.attachTooltip).on('mouseover', ttFormat)
-
-  svg.append('rect.drag.b').at({width: bh, height: w}).translate(-bh/2, 0)
-    .call(makeDrag('b', 0)).datum('b').call(d3.attachTooltip).on('mouseover', ttFormat)
-
-
-  var tprRect = svg.selectAll('rect.tpr')
-  var fnrRect = svg.selectAll('rect.fnr')
-  var bRect = svg.selectAll('rect.b')
-
-  function ttFormat(str){
-    var html = ''
-    if (str == 'tpr') html = `${d3.format('.0%')(s.tpr)} of sick ${s.titleStr.toLowerCase()} test positive`
-    if (str == 'fnr') html = `${d3.format('.0%')(1 - s.fnr)} of well ${s.titleStr.toLowerCase()} test negative`
-    if (str == 'b') html = `${d3.format('.0%')(s.b)} of ${s.titleStr.toLowerCase()} are sick`
-    ttSel.html(html) 
-  }
-
-  function makeDrag(str, index){
-
-    return d3.drag()
-      .on('drag', function(){
-        var percent = d3.mouse(this)[index]/w
-        s[str] = d3.clamp(.15, percent, .85)
-
-        window.basetimer.stop()
-        s.update()
-
-        ttMove()
-        ttFormat(str)
-      })
-      .on('start', _ => svg.classed('dragging', 1))
-      .on('end', _ => svg.classed('dragging', 0))
-  }
-
-  renderFns.push(() => {
-    rectSel.each(d => d.sel.at(d))
-
-    tprRect.at({width: w*s.b, y: w*s.tpr})
-    fnrRect.at({x: w*s.b, width: w - w*s.b, y: w*s.fnr})
-    bRect.at({x: w*s.b})
-
-  })
-
-
-  if (!isText) return
-
-  svg.append('text').text(s.titleStr).at({textAnchor: 'middle', x: w/2, y: -8, fontSize: 20})
-
-  if (innerWidth < 800) return
-  // if (true)  
-
-  svg.appendMany('text', d3.range(4)).each(function(i){
-    var isSick = i < 2
-    var isPos = i % 2
-
-    var pad = 5
-    d3.select(this)
-      .translate([isSick ? pad : w - pad, isPos ? 13 : w - 23])
-      .at({
-        textAnchor: isSick ? 'start' : 'end',
-        fill: '#000',
-        fontSize: 12,
-        fontFamily: 'monospace',
-        pointerEvents: 'none',
-      })
-      .tspans([
-        ' test : ' + (isPos ? 'sick' : 'well'), 
-        'truth: ' +  (isSick ? 'sick' : 'well')])
-  })
-}
-
-
-if (window.basetimer) window.basetimer.stop()
-window.basetimer = d3.timer(t => {
-
-  var val = t/1000 % (Math.PI*4)
-
-  if (val < Math.PI*2){
-    m.b = (Math.sin(val + Math.PI/2))/4 + .4
-  } else if (Math.PI*3 < val && val < Math.PI*5 || true){
-    f.tpr = (Math.sin(val + Math.PI/2))/4 + .4
-  }
-  m.update()
-})
-
-
-
-
-
-m.update()
-
-
-
-function ttMove(d){
-  if (!ttSel.size()) return;
-
-  var e = d3.event.sourceEvent,
-      x = e.clientX,
-      y = e.clientY,
-      bb = ttSel.node().getBoundingClientRect(),
-      left = d3.clamp(20, (x-bb.width/2), window.innerWidth - bb.width - 20),
-      top = innerHeight > y + 20 + bb.height ? y + 20 : y - bb.height - 20;
-
-  ttSel
-    .style('left', left +'px')
-    .style('top', top + 'px');
-}
-
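The second metric in drawMetric above is the positive predictive value; note that in this script fnr names the fraction of well people who test positive (the height of the false-positive rectangle), so the formula is the usual Bayes ratio. A sketch of the same computation:

```python
# P(sick | test positive) exactly as drawMetric computes it, where fnr plays
# the role of the well group's false positive rate.
def ppv(b, tpr, fnr):
    return b * tpr / (b * tpr + (1 - b) * fnr)

print(round(ppv(0.3, 0.8, 0.5), 3))  # adults' defaults above  -> 0.407
print(round(ppv(0.7, 0.8, 0.5), 3))  # children's defaults     -> 0.789
```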
diff --git a/spaces/merve/anonymization/public/uncertainty-calibration/draw_calibrationcurve.js b/spaces/merve/anonymization/public/uncertainty-calibration/draw_calibrationcurve.js
deleted file mode 100644
index c7992a7c79b1a5187bc3f267350869904c836626..0000000000000000000000000000000000000000
--- a/spaces/merve/anonymization/public/uncertainty-calibration/draw_calibrationcurve.js
+++ /dev/null
@@ -1,102 +0,0 @@
-
-window.drawCalibrationCurve = function (graphSel, fig_height, fig_width){
-  var width = Math.min(fig_height, fig_width)
-  var sel = graphSel
-    .append('div').st({textAlign: 'center'})
-    .append('div').st({display: 'inline-block'})
-
-  var c = d3.conventions({
-    sel,
-    width,
-    height: width,
-    margin: {top: 40}
-  });
-
-  c.svg.parent()
-
-  //TODO(nthain) Who owns the buckets? We have at least 2 instances, reduce to 1
-  var buckets = d3.pairs(window.weatherGraph.thresholds)
-  buckets.forEach(bucket => {
-    bucket.val = d3.mean(bucket, d => d.origVal)
-  })
-
-  c.xAxis.tickValues(buckets.map(d => d.val)).tickFormat(d3.format('.2f'))
-  c.yAxis.tickValues(buckets.map(d => d.val)).tickFormat(d3.format('.2f'))
-  d3.drawAxis(c)
-  window.util.ggPlotBg(c)
-
-  window.util.addAxisLabel(c, 'Calibrated Model Score', 'Probability of Rain')
-
-  var eceSel = c.svg.append('g.ece')
-  var eceBox = eceSel.append('rect.val-box')
-    .at({width: 55, height: 20, x: c.width/2 + 72.5, y: -35, rx: 3, ry: 3})
-  var eceText = eceSel.append('text.big-text')
-    .at({y: -20, x: c.width/2-30, textAnchor: 'middle'})
-  var eceVal = eceSel.append('text.val-text')
-    .at({y: -20, x: c.width/2+100, textAnchor: 'middle'})
-
-  c.svg.append('path')
-    .at({
-      d: ['M', 0, c.height, 'L', c.width, 0].join(' '), 
-      stroke: '#555',
-      strokeDasharray: '3 3',
-    })
-
-  var bucketSel = c.svg.appendMany('g.bucket', buckets)
-
-  var circleSel = bucketSel.append('circle')
-    .at({fillOpacity: .4, fill: 'steelblue'})
-
-  var pathSel = bucketSel.append('path')
-    .at({stroke: 'steelblue', strokeWidth: 3})
-
-  var bucketText = bucketSel.append('text').text('8 / 10')
-    .at({textAnchor: 'start', dy: '.33em', fontSize: 10, fill: '#000'})
-
-
-
-  function renderBuckets(){
-    var filter_rain = window.slides.slide?.filter_rain
-
-    buckets.forEach(bucket => {
-      bucket.data = weatherdata
-        .filter(d => bucket[0].val <= d.score && d.score <= bucket[1].val)
-        .filter(d => !filter_rain || !d.is_filter)
-
-      bucket.nPositive = d3.sum(bucket.data, d => d.label)
-      bucket.percent = bucket.nPositive/bucket.data.length
-
-      if (isNaN(bucket.percent)) bucket.percent = bucket[0].val
-    })
-
-    var ece = d3.sum(buckets, d => d.data.length*Math.abs(d.val - d.percent))
-    ece = ece/d3.sum(buckets, d => d.data.length)
-
-    eceText.text('Expected Calibration Error: ')
-    eceVal.text(d3.format('.3f')(ece))
-
-    var rScale = d3.scaleSqrt().domain([0, 50]).range([0, 20])
-
-    bucketSel
-      .st({opacity: d => d.data.length})
-      .filter(d => d.data.length)
-      .translate(d => [c.x(d.val), c.y(d.percent)])
-
-    circleSel
-      .at({r: d => rScale(d.data.length)})
-
-    pathSel.at({d: d => 'M 0 0 V ' + (c.y(d.val) - c.y(d.percent))})
-
-    bucketText
-      .text(d => `${d.nPositive} / ${d.data.length}`)
-      .at({x: d => rScale(d.data.length) + 2})
-  }
-
-  return {renderBuckets, c, buckets, calibrationDataFn: () => console.log('test')}
-}
-
-if (window.init) window.init()
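renderBuckets above computes expected calibration error as a bucket-size-weighted average of the gap between each bucket's score and its observed positive rate. The same computation in miniature, with toy numbers:

```python
# ECE as renderBuckets computes it; bucket tuples are (score, n, n_positive)
# with toy values.
def ece(buckets):
    total = sum(n for _, n, _ in buckets)
    return sum(n * abs(score - pos / n) for score, n, pos in buckets) / total

print(ece([(0.1, 50, 4), (0.5, 30, 18), (0.9, 20, 19)]))  # ~0.05
```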
diff --git a/spaces/merve/anonymization/source/uncertainty-calibration/draw_weathergraph.js b/spaces/merve/anonymization/source/uncertainty-calibration/draw_weathergraph.js
deleted file mode 100644
index 068615fb14b8e5d27869a0d270d8f0c5580e4fcc..0000000000000000000000000000000000000000
--- a/spaces/merve/anonymization/source/uncertainty-calibration/draw_weathergraph.js
+++ /dev/null
@@ -1,264 +0,0 @@
-window.drawWeatherGraph = function (graphSel, fig_height, fig_width){
-  
-  var threshold = .4
-
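-  // Calibration bin edges; the 0 and 1 endpoints are locked so only the
-  // interior thresholds can be dragged.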
-  var thresholds = [0, .2, .4, .6, .8, 1].map((val, i) => {
-    var isLocked = val == 0 || val == 1
-    return {val, i, isLocked, origVal: val}
-  })
-
-  var c = d3.conventions({
-      sel: graphSel.html('').append('div'),
-      height: fig_height,
-      totalWidth: fig_width,
-      margin: {top: 100, bottom: 100}
-  });
-
-  var {predictionSel, weatherGroupSel} = (function(){
-    c.y.domain([0,9]).clamp(true);
-  
-    // x-Axis
-    c.xAxis.ticks(5).tickFormat(d3.format('.2f'))
-    c.yAxis.ticks(0)
-    d3.drawAxis(c)
-    c.svg.select('.x')
-      .translate(-40, 1)
-      .selectAll('line').translate(20, 1)
-
-    // x-Axis label
-    c.svg.append('text.axis-label')
-        .translate([c.width/2, -50])
-        .at({textAnchor: 'middle'})
-        .at({fill: '#000', fontSize: 14})
-        .text('Model Score');
-
-    // Weather icons
-    var weatherGroupSel = c.svg.appendMany('g.weatherdata', weatherdata)
-      .translate(d => [c.x(d.score), c.y(d.h)])
-      //.call(d3.attachTooltip)
-      // .on("mouseover", function(d) {    
-      //   ttSel.html("");
-      //   var gtSel = ttSel.append("div").html(`ground truth: <span>${d.label}</span>`);
-      //   ttSel.classed("tt-text", true); 
-      // })
-
-    weatherGroupSel.append('text.icon')
-        .text(function(d,i){return emojis[d.label];})
-        .at({fontSize: 18, textAnchor: 'middle', dy: 8})
-
-    // Add prediction circles
-    weatherGroupSel.append('circle.prediction')
-        .at({cx: 0, cy: 0, r: 14, opacity: 0, fillOpacity: 0, stroke: 'red'});
-    weatherGroupSel.append('path.prediction')
-        .at({d: d => ['M', -10, 10, 'L', 10, -10].join(' '), stroke: 'red', opacity: 0})
-
-    var predictionSel = c.svg.selectAll('.prediction');
-
-    return {predictionSel, weatherGroupSel}
-  })()
-
-  var {thresholdSel, messageSel, setThreshold} = (function(){
-    var thresholdSel = c.svg.append('g.threshold')
-
-    var thresholdGroupSel = thresholdSel.append('g')
-      .call(d3.drag().on('drag', 
-        () => renderThreshold(c.x.invert(d3.clamp(0, d3.event.x, c.width))))
-      )
-
-    var thresholdTextSel = thresholdGroupSel.append('g.axis').append('text')
-      .at({
-        textAnchor: 'middle',
-        dy: '.33em',
-        y: c.height + 30
-      })
-      .text('Threshold')
-
-    var rw = 16
-    thresholdGroupSel.append('rect')
-      .at({
-        width: rw,
-        x: -rw/2,
-        y: -10,
-        height: c.height + 30,
-        fillOpacity: .07,
-      })
-
-    var pathSel = thresholdGroupSel.append('path')
-      .at({
-        stroke: '#000',
-        strokeDasharray: '2 2',
-        fill: 'none',
-        d: `M 0 -10 V ` + (c.height + 20),
-      })
-
-    
-    var accuracyValBox = thresholdSel.append('rect.val-box')
-      .at({width: 55, height: 20, x: c.width/2 + 32.5, y: c.height + 65, rx: 3, ry: 3})
-
-    var accuracySel = thresholdSel.append('text.big-text')
-      .at({x: c.width/2 - 10, y: c.height + 80, textAnchor: 'middle'})
-
-    var accuracyValSel = thresholdSel.append('text.val-text')
-      .at({x: c.width/2 + 60, y: c.height + 80, textAnchor: 'middle'})
-
-
-    var messageSel = thresholdSel.append('text.tmessage')
-      .at({x: c.width/2, y: c.height + 120, textAnchor: 'middle'})
-  
-    function renderThreshold(t){
-      if (isNaN(t)) return // TODO debug this
-
-      thresholdGroupSel.translate(c.x(t), 0)
-
-      predictionSel.at({opacity: d => isClassifiedCorrectly(d, t) ? 0 : 1})
-
-      var acc = d3.mean(
-        weatherdata, 
-        d => isClassifiedCorrectly(d, t)
-      )
-      accuracySel.text('Accuracy: ');
-      accuracyValSel.text(d3.format('.1%')(acc))
-      messageSel.text('Try dragging the threshold to find the highest accuracy.')
-      thresholdTextSel.text('Threshold: ' + d3.format('.2f')(t))
-
-      threshold = t
-
-      function isClassifiedCorrectly(d,t) {
-        return d.score >= t ? d.label == 1 : d.label == 0;
-      };
-    }
-
-    renderThreshold(threshold)
-
-    var timer = null
-    function setThreshold(newThreshold, duration){
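-      // Tween from the current threshold to newThreshold over `duration` ms;
-      // restarting the shared timer cancels any in-flight animation.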
-      var interpolateFn = d3.interpolate(threshold, newThreshold)
-
-      if (timer) timer.stop()
-      timer = d3.timer(ms => {
-        var t = Math.min(ms/duration, 1)
-        if (t == 1) timer.stop()
-
-        renderThreshold(interpolateFn(t))
-      })
-    }
-
-    return {thresholdSel, messageSel, setThreshold}
-  })()
-
-  function drawTrueLegend(c){
-    var truthAxis = c.svg.append('g').translate([fig_width + 40, 1])
-    truthAxis.append('text.legend-title').text('Truth') // TODO: Maybe more of a label? "what actually happened?" or just remove this legend
-      .at({textAnchor: 'middle', fontWeight: 500, x: 20})
-
-    truthAxis.append('g').translate([20, 40])
-      .append('text.legend-text').text('Sunny').parent()
-        .at({fontSize: 15})
-      .append('text').text(emojis[0])
-        .at({fontSize: 25, x: -30, y: 5})
-    
-    truthAxis.append('g').translate([20, 80])
-      .append('text.legend-text').text('Rainy').parent()
-        .at({fontSize: 15})
-      .append('text').text(emojis[1])
-        .at({fontSize: 25, x: -30, y: 5})
-  }
-  drawTrueLegend(c);
-
-
-  var {thresholdsGroupSel, renderThresholds, setThresholds} = (function(){
-    var valsCache = []
-    var drag = d3.drag()
-      .on('drag', function(){
-        var val = d3.clamp(0, c.x.invert(d3.mouse(c.svg.node())[0]), 1)
-
-        // Force thresholds to stay sorted
-        valsCache[valsCache.activeIndex] = val
-        _.sortBy(valsCache).forEach((val, i) => thresholds[i].val = val)
-
-        renderThresholds()
-      })
-      .on('start', d => {
-        valsCache = thresholds.map(d => d.val)
-        valsCache.activeIndex = d.i
-      })
-
-    var thresholdsGroupSel = c.svg.append('g')
-
-    thresholdsGroupSel.append('text.axis-label')
-      .text('Calibrated Model Score')
-      .translate([c.width/2, c.height + 50])
-      .at({textAnchor: 'middle'})
-      .at({fill: '#000', fontSize: 14})
-
-    var thresholdsSel = thresholdsGroupSel.appendMany('g.thresholds', thresholds)
-      .call(drag)
-      .st({pointerEvents: d => d.isLocked ? 'none' : ''})
-      
-    thresholdsSel.append('g.axis').append('text')
-      .at({
-        textAnchor: 'middle',
-        dy: '.33em',
-        y: c.height + 20
-      })
-      .text(d => d3.format('.2f')(d.origVal))
-
-    var rw = 16
-    thresholdsSel.append('rect')
-      .at({
-        width: rw,
-        x: -rw/2,
-        height: c.height + 10,
-        fillOpacity: d => d.isLocked ? 0 : .07,
-      })
-
-    var pathSel = thresholdsSel.append('path')
-      .at({
-        stroke: '#000',
-        strokeDasharray: '2 2',
-        fill: 'none',
-      })
-
-    function renderThresholds(){
-      if (thresholds.some(d => isNaN(d.val))) return
-
-      thresholdsSel
-        .translate(d => c.x(d.val) + .5, 0)
-
-      pathSel.at({
-        d: d => [
-          'M', 0, c.height + 10,
-          'L', 0, 0, 
-          'L', c.x(d.origVal - d.val), -12, 
-        ].join(' ')
-      })
-
-      if (window.calibrationCurve) calibrationCurve.renderBuckets()
-    }
-
-    renderThresholds()
-
-    var timer = null
-    function setThresholds(newThresholds, duration){
-      var interpolateFns = thresholds
-        .map((d, i) => d3.interpolate(d.val, newThresholds[i]))
-
-      if (timer) timer.stop()
-      timer = d3.timer(ms => {
-        var t = Math.min(ms/duration, 1)
-        if (t == 1) timer.stop()
-
-        thresholds.forEach((d, i) => d.val = interpolateFns[i](t))
-
-        renderThresholds()
-      })
-    }
-
-    return {thresholdsGroupSel, renderThresholds, setThresholds}
-  })()
-
-  return {c, thresholdSel, messageSel, setThreshold, predictionSel, thresholds, thresholdsGroupSel, renderThresholds, setThresholds, weatherGroupSel};    
-
-}
-
-if (window.init) window.init()
\ No newline at end of file
diff --git a/spaces/merve/dataset-worldviews/public/fill-in-the-blank/tokenizer.js b/spaces/merve/dataset-worldviews/public/fill-in-the-blank/tokenizer.js
deleted file mode 100644
index 47c8bee46f60ed69ce963ee36af39bacacd9a095..0000000000000000000000000000000000000000
--- a/spaces/merve/dataset-worldviews/public/fill-in-the-blank/tokenizer.js
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
- * @license
- * Copyright 2019 Google LLC. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
-
-
-// https://github.com/tensorflow/tfjs-models/blob/master/universal-sentence-encoder/src/tokenizer/trie.ts
-
-class TrieNode {
-  constructor(key) {
-    this.key = key;
-    this.parent = null;
-    this.children = {};
-    this.end = false;
-  }
-
-  getWord() {
-    const output = [];
-    let node = this;
-
-    while (node !== null) {
-      if (node.key !== null) {
-        output.unshift(node.key);
-      }
-      node = node.parent;
-    }
-
-    return [output, this.score, this.index];
-  }
-}
-
-class Trie {
-  constructor() {
-    this.root = new TrieNode(null);
-  }
-
-  insert(word, score, index) {
-    let node = this.root;
-
-    const symbols = [];
-    for (const symbol of word) {
-      symbols.push(symbol);
-    }
-
-    for (let i = 0; i < symbols.length; i++) {
-      if (!node.children[symbols[i]]) {
-        node.children[symbols[i]] = new TrieNode(symbols[i]);
-        node.children[symbols[i]].parent = node;
-      }
-
-      node = node.children[symbols[i]];
-
-      if (i === symbols.length - 1) {
-        node.end = true;
-        node.score = score;
-        node.index = index;
-      }
-    }
-  }
-
-  find(ss) {
-    let node = this.root;
-    let iter = 0;
-
-    while (iter < ss.length && node != null) {
-      node = node.children[ss[iter]];
-      iter++;
-    }
-
-    return node;
-  }
-}
-
-const bert = {
-  loadTokenizer: async () => {
-    const tokenizer = new BertTokenizer();
-    await tokenizer.load();
-
-    return tokenizer;
-  }
-};
-
-class BertTokenizer {
-  constructor() {
-    this.separator = '\u2581';
-    this.UNK_INDEX = 100;
-  }
-
-  async load() {
-    this.vocab = await this.loadVocab();
-
-    this.trie = new Trie();
-    // Actual tokens start at 999.
-    for (let i = 999; i < this.vocab.length; i++) {
-      const word = this.vocab[i];
-      this.trie.insert(word, 1, i);
-    }
-
-    this.token2Id = {}
-    this.vocab.forEach((d, i) => {
-      this.token2Id[d] = i
-    })
-
-    this.decode = a => a.map(d => this.vocab[d].replace('▁', ' ')).join('')
-    // Adds [CLS] and [SEP]
-    this.tokenizeCLS = str => [101, ...this.tokenize(str), 102]
-  }
-
-  async loadVocab() {
-    if (!window.bertProcessedVocab){
-      window.bertProcessedVocab = await (await fetch('data/processed_vocab.json')).json()
-    }
-    return window.bertProcessedVocab
-  }
-
-  processInput(text) {
-    const words = text.split(' ');
-    return words.map(word => {
-      if (word !== '[CLS]' && word !== '[SEP]') {
-        return this.separator + word.toLowerCase().normalize('NFKC');
-      }
-      return word;
-    });
-  }
-
-  tokenize(text) {
-    // Source:
-    // https://github.com/google-research/bert/blob/88a817c37f788702a363ff935fd173b6dc6ac0d6/tokenization.py#L311
-
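-    // Greedy longest-match-first (WordPiece-style): repeatedly take the longest
-    // prefix of each word found in the trie; emit [UNK] for the whole word if
-    // no prefix matches.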
-    let outputTokens = [];
-
-    const words = this.processInput(text);
-
-    for (let i = 0; i < words.length; i++) {
-      const chars = [];
-      for (const symbol of words[i]) {
-        chars.push(symbol);
-      }
-
-      let isUnknown = false;
-      let start = 0;
-      const subTokens = [];
-
-      const charsLength = chars.length;
-
-      while (start < charsLength) {
-        let end = charsLength;
-        let currIndex;
-
-        while (start < end) {
-          let substr = chars.slice(start, end).join('');
-
-          const match = this.trie.find(substr);
-
-          if (match != null && match.end) {
-            currIndex = match.getWord()[2];
-            break;
-          }
-
-          end = end - 1;
-        }
-
-        if (currIndex == null) {
-          isUnknown = true;
-          break;
-        }
-
-        subTokens.push(currIndex);
-        start = end;
-      }
-
-      if (isUnknown) {
-        outputTokens.push(this.UNK_INDEX);
-      } else {
-        outputTokens = outputTokens.concat(subTokens);
-      }
-    }
-
-    return outputTokens;
-  }
-}
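-
-// Hypothetical usage sketch (assumes data/processed_vocab.json is served):
-// bert.loadTokenizer().then(tokenizer => {
-//   var ids = tokenizer.tokenizeCLS('hello world')  // e.g. [101, ..., 102]
-//   console.log(tokenizer.decode(ids.slice(1, -1)))
-// })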
\ No newline at end of file
diff --git a/spaces/merve/fill-in-the-blank/public/anonymization/style-graph-scroll.css b/spaces/merve/fill-in-the-blank/public/anonymization/style-graph-scroll.css
deleted file mode 100644
index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/public/anonymization/style-graph-scroll.css
+++ /dev/null
@@ -1,160 +0,0 @@
-/** { border: 1px solid #f00; }*/
-
-
-#container{
-  position: relative;
-  width: auto;
-  margin-left: -25px;
-  /*margin-bottom: 100px;*/
-}
-
-#sections{
-  width: 330px;
-  pointer-events: none;
-}
-
-#sections > div{
-  background: white;
-  opacity: .2;
-  margin-bottom: 400px;
-  line-height: 1.4em;
-  transition: opacity .2s;
-  pointer-events: all;
-}
-#sections > div:last-child{
-  height: 480px;
-  margin-bottom: 0px;
-}
-#sections > div.graph-scroll-active{
-  opacity: 1;
-}
-
-#graph{
-  margin-left: 40px;
-  width: 500px;
-  position: -webkit-sticky;
-  position: sticky;
-  top: 0px;
-  float: right;
-  height: 580px;
-}
-
-.slider-outer {
-  display: block;
-  max-width: 300px;
-}
-
-@media (max-width: 925px)  {
-  #container{
-    margin-left: 0px;
-  }
-  
-  #graph{
-    width: 100%;
-    float: none;
-    max-width: 500px;
-    margin: 0px auto;
-  }
-
-  #graph > div{
-    position: relative;
-    left:12px;
-  }
-
-  #sections{
-    width: auto;
-    position: relative;
-    margin: 0px auto;
-  }
-
-  #sections > div{
-    background: rgba(255,255,255,.8);
-    padding: 10px;
-    border-top: 1px solid;
-    border-bottom: 1px solid;
-    margin-bottom: 80vh;
-    width: calc(100vw - 20px);
-    margin-left: -5px;
-  }
-
-  #sections > div > *{
-    max-width: 750px;
-  }
-
-  #sections > div:first-child{
-    opacity: 1;
-    margin-top: -260px;
-  }
-
-  #sections > div:last-child{
-    height: auto;
-  }
-
-  #sections h3{
-    margin-top: .5em;
-  }
-
-  /* Adjust buttons for mobile. */
-
-  .button-container{
-    text-align: center;
-    left:0px;
-  }
-
-  /* Adjust sliders for mobile. */
-  input[type="range" i] {
-    width: 280px;
-  }
-  .slider-label-container{
-    width: 145px;
-    /* display: inline-block; */
-  }
-
-  .slide-container-heads-prob, .slide-container-population {
-    text-align: center;
-  }
-
-  .slider-container {
-    margin-bottom: 5px;
-    text-align: center;
-    width: 300px;
-    /* display:inline-block; */
-  }
-
-  .slider-outer {
-    text-align: center;
-    display: flex;
-    max-width: 300px;
-  }
-
-  .headsProb, .population {
-    margin-left: 15px;
-  }
-
-  .slide-container-population {
-    margin-bottom: -10px;
-  }
-
-  .pointer div {
-    left: 10px;
-    top: 37px;
-  }
-
-  /* Adjust post summary test for mobile. */
-  .post-summary{
-    margin-left: 8px;
-    margin-bottom: 60px;
-    margin-top: 40px;
-  }
-  
-}
-
-#graph > div{
-  margin: 20px 35px;
-}
-
-
-#end{
-  height: 15vh;
-}
-
diff --git a/spaces/merve/fill-in-the-blank/source/private-and-fair/top-bot-digits.js b/spaces/merve/fill-in-the-blank/source/private-and-fair/top-bot-digits.js
deleted file mode 100644
index bc2f85ec8cb3b5544245f159aa62ff2fbffbcbb5..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/source/private-and-fair/top-bot-digits.js
+++ /dev/null
@@ -1,66 +0,0 @@
-
-!(async function(){
-  await util.getFile(`cns-cache/mnist_train_raw_3.npy`)
-  var digitMetadata = await util.getFile('mnist_train.csv')
-  var {byLabel} = util.decorateDigitMetadata(digitMetadata)
-
-  var sel = d3.select('.top-bot-digits').html('')
-      .at({role: 'graphics-document', 'aria-label': `The twenty-five MNIST 3 digits most and least sensitive to higher and lower privacy. The digits most sensitive to higher privacy are much more poorly drawn than the ones least sensitive to higher privacy.`})
-
-  var digitSel = sel.append('div')
-  var buttonSel = sel.append('div.digit-button-container')
-    .appendMany('div.button', d3.range(10))
-    .text(d => d)
-    .on('click', d => drawClass(byLabel[d]))
-
-  drawClass(byLabel[3])
-
-  async function drawClass(digitClass){
-    buttonSel.classed('active', d => d == digitClass.key)
-    await util.getFile(`cns-cache/mnist_train_raw_${digitClass.key}.npy`)
-
-    var nRows = 5
-    var nCols = 5
-
-    var bot = _.sortBy(digitClass, d => +d.priv_order).slice(0, nRows*nCols)
-    var top = _.sortBy(digitClass, d => -d.priv_order).slice(0, nRows*nCols)
-    
-    digitSel.html('').append('div')
-      .st({maxWidth: 640, margin: '0 auto'})
-      .appendMany('div', [bot, top])
-      .st({display: 'inline-block'})
-      .each(drawDigitBlock)
-
-
-    function drawDigitBlock(digits, isBot){
-      var s = 2
-
-      var sel = d3.select(this).append('div')
-
-      var c = d3.conventions({
-        sel,
-        width: s*29*nCols,
-        height: s*29*nRows,
-        layers: 'cs',
-        margin: {top: 30, bottom: 10, right: 10, left: 10}
-      })
-
-      var ctx = c.layers[0]
-
-      digits.forEach((d, i) => {
-        util.drawDigit(
-          ctx, 
-          +d.i, 
-          s,
-          (i % nCols)*s*29,
-          Math.floor(i/nCols)*s*29
-        )
-      })
-
-      c.svg.append('text')
-        .text(isBot ? 'Least sensitive to higher privacy' : 'Most sensitive to higher privacy')
-        .at({dy: '-.4em', textAnchor: 'middle', x: c.width/2, fontWeight: 600, fontSize: 14})
-    }
-  }
-
-})()
\ No newline at end of file
diff --git a/spaces/merve/hidden-bias/source/anonymization/annotations.js b/spaces/merve/hidden-bias/source/anonymization/annotations.js
deleted file mode 100644
index ed45db46369d1bb2a709b20bd97c29451d4284c0..0000000000000000000000000000000000000000
--- a/spaces/merve/hidden-bias/source/anonymization/annotations.js
+++ /dev/null
@@ -1,38 +0,0 @@
-var annotations = 
-
-[
-]
-
-
-
-
-function addSwoop(c){
-  var swoopy = d3.swoopyDrag()
-    .x(d => c.x(d.x))
-    .y(d => c.y(d.y))
-    .draggable(0)
-    .annotations(annotations)
-
-  var swoopySel = c.svg.append('g.annotations').call(swoopy)
-
-  c.svg.append('marker#arrow')
-      .attr('viewBox', '-10 -10 20 20')
-      .attr('markerWidth', 20)
-      .attr('markerHeight', 20)
-      .attr('orient', 'auto')
-    .append('path').at({d: 'M-6.75,-6.75 L 0,0 L -6.75,6.75'})
-
-
-  swoopySel.selectAll('path').attr('marker-end', 'url(#arrow)')
-  window.annotationSel = swoopySel.selectAll('g')
-    .st({fontSize: 12, opacity: d => d.slide == 0 ? 1 : 0})
-
-  swoopySel.selectAll('text')
-    .each(function(d){
-      d3.select(this)
-        .text('')                        //clear existing text
-        .tspans(d3.wordwrap(d.text, d.width || 20), 12) //wrap after 20 char
-    })  
-}
-
-
diff --git a/spaces/merve/measuring-fairness/public/measuring-fairness/sel.js b/spaces/merve/measuring-fairness/public/measuring-fairness/sel.js
deleted file mode 100644
index 0aefefe517d53ca634ed6e58d6cf8554cc386afa..0000000000000000000000000000000000000000
--- a/spaces/merve/measuring-fairness/public/measuring-fairness/sel.js
+++ /dev/null
@@ -1,151 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-
-window.makeSel = function(){
-  var s = c.width/(nCols -2) -1
-
-  var personSel = c.svg.appendMany('g', students)
-  var rectSel = personSel.append('rect')
-    .at({
-      height: s,
-      width: s,
-      x: -s/2,
-      y: -s/2,
-      // fillOpacity: .2
-    })
-
-  var textSel = personSel.append('text.weepeople')
-    .text(d => d.letter)
-    .at({fontSize: d => d.isMale ? 26 : 34, dy: '.33em', textAnchor: 'middle'})
-    .st({stroke: d => d.isSick ? dcolors.sick : dcolors.well})
-
-  addSwoop(c)
-
-  var botAxis = c.svg.append('g').translate(c.width + 150, 1)
-  var truthAxis = botAxis.append('g.axis').translate([0, 0])
-
-  truthAxis.append('text').text('Truth')
-    .at({textAnchor: 'middle', fontWeight: 500, x: s*2.65})
-
-  truthAxis.append('g').translate([45, 22])
-    .append('text').text('Sick').parent()
-    .append('text.weepeople').text('k')
-      .at({fontSize: 34, x: 22, y: 5})
-      .st({fill: colors.sick})
-
-  truthAxis.append('g').translate([95, 22])
-    .append('text').text('Well').parent()
-    .append('text.weepeople').text('d')
-      .at({fontSize: 34, fill: colors.well, x: 22, y: 5})
-      .st({fill: colors.well})
-
-
-  var mlAxis = botAxis.append('g.axis').translate([220, 0])
-
-  mlAxis.append('text').text('ML Prediction')
-    .at({textAnchor: 'middle', fontWeight: 500, x: s*2.8})
-
-  mlAxis.append('g').translate([35, 22])
-    .append('text').text('Sick').parent()
-    .append('rect')
-      .at({width: s*.7, height: s*.7, fill: lcolors.sick, x: 28, y: -17})
-
-  mlAxis.append('g').translate([100, 22])
-    .append('text').text('Well').parent()
-    .append('rect')
-      .at({width: s*.7, height: s*.7, fill: lcolors.well, x: 28, y: -17})
-
-
-
-  var fpAxis = c.svg.append('g.axis')
-
-  // fpAxis.append('rect')
-  //   .translate(nCols*s - 20, 1)
-  //   .at({
-  //     fill: lcolors.well,
-  //     x: -82,
-  //     y: -12,
-  //     width: 56,
-  //     height: 28,
-  //     // stroke: '#000',
-  //   })
-
-  // fpAxis.append('text')
-  //   .translate(nCols*s - 20, 1)
-  //   .tspans(['False', 'Negatives'], 12)
-  //   .at({textAnchor: 'end', x: -s/2 - 10, fill: colors.sick})
-
-
-  // fpAxis.append('text')
-  //   .translate(nCols*s, 0)
-  //   .tspans(['False', 'Positives'], 12)
-  //   .at({textAnchor: 'start', x: s/2 + 7, fill: colors.well})
-
-
-  var sexAxis = c.svg.append('g.axis')
- 
-  sexAxis.append('text').st({fontWeight: 500, fill: ''})
-    .translate([-15, -30])
-    .text('Adults')
-
-  sexAxis.append('text').st({fontWeight: 500, fill: ''})
-    .translate([-15, -30 + students.maleOffsetPx])
-    .text('Children')
-
-
-  var brAxis = c.svg.append('g.axis')
-  var cpx = 0
-
-  brAxis.append('path')
-    .translate([-15, -20])
-    .at({
-      stroke: colors.sick,
-      fill: 'none',
-      d: ['M -3 -3 v', -cpx, 'h', students.fSickCols*students.colWidth, 'v', cpx].join('')
-    })
-
-  brAxis.append('path')
-    .translate([-15, -20  + students.maleOffsetPx])
-    .at({
-      stroke: colors.sick,
-      fill: 'none',
-      d: ['M -3 -3 v', -cpx, 'h', students.mSickCols*students.colWidth, 'v', cpx].join('')
-    })
-
-  brAxis.append('text').st({fontWeight: 500, fill: colors.sick})
-    .translate([-15, -30])
-    .text('Sick Adults')
-
-  brAxis.append('text').st({fontWeight: 500, fill: colors.sick})
-    .translate([-15, -30 + students.maleOffsetPx])
-    .text('Sick Children')
-
-
-
-
-  return {personSel, textSel, rectSel, fpAxis, sexAxis, brAxis, truthAxis, mlAxis, botAxis}
-}
-
-
-
-
-
-
-
-
-
-
-if (window.init) window.init()
diff --git a/spaces/merve/measuring-fairness/source/measuring-diversity/style.css b/spaces/merve/measuring-fairness/source/measuring-diversity/style.css
deleted file mode 100644
index 38a1149b1a986d176009fce1d0d2861091ef2c1e..0000000000000000000000000000000000000000
--- a/spaces/merve/measuring-fairness/source/measuring-diversity/style.css
+++ /dev/null
@@ -1,229 +0,0 @@
-html{
-  min-width: 800px;
-  overflow-x: auto;
-}
-
-p{
-  max-width: 750px;
-  margin: 0px auto;
-  margin-top: 1em;
-  margin-bottom: 1em;
-}
-
-.white{
-  stroke: #fff;
-  fill: none;
-  stroke-width: 1;
-}
-
-.player{
-	cursor: pointer;
-  stroke: #000;
-  stroke-width: 2;
-}
-
-.button{
-  border: .5px solid #000;
-  /*border-bottom-width: 4px;*/
-  /*border-right-width: 4px;*/
-  border-radius: 8px;
-  padding: 4px;
-  margin: 2px;
-  cursor: pointer;
-  display: inline-block;
-  /*font-family: monospace;*/
-  /*font-family: 'Roboto Slab', serif;*/
-  /*font-size: 16px;*/
-  user-select: none;
-  font-family: 'Google Sans', sans-serif;
-  font-family: 'Roboto', Helvetica, sans-serif;
-
-  /*font-weight: 300;*/
-
-}
-.button:hover{
-  background: #eee !important;
-}
-
-.button:active{
-}
-
-
-svg{
-  overflow: visible;
-}
-
-.axis text{
-  fill: #999;
-  font-family: 'Google Sans', sans-serif;
-  font-family: 'Roboto', Helvetica, sans-serif;
-}
-.axis text.chart-title{
-  fill: #000;
-  font-size: 16px;
-}
-
-.field{
-  font-family: 'Roboto', Helvetica, sans-serif;
-}
-
-.chart-title span{
-  padding: 4px;
-}
-
-
-.shapes{
-  line-height: 0px;
-  margin-bottom: 80px;
-  margin-top: 20px;
-}
-
-.shape{
-  display: inline-block;
-  outline: 1px solid #bbb;
-  margin: 5px;
-  cursor: pointer;
-}
-.shape:hover{
-  outline: 1px solid #000;
-  background: #eee !important;
-}
-.measure:hover{
-  outline: 1px solid #ccc;
-  background: #eee !important;
-  outline: 1px solid #000 !important;
-}
-.measure.active{
-}
-
-.shape{
-  opacity: .3;
-}
-
-.shapes{
-  user-select: none;
-}
-
-
-.shape.active{
-  opacity: 1;
-  outline: 1px solid #bf0bbf;
-  background: rgba(255,0,255,.03);
-}
-.shape.active:hover{
-  background: rgba(255,0,255,.1) !important;
-}
-#all-shapes .shape.active{
-  outline: 1px solid #bbb;
-  background: #fff;
-}
-
-
-.top, .bot{
-  line-height: 1.8em;
-}
-
-.measure{
-  cursor: pointer;
-  outline: 1px solid #ccc;
-  margin: 10px;
-}
-
-.measure-container{
-  display:inline-block;
-  width: 300px;
-  margin-top: 15px;
-}
-
-.measure-description{
-  font-size: 14px;
-  max-width: 120px;
-  line-height: 16px;
-  display: inline-block;
-}
-
-.emphasized{
-  font-weight: 400;
-}
-
-.set.no-stroke{
-  opacity: 0;
-}
-.set{
-  stroke: #000;
-  opacity: .3;
-}
-.set.selected{
-  stroke: #fcb2f7;
-  stroke: #bf0bbf;
-  stroke-width: 1;
-  opacity: 1;
-}
-.row.selected text{
-  opacity: 1 !important;
-  fill: #bf0bbf;
-  font-weight: 500;
-}
-
-text.selected{
-  opacity: 1 !important;
-  fill: #bf0bbf;
-  font-weight: 500;
-
-}
-
-
-
-text{
-  /*pointer-events: none;*/
-  text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff;
-}
-
-#coat-v-gender, #pick-green, #pick-triangle, #pick-metric, #all-shapes{
-  width: 850px;
-}
-#coat-v-gender > div > div{
-  background-size: cover;
-  background-position: center;
-}
-
-.note, ul{
-  opacity: .5;
-  max-width: 750px;
-  margin: 0px auto;
-  margin-top: 1em;
-  margin-bottom: 1em;
-}
-
-#columns-height {
-  margin-bottom: 70px;
-}
-
-.post-summary{
-
-  margin-bottom: auto;
-}
-
-
-#all-shapes{
-  pointer-events: none;
-}
-
-#all-shapes .shape{
-  outline: 0px !important;
-}
-
-.post-summary{
-  display: none;
-}
-
-#pick-metric .top text, #coat-v-gender .top text {
-  font-weight: 300 !important;
-}
-
diff --git a/spaces/mfrashad/ClothingGAN/app.py b/spaces/mfrashad/ClothingGAN/app.py
deleted file mode 100644
index c531e7722fb86e1ef3d338cd3991eba12af3ec21..0000000000000000000000000000000000000000
--- a/spaces/mfrashad/ClothingGAN/app.py
+++ /dev/null
@@ -1,283 +0,0 @@
-import nltk; nltk.download('wordnet')
-
-#@title Load Model
-selected_model = 'lookbook'
-
-# Load model
-import torch
-import PIL
-import numpy as np
-from PIL import Image
-import imageio
-from models import get_instrumented_model
-from decomposition import get_or_compute
-from config import Config
-from skimage import img_as_ubyte
-import gradio as gr
-from ipywidgets import fixed
-
-# Speed up computation
-torch.autograd.set_grad_enabled(False)
-torch.backends.cudnn.benchmark = True
-
-# Specify model to use
-config = Config(
-  model='StyleGAN2',
-  layer='style',
-  output_class=selected_model,
-  components=80,
-  use_w=True,
-  batch_size=5_000, # style layer quite small
-)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-inst = get_instrumented_model(config.model, config.output_class,
-                              config.layer, torch.device(device), use_w=config.use_w)
-
-path_to_components = get_or_compute(config, inst)
-
-model = inst.model
-
-comps = np.load(path_to_components)
-lst = comps.files
-latent_dirs = []
-latent_stdevs = []
-
-load_activations = False
-
-for item in lst:
-    if load_activations:
-      if item == 'act_comp':
-        for i in range(comps[item].shape[0]):
-          latent_dirs.append(comps[item][i])
-      if item == 'act_stdev':
-        for i in range(comps[item].shape[0]):
-          latent_stdevs.append(comps[item][i])
-    else:
-      if item == 'lat_comp':
-        for i in range(comps[item].shape[0]):
-          latent_dirs.append(comps[item][i])
-      if item == 'lat_stdev':
-        for i in range(comps[item].shape[0]):
-          latent_stdevs.append(comps[item][i])
-
-
-#@title Define functions
-
-
-# Taken from https://github.com/alexanderkuk/log-progress
-def log_progress(sequence, every=1, size=None, name='Items'):
-    from ipywidgets import IntProgress, HTML, VBox
-    from IPython.display import display
-
-    is_iterator = False
-    if size is None:
-        try:
-            size = len(sequence)
-        except TypeError:
-            is_iterator = True
-    if size is not None:
-        if every is None:
-            if size <= 200:
-                every = 1
-            else:
-                every = int(size / 200)     # every 0.5%
-    else:
-        assert every is not None, 'sequence is iterator, set every'
-
-    if is_iterator:
-        progress = IntProgress(min=0, max=1, value=1)
-        progress.bar_style = 'info'
-    else:
-        progress = IntProgress(min=0, max=size, value=0)
-    label = HTML()
-    box = VBox(children=[label, progress])
-    display(box)
-
-    index = 0
-    try:
-        for index, record in enumerate(sequence, 1):
-            if index == 1 or index % every == 0:
-                if is_iterator:
-                    label.value = '{name}: {index} / ?'.format(
-                        name=name,
-                        index=index
-                    )
-                else:
-                    progress.value = index
-                    label.value = u'{name}: {index} / {size}'.format(
-                        name=name,
-                        index=index,
-                        size=size
-                    )
-            yield record
-    except:
-        progress.bar_style = 'danger'
-        raise
-    else:
-        progress.bar_style = 'success'
-        progress.value = index
-        label.value = "{name}: {index}".format(
-            name=name,
-            index=str(index or '?')
-        )
-
-def name_direction(sender):
-  if not text.value:
-    print('Please name the direction before saving')
-    return
-    
-  if num in named_directions.values():
-    target_key = list(named_directions.keys())[list(named_directions.values()).index(num)]
-    print(f'Direction already named: {target_key}')
-    print(f'Overwriting... ')
-    del(named_directions[target_key])
-  named_directions[text.value] = [num, start_layer.value, end_layer.value]
-  save_direction(random_dir, text.value)
-  for item in named_directions:
-    print(item, named_directions[item])
-
-def save_direction(direction, filename):
-  filename += ".npy"
-  np.save(filename, direction, allow_pickle=True, fix_imports=True)
-  print(f'Latent direction saved as {filename}')
-
-def mix_w(w1, w2, content, style):
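-    # Blend two W+ latent codes per layer: layers 0-4 (coarse structure) blend
-    # by `content`, layers 5-15 (finer style) by `style`; a weight of 0 takes
-    # w1's value, 1 keeps w2's.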
-    for i in range(0,5):
-        w2[i] = w1[i] * (1 - content) + w2[i] * content
-
-    for i in range(5, 16):
-        w2[i] = w1[i] * (1 - style) + w2[i] * style
-    
-    return w2
-
-def display_sample_pytorch(seed, truncation, directions, distances, scale, start, end, w=None, disp=True, save=None, noise_spec=None):
-    # blockPrint()
-    model.truncation = truncation
-    if w is None:
-        w = model.sample_latent(1, seed=seed).detach().cpu().numpy()
-        w = [w]*model.get_max_latents() # one per layer
-    else:
-        w = [np.expand_dims(x, 0) for x in w]
-    
-    for l in range(start, end):
-      for i in range(len(directions)):
-        w[l] = w[l] + directions[i] * distances[i] * scale
-    
-    torch.cuda.empty_cache()
-    #save image and display
-    out = model.sample_np(w)
-    final_im = Image.fromarray((out * 255).astype(np.uint8)).resize((500,500),Image.LANCZOS)
-    
-    
-    if save is not None:
-      if disp == False:
-        print(save)
-      final_im.save(f'out/{seed}_{save:05}.png')
-    if disp:
-      display(final_im)
-    
-    return final_im
-
-def generate_mov(seed, truncation, direction_vec, scale, layers, n_frames, out_name = 'out', noise_spec = None, loop=True):
-  """Generates a mov moving back and forth along the chosen direction vector"""
-  # Example of reading a generated set of images, and storing as MP4.
-  movieName = f'{out_name}.mp4'
-  offset = -10
-  step = 20 / n_frames
-  imgs = []
-  for i in log_progress(range(n_frames), name = "Generating frames"):
-    print(f'\r{i} / {n_frames}', end='')
-    w = model.sample_latent(1, seed=seed).cpu().numpy()
-
-    model.truncation = truncation
-    w = [w]*model.get_max_latents() # one per layer
-    for l in layers:
-      if l <= model.get_max_latents():
-          w[l] = w[l] + direction_vec * offset * scale
-
-    #save image and display
-    out = model.sample_np(w)
-    final_im = Image.fromarray((out * 255).astype(np.uint8))
-    imgs.append(out)
-    #increase offset
-    offset += step
-  if loop:
-    imgs += imgs[::-1]
-  with imageio.get_writer(movieName, mode='I') as writer:
-    for image in log_progress(list(imgs), name = "Creating animation"):
-        writer.append_data(img_as_ubyte(image))
-
-
-#@title Demo UI
-
-
-def generate_image(seed1, seed2, content, style, truncation, c0, c1, c2, c3, c4, c5, c6, start_layer, end_layer):
-    seed1 = int(seed1)
-    seed2 = int(seed2)
-
-    scale = 1
-    params = {'c0': c0,
-          'c1': c1,
-          'c2': c2,
-          'c3': c3,
-          'c4': c4,
-          'c5': c5,
-          'c6': c6}
-
-    param_indexes = {'c0': 0,
-              'c1': 1,
-              'c2': 2,
-              'c3': 3,
-              'c4': 4,
-              'c5': 5,
-              'c6': 6}
-
-    directions = []
-    distances = []
-    for k, v in params.items():
-        directions.append(latent_dirs[param_indexes[k]])
-        distances.append(v)
-
-    w1 = model.sample_latent(1, seed=seed1).detach().cpu().numpy()
-    w1 = [w1]*model.get_max_latents() # one per layer
-    im1 = model.sample_np(w1)
-
-    w2 = model.sample_latent(1, seed=seed2).detach().cpu().numpy()
-    w2 = [w2]*model.get_max_latents() # one per layer
-    im2 = model.sample_np(w2)
-    combined_im = np.concatenate([im1, im2], axis=1)
-    input_im = Image.fromarray((combined_im * 255).astype(np.uint8))
-    
-
-    mixed_w = mix_w(w1, w2, content, style)
-    return input_im, display_sample_pytorch(seed1, truncation, directions, distances, scale, int(start_layer), int(end_layer), w=mixed_w, disp=False)
-
-truncation = gr.inputs.Slider(minimum=0, maximum=1, default=0.5, label="Truncation")
-start_layer = gr.inputs.Number(default=3, label="Start Layer")
-end_layer = gr.inputs.Number(default=14, label="End Layer")
-seed1 = gr.inputs.Number(default=0, label="Seed 1")
-seed2 = gr.inputs.Number(default=0, label="Seed 2")
-content = gr.inputs.Slider(label="Structure", minimum=0, maximum=1, default=0.5)
-style = gr.inputs.Slider(label="Style", minimum=0, maximum=1, default=0.5)
-
-slider_max_val = 20
-slider_min_val = -20
-slider_step = 1
-
-c0 = gr.inputs.Slider(label="Sleeve & Size", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c1 = gr.inputs.Slider(label="Dress - Jacket", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c2 = gr.inputs.Slider(label="Female Coat", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c3 = gr.inputs.Slider(label="Coat", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c4 = gr.inputs.Slider(label="Graphics", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c5 = gr.inputs.Slider(label="Dark", minimum=slider_min_val, maximum=slider_max_val, default=0)
-c6 = gr.inputs.Slider(label="Less Cleavage", minimum=slider_min_val, maximum=slider_max_val, default=0)
-
-
-scale = 1
-
-inputs = [seed1, seed2, content, style, truncation, c0, c1, c2, c3, c4, c5, c6, start_layer, end_layer]
-description = "Change the seed number to generate different parent design. Made by <a href='https://www.mfrashad.com/' target='_blank'>@mfrashad</a>. For more details on how to build this, read the <a href='https://towardsdatascience.com/how-to-build-an-ai-fashion-designer-575b5e67915e' target='_blank'>article</a> or <a href='https://github.com/mfrashad/ClothingGAN' target='_blank'>repo</a>. Please give a clap/star if you find it useful :)"
-
-gr.Interface(generate_image, inputs, ["image", "image"], description=description, live=True, title="ClothingGAN").launch()
\ No newline at end of file
diff --git a/spaces/miaomiaoren/vits-uma-genshin-honkai/text/cleaners.py b/spaces/miaomiaoren/vits-uma-genshin-honkai/text/cleaners.py
deleted file mode 100644
index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000
--- a/spaces/miaomiaoren/vits-uma-genshin-honkai/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
-  1. "english_cleaners" for English text
-  2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
-     the Unidecode library (https://pypi.python.org/pypi/Unidecode)
-  3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
-     the symbols in symbols.py to match your data).
-'''
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-  ('mrs', 'misess'),
-  ('mr', 'mister'),
-  ('dr', 'doctor'),
-  ('st', 'saint'),
-  ('co', 'company'),
-  ('jr', 'junior'),
-  ('maj', 'major'),
-  ('gen', 'general'),
-  ('drs', 'doctors'),
-  ('rev', 'reverend'),
-  ('lt', 'lieutenant'),
-  ('hon', 'honorable'),
-  ('sgt', 'sergeant'),
-  ('capt', 'captain'),
-  ('esq', 'esquire'),
-  ('ltd', 'limited'),
-  ('col', 'colonel'),
-  ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
-  ('ㄳ', 'ㄱㅅ'),
-  ('ㄵ', 'ㄴㅈ'),
-  ('ㄶ', 'ㄴㅎ'),
-  ('ㄺ', 'ㄹㄱ'),
-  ('ㄻ', 'ㄹㅁ'),
-  ('ㄼ', 'ㄹㅂ'),
-  ('ㄽ', 'ㄹㅅ'),
-  ('ㄾ', 'ㄹㅌ'),
-  ('ㄿ', 'ㄹㅍ'),
-  ('ㅀ', 'ㄹㅎ'),
-  ('ㅄ', 'ㅂㅅ'),
-  ('ㅘ', 'ㅗㅏ'),
-  ('ㅙ', 'ㅗㅐ'),
-  ('ㅚ', 'ㅗㅣ'),
-  ('ㅝ', 'ㅜㅓ'),
-  ('ㅞ', 'ㅜㅔ'),
-  ('ㅟ', 'ㅜㅣ'),
-  ('ㅢ', 'ㅡㅣ'),
-  ('ㅑ', 'ㅣㅏ'),
-  ('ㅒ', 'ㅣㅐ'),
-  ('ㅕ', 'ㅣㅓ'),
-  ('ㅖ', 'ㅣㅔ'),
-  ('ㅛ', 'ㅣㅗ'),
-  ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-  ('a', '에이'),
-  ('b', '비'),
-  ('c', '시'),
-  ('d', '디'),
-  ('e', '이'),
-  ('f', '에프'),
-  ('g', '지'),
-  ('h', '에이치'),
-  ('i', '아이'),
-  ('j', '제이'),
-  ('k', '케이'),
-  ('l', '엘'),
-  ('m', '엠'),
-  ('n', '엔'),
-  ('o', '오'),
-  ('p', '피'),
-  ('q', '큐'),
-  ('r', '아르'),
-  ('s', '에스'),
-  ('t', '티'),
-  ('u', '유'),
-  ('v', '브이'),
-  ('w', '더블유'),
-  ('x', '엑스'),
-  ('y', '와이'),
-  ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-  ('a', 'ㄟˉ'),
-  ('b', 'ㄅㄧˋ'),
-  ('c', 'ㄙㄧˉ'),
-  ('d', 'ㄉㄧˋ'),
-  ('e', 'ㄧˋ'),
-  ('f', 'ㄝˊㄈㄨˋ'),
-  ('g', 'ㄐㄧˋ'),
-  ('h', 'ㄝˇㄑㄩˋ'),
-  ('i', 'ㄞˋ'),
-  ('j', 'ㄐㄟˋ'),
-  ('k', 'ㄎㄟˋ'),
-  ('l', 'ㄝˊㄛˋ'),
-  ('m', 'ㄝˊㄇㄨˋ'),
-  ('n', 'ㄣˉ'),
-  ('o', 'ㄡˉ'),
-  ('p', 'ㄆㄧˉ'),
-  ('q', 'ㄎㄧㄡˉ'),
-  ('r', 'ㄚˋ'),
-  ('s', 'ㄝˊㄙˋ'),
-  ('t', 'ㄊㄧˋ'),
-  ('u', 'ㄧㄡˉ'),
-  ('v', 'ㄨㄧˉ'),
-  ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
-  ('x', 'ㄝˉㄎㄨˋㄙˋ'),
-  ('y', 'ㄨㄞˋ'),
-  ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-  ('ㄅㄛ', 'p⁼wo'),
-  ('ㄆㄛ', 'pʰwo'),
-  ('ㄇㄛ', 'mwo'),
-  ('ㄈㄛ', 'fwo'),
-  ('ㄅ', 'p⁼'),
-  ('ㄆ', 'pʰ'),
-  ('ㄇ', 'm'),
-  ('ㄈ', 'f'),
-  ('ㄉ', 't⁼'),
-  ('ㄊ', 'tʰ'),
-  ('ㄋ', 'n'),
-  ('ㄌ', 'l'),
-  ('ㄍ', 'k⁼'),
-  ('ㄎ', 'kʰ'),
-  ('ㄏ', 'h'),
-  ('ㄐ', 'ʧ⁼'),
-  ('ㄑ', 'ʧʰ'),
-  ('ㄒ', 'ʃ'),
-  ('ㄓ', 'ʦ`⁼'),
-  ('ㄔ', 'ʦ`ʰ'),
-  ('ㄕ', 's`'),
-  ('ㄖ', 'ɹ`'),
-  ('ㄗ', 'ʦ⁼'),
-  ('ㄘ', 'ʦʰ'),
-  ('ㄙ', 's'),
-  ('ㄚ', 'a'),
-  ('ㄛ', 'o'),
-  ('ㄜ', 'ə'),
-  ('ㄝ', 'e'),
-  ('ㄞ', 'ai'),
-  ('ㄟ', 'ei'),
-  ('ㄠ', 'au'),
-  ('ㄡ', 'ou'),
-  ('ㄧㄢ', 'yeNN'),
-  ('ㄢ', 'aNN'),
-  ('ㄧㄣ', 'iNN'),
-  ('ㄣ', 'əNN'),
-  ('ㄤ', 'aNg'),
-  ('ㄧㄥ', 'iNg'),
-  ('ㄨㄥ', 'uNg'),
-  ('ㄩㄥ', 'yuNg'),
-  ('ㄥ', 'əNg'),
-  ('ㄦ', 'əɻ'),
-  ('ㄧ', 'i'),
-  ('ㄨ', 'u'),
-  ('ㄩ', 'ɥ'),
-  ('ˉ', '→'),
-  ('ˊ', '↑'),
-  ('ˇ', '↓↑'),
-  ('ˋ', '↓'),
-  ('˙', ''),
-  (',', ','),
-  ('。', '.'),
-  ('!', '!'),
-  ('?', '?'),
-  ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
-  for regex, replacement in _abbreviations:
-    text = re.sub(regex, replacement, text)
-  return text
-
-
-def lowercase(text):
-  return text.lower()
-
-
-def collapse_whitespace(text):
-  return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
-  return unidecode(text)
-
-
-def japanese_to_romaji_with_accent(text):
-  '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
-  sentences = re.split(_japanese_marks, text)
-  marks = re.findall(_japanese_marks, text)
-  text = ''
-  for i, sentence in enumerate(sentences):
-    if re.match(_japanese_characters, sentence):
-      if text!='':
-        text+=' '
-      labels = pyopenjtalk.extract_fullcontext(sentence)
-      for n, label in enumerate(labels):
-        phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
-        if phoneme not in ['sil','pau']:
-          text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
-        else:
-          continue
-        n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
-        a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
-        a2 = int(re.search(r"\+(\d+)\+", label).group(1))
-        a3 = int(re.search(r"\+(\d+)/", label).group(1))
-        if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
-          a2_next=-1
-        else:
-          a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
-        # Accent phrase boundary
-        if a3 == 1 and a2_next == 1:
-          text += ' '
-        # Falling
-        elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
-          text += '↓'
-        # Rising
-        elif a2 == 1 and a2_next == 2:
-          text += '↑'
-    if i<len(marks):
-      text += unidecode(marks[i]).replace(' ','')
-  return text
-
-
-def latin_to_hangul(text):
-  for regex, replacement in _latin_to_hangul:
-    text = re.sub(regex, replacement, text)
-  return text
-
-
-def divide_hangul(text):
-  for regex, replacement in _hangul_divided:
-    text = re.sub(regex, replacement, text)
-  return text
-
-
-def hangul_number(num, sino=True):
-  '''Reference https://github.com/Kyubyong/g2pK'''
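-  # sino=True spells digits with Sino-Korean readings (일, 이, ...);
-  # sino=False uses native Korean readings (한, 두, ...) where applicable.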
-  num = re.sub(',', '', num)
-
-  if num == '0':
-      return '영'
-  if not sino and num == '20':
-      return '스무'
-
-  digits = '123456789'
-  names = '일이삼사오육칠팔구'
-  digit2name = {d: n for d, n in zip(digits, names)}
-  
-  modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
-  decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
-  digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
-  digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
-  spelledout = []
-  for i, digit in enumerate(num):
-    i = len(num) - i - 1
-    if sino:
-      if i == 0:
-        name = digit2name.get(digit, '')
-      elif i == 1:
-        name = digit2name.get(digit, '') + '십'
-        name = name.replace('일십', '십')
-    else:
-      if i == 0:
-        name = digit2mod.get(digit, '')
-      elif i == 1:
-        name = digit2dec.get(digit, '')
-    if digit == '0':
-      if i % 4 == 0:
-        last_three = spelledout[-min(3, len(spelledout)):]
-        if ''.join(last_three) == '':
-          spelledout.append('')
-          continue
-      else:
-        spelledout.append('')
-        continue
-    if i == 2:
-      name = digit2name.get(digit, '') + '백'
-      name = name.replace('일백', '백')
-    elif i == 3:
-      name = digit2name.get(digit, '') + '천'
-      name = name.replace('일천', '천')
-    elif i == 4:
-      name = digit2name.get(digit, '') + '만'
-      name = name.replace('일만', '만')
-    elif i == 5:
-      name = digit2name.get(digit, '') + '십'
-      name = name.replace('일십', '십')
-    elif i == 6:
-      name = digit2name.get(digit, '') + '백'
-      name = name.replace('일백', '백')
-    elif i == 7:
-      name = digit2name.get(digit, '') + '천'
-      name = name.replace('일천', '천')
-    elif i == 8:
-      name = digit2name.get(digit, '') + '억'
-    elif i == 9:
-      name = digit2name.get(digit, '') + '십'
-    elif i == 10:
-      name = digit2name.get(digit, '') + '백'
-    elif i == 11:
-      name = digit2name.get(digit, '') + '천'
-    elif i == 12:
-      name = digit2name.get(digit, '') + '조'
-    elif i == 13:
-      name = digit2name.get(digit, '') + '십'
-    elif i == 14:
-      name = digit2name.get(digit, '') + '백'
-    elif i == 15:
-      name = digit2name.get(digit, '') + '천'
-    spelledout.append(name)
-  return ''.join(elem for elem in spelledout)
-
-
-def number_to_hangul(text):
-  '''Reference https://github.com/Kyubyong/g2pK'''
-  tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
-  for token in tokens:
-    num, classifier = token
-    if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
-      spelledout = hangul_number(num, sino=False)
-    else:
-      spelledout = hangul_number(num, sino=True)
-    text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
-  # digit by digit for remaining digits
-  digits = '0123456789'
-  names = '영일이삼사오육칠팔구'
-  for d, n in zip(digits, names):
-    text = text.replace(d, n)
-  return text
-
-
-def number_to_chinese(text):
-  numbers = re.findall(r'\d+(?:\.?\d+)?', text)
-  for number in numbers:
-    text = text.replace(number, cn2an.an2cn(number),1)
-  return text
-
-
-def chinese_to_bopomofo(text):
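-  # Segment with jieba, convert each word to bopomofo via pypinyin, and append
-  # the first-tone mark to syllables that end without an explicit tone mark.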
-  text=text.replace('、',',').replace(';',',').replace(':',',')
-  words=jieba.lcut(text,cut_all=False)
-  text=''
-  for word in words:
-    bopomofos=lazy_pinyin(word,BOPOMOFO)
-    if not re.search('[\u4e00-\u9fff]',word):
-      text+=word
-      continue
-    for i in range(len(bopomofos)):
-      if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
-        bopomofos[i]+='ˉ'
-    if text!='':
-      text+=' '
-    text+=''.join(bopomofos)
-  return text
-
-
-def latin_to_bopomofo(text):
-  for regex, replacement in _latin_to_bopomofo:
-    text = re.sub(regex, replacement, text)
-  return text
-
-
-def bopomofo_to_romaji(text):
-  for regex, replacement in _bopomofo_to_romaji:
-    text = re.sub(regex, replacement, text)
-  return text
-
-
-def basic_cleaners(text):
-  '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
-  text = lowercase(text)
-  text = collapse_whitespace(text)
-  return text
-
-
-def transliteration_cleaners(text):
-  '''Pipeline for non-English text that transliterates to ASCII.'''
-  text = convert_to_ascii(text)
-  text = lowercase(text)
-  text = collapse_whitespace(text)
-  return text
-
-
-def japanese_cleaners(text):
-  text=japanese_to_romaji_with_accent(text)
-  if re.match('[A-Za-z]',text[-1]):
-    text += '.'
-  return text
-
-
-def japanese_cleaners2(text):
-  return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
-
-
-def korean_cleaners(text):
-  '''Pipeline for Korean text'''
-  text = latin_to_hangul(text)
-  text = number_to_hangul(text)
-  text = j2hcj(h2j(text))
-  text = divide_hangul(text)
-  if re.match('[\u3131-\u3163]',text[-1]):
-    text += '.'
-  return text
-
-
-def chinese_cleaners(text):
-  '''Pipeline for Chinese text'''
-  text=number_to_chinese(text)
-  text=chinese_to_bopomofo(text)
-  text=latin_to_bopomofo(text)
-  if re.match('[ˉˊˇˋ˙]',text[-1]):
-    text += '。'
-  return text
-
-
-def zh_ja_mixture_cleaners(text):
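-  '''Pipeline for mixed text: Chinese segments wrapped in [ZH]...[ZH] and
-  Japanese segments in [JA]...[JA] are cleaned and romanized separately.'''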
-  chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
-  japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
-  for chinese_text in chinese_texts:
-    cleaned_text=number_to_chinese(chinese_text[4:-4])
-    cleaned_text=chinese_to_bopomofo(cleaned_text)
-    cleaned_text=latin_to_bopomofo(cleaned_text)
-    cleaned_text=bopomofo_to_romaji(cleaned_text)
-    cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
-    cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
-    cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
-    cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
-    text = text.replace(chinese_text,cleaned_text+' ',1)
-  for japanese_text in japanese_texts:
-    cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
-    text = text.replace(japanese_text,cleaned_text+' ',1)
-  text=text[:-1]
-  if re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
-    text += '.'
-  return text
\ No newline at end of file
diff --git a/spaces/mikeee/chatglm2-6b-4bit/app.py b/spaces/mikeee/chatglm2-6b-4bit/app.py
deleted file mode 100644
index bad73ba706a6496ec0a196e5409e6c1628a10018..0000000000000000000000000000000000000000
--- a/spaces/mikeee/chatglm2-6b-4bit/app.py
+++ /dev/null
@@ -1,386 +0,0 @@
-"""Credit to https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py while mistakes are mine."""
-# pylint: disable=broad-exception-caught, redefined-outer-name, missing-function-docstring, missing-module-docstring, too-many-arguments, line-too-long, invalid-name, redefined-builtin, redefined-argument-from-local
-# import gradio as gr
-
-# model_name = "models/THUDM/chatglm2-6b-int4"
-# gr.load(model_name).lauch()
-
-# %%writefile demo-4bit.py
-
-import os
-import time
-from textwrap import dedent
-
-import gradio as gr
-import mdtex2html
-import torch
-from loguru import logger
-from transformers import AutoModel, AutoTokenizer
-
-# fix timezone in Linux
-os.environ["TZ"] = "Asia/Shanghai"
-try:
-    time.tzset()  # type: ignore # pylint: disable=no-member
-except Exception:
-    # Windows
-    logger.warning("Windows, cant run time.tzset()")
-
-# model_name = "THUDM/chatglm2-6b"  # 7x?G
-model_name = "THUDM/chatglm2-6b-int4"  # 3.9G
-
-RETRY_FLAG = False
-
-tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-
-# model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
-
-# 4/8 bit
-# model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()
-
-has_cuda = torch.cuda.is_available()
-# has_cuda = False  # force cpu
-
-if has_cuda:
-    if model_name.endswith("int4"):
-        model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
-    else:
-        model = (
-            AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda().half()
-        )
-else:
-    model = AutoModel.from_pretrained(
-        model_name, trust_remote_code=True
-    ).float()  #  .half().float(), .float() required for CPU
-
-model = model.eval()
-
-_ = """Override Chatbot.postprocess"""
-
-
-def postprocess(self, y):
-    if y is None:
-        return []
-    for i, (message, response) in enumerate(y):
-        y[i] = (
-            None if message is None else mdtex2html.convert((message)),
-            None if response is None else mdtex2html.convert(response),
-        )
-    return y
-
-
-gr.Chatbot.postprocess = postprocess
-
-
-def parse_text(text):
-    """Copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/."""
-    lines = text.split("\n")
-    lines = [line for line in lines if line != ""]
-    count = 0
-    for i, line in enumerate(lines):
-        if "```" in line:
-            count += 1
-            items = line.split("`")
-            if count % 2 == 1:
-                lines[i] = f'<pre><code class="language-{items[-1]}">'
-            else:
-                lines[i] = "<br></code></pre>"
-        else:
-            if i > 0:
-                if count % 2 == 1:
-                    line = line.replace("`", r"\`")
-                    line = line.replace("<", "&lt;")
-                    line = line.replace(">", "&gt;")
-                    line = line.replace(" ", "&nbsp;")
-                    line = line.replace("*", "&ast;")
-                    line = line.replace("_", "&lowbar;")
-                    line = line.replace("-", "&#45;")
-                    line = line.replace(".", "&#46;")
-                    line = line.replace("!", "&#33;")
-                    line = line.replace("(", "&#40;")
-                    line = line.replace(")", "&#41;")
-                    line = line.replace("$", "&#36;")
-                lines[i] = "<br>" + line
-    text = "".join(lines)
-    return text
-
-
-def predict(
-    RETRY_FLAG, input, chatbot, max_length, top_p, temperature, history, past_key_values
-):
-    try:
-        chatbot.append((parse_text(input), ""))
-    except Exception as exc:
-        logger.error(exc)
-        logger.debug(f"{chatbot=}")
-        _ = """
-        if chatbot:
-            chatbot[-1] = (parse_text(input), str(exc))
-            yield chatbot, history, past_key_values
-        # """
-        yield chatbot, history, past_key_values
-
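-    # stream_chat yields partial responses incrementally; each iteration
-    # rewrites the last chatbot turn so the UI streams tokens as generated.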
-    for response, history, past_key_values in model.stream_chat(
-        tokenizer,
-        input,
-        history,
-        past_key_values=past_key_values,
-        return_past_key_values=True,
-        max_length=max_length,
-        top_p=top_p,
-        temperature=temperature,
-    ):
-        chatbot[-1] = (parse_text(input), parse_text(response))
-
-        yield chatbot, history, past_key_values
-
-
-def trans_api(input, max_length=4096, top_p=0.8, temperature=0.2):
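-    """Single-turn chat helper (no history); out-of-range sampling
-    parameters are clamped to workable defaults below."""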
-    if max_length < 10:
-        max_length = 4096
-    if top_p < 0.1 or top_p > 1:
-        top_p = 0.85
-    if temperature <= 0 or temperature > 1:
-        temperature = 0.01
-    try:
-        res, _ = model.chat(
-            tokenizer,
-            input,
-            history=[],
-            past_key_values=None,
-            max_length=max_length,
-            top_p=top_p,
-            temperature=temperature,
-        )
-        # logger.debug(f"{res=} \n{_=}")
-    except Exception as exc:
-        logger.error(f"{exc=}")
-        res = str(exc)
-
-    return res
-
-
-def reset_user_input():
-    return gr.update(value="")
-
-
-def reset_state():
-    return [], [], None
-
-
-# Delete last turn
-def delete_last_turn(chat, history):
-    if chat and history:
-        chat.pop(-1)
-        history.pop(-1)
-    return chat, history
-
-
-# Regenerate response
-def retry_last_answer(
-    user_input, chatbot, max_length, top_p, temperature, history, past_key_values
-):
-    # Flag this call as a retry; set before the branch so it is always defined
-    RETRY_FLAG = True
-    if chatbot and history:
-        # Remove the previous bot turn from the chat
-        chatbot.pop(-1)
-        # Resend the last user message
-        user_input = history[-1][0]
-        # Remove the bot response from the history
-        history.pop(-1)
-
-    yield from predict(
-        RETRY_FLAG,
-        user_input,
-        chatbot,
-        max_length,
-        top_p,
-        temperature,
-        history,
-        past_key_values,
-    )
-
-
-with gr.Blocks(title="ChatGLM2-6B-int4", theme=gr.themes.Soft(text_size="sm")) as demo:
-    # gr.HTML("""<h1 align="center">ChatGLM2-6B-int4</h1>""")
-    gr.HTML(
-        """<center><a href="https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>To avoid the queue and for faster inference Duplicate this Space and upgrade to GPU</center>"""
-    )
-
-    with gr.Accordion("🎈 Info", open=False):
-        _ = f"""
-            ## {model_name}
-
-            If an error occasionally occurs, try refreshing the browser and submitting again.
-
-            With a GPU, a query takes from a few seconds to a few tens of seconds, depending on the number of words/characters
-            in the question and response. The quality of the responses seems to vary quite a bit: even the same
-            question with the same parameters, asked at different times, can produce quite different responses.
-
-            * Low temperature: responses will be more deterministic and focused; High temperature: responses more creative.
-
-            * Suggested temperatures -- translation: up to 0.3; chatting: > 0.4
-
-            * Top P controls dynamic vocabulary selection based on context.
-
-            For a table of example values for different scenarios, refer to [this](https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683)
-
-            If the instance is not on a GPU (T4), it will be very slow. In that case you can take the [chatglm2-6b-4bit colab notebook](https://colab.research.google.com/drive/1WkF7kOjVCcBBatDHjaGkuJHnPdMWNtbW?usp=sharing) for a spin instead.
-
-            The T4 GPU is sponsored by a community GPU grant from Huggingface. Thanks a lot!
-            """
-        gr.Markdown(dedent(_))
-    chatbot = gr.Chatbot()
-    with gr.Row():
-        with gr.Column(scale=4):
-            with gr.Column(scale=12):
-                user_input = gr.Textbox(
-                    show_label=False,
-                    placeholder="Input...",
-                ).style(container=False)
-                RETRY_FLAG = gr.Checkbox(value=False, visible=False)
-            with gr.Column(min_width=32, scale=1):
-                with gr.Row():
-                    submitBtn = gr.Button("Submit", variant="primary")
-                    deleteBtn = gr.Button("Delete last turn", variant="secondary")
-                    retryBtn = gr.Button("Regenerate", variant="secondary")
-        with gr.Column(scale=1):
-            emptyBtn = gr.Button("Clear History")
-            max_length = gr.Slider(
-                0,
-                32768,
-                value=8192,
-                step=1.0,
-                label="Maximum length",
-                interactive=True,
-            )
-            top_p = gr.Slider(
-                0, 1, value=0.85, step=0.01, label="Top P", interactive=True
-            )
-            temperature = gr.Slider(
-                0.01, 1, value=0.95, step=0.01, label="Temperature", interactive=True
-            )
-
-    history = gr.State([])
-    past_key_values = gr.State(None)
-
-    user_input.submit(
-        predict,
-        [
-            RETRY_FLAG,
-            user_input,
-            chatbot,
-            max_length,
-            top_p,
-            temperature,
-            history,
-            past_key_values,
-        ],
-        [chatbot, history, past_key_values],
-        show_progress="full",
-    )
-    submitBtn.click(
-        predict,
-        [
-            RETRY_FLAG,
-            user_input,
-            chatbot,
-            max_length,
-            top_p,
-            temperature,
-            history,
-            past_key_values,
-        ],
-        [chatbot, history, past_key_values],
-        show_progress="full",
-        api_name="predict",
-    )
-    submitBtn.click(reset_user_input, [], [user_input])
-
-    emptyBtn.click(
-        reset_state, outputs=[chatbot, history, past_key_values], show_progress="full"
-    )
-
-    retryBtn.click(
-        retry_last_answer,
-        inputs=[
-            user_input,
-            chatbot,
-            max_length,
-            top_p,
-            temperature,
-            history,
-            past_key_values,
-        ],
-        # outputs = [chatbot, history, last_user_message, user_message]
-        outputs=[chatbot, history, past_key_values],
-    )
-    deleteBtn.click(delete_last_turn, [chatbot, history], [chatbot, history])
-
-    with gr.Accordion("Example inputs", open=True):
-        etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
-        examples = gr.Examples(
-            examples=[
-                ["What NFL team won the Super Bowl in the year Justin Bieber was born? "],
-                ["What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."],
-                ["Explain the plot of Cinderella in a sentence."],
-                [
-                    "How long does it take to become proficient in French, and what are the best methods for retaining information?"
-                ],
-                ["What are some common mistakes to avoid when writing code?"],
-                ["Build a prompt to generate a beautiful portrait of a horse"],
-                ["Suggest four metaphors to describe the benefits of AI"],
-                ["Write a pop song about leaving home for the sandy beaches."],
-                ["Write a summary demonstrating my ability to tame lions"],
-                ["鲁迅和周树人什么关系"],
-                ["从前有一头牛,这头牛后面有什么?"],
-                ["正无穷大加一大于正无穷大吗?"],
-                ["正无穷大加正无穷大大于正无穷大吗?"],
-                ["-2的平方根等于什么"],
-                ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
-                ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
-                ["鲁迅和周树人什么关系 用英文回答"],
-                ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
-                [f"{etext} 翻成中文,列出3个版本"],
-                [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本"],
-                ["js 判断一个数是不是质数"],
-                ["js 实现python 的 range(10)"],
-                ["js 实现python 的 [*(range(10)]"],
-                ["假定 1 + 2 = 4, 试求 7 + 8"],
-                ["Erkläre die Handlung von Cinderella in einem Satz."],
-                ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch"],
-            ],
-            inputs=[user_input],
-            examples_per_page=30,
-        )
-
-    with gr.Accordion("For Chat/Translation API", open=False, visible=False):
-        input_text = gr.Text()
-        tr_btn = gr.Button("Go", variant="primary")
-        out_text = gr.Text()
-    tr_btn.click(
-        trans_api,
-        [input_text, max_length, top_p, temperature],
-        out_text,
-        # show_progress="full",
-        api_name="tr",
-    )
-    _ = """
-    input_text.submit(
-        trans_api,
-        [input_text, max_length, top_p, temperature],
-        out_text,
-        show_progress="full",
-        api_name="tr1",
-    )
-    # """
-
-# demo.queue().launch(share=False, inbrowser=True)
-# demo.queue().launch(share=True, inbrowser=True, debug=True)
-
-# concurrency_count > 1 requires more memory, max_size: queue size
-# T4 medium: 30GB, model size: ~4G concurrency_count = 6
-# leave one for api access
-# reduce to 5 if OOM occurs too often
-
-demo.queue(concurrency_count=6, max_size=30).launch(debug=True)
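-
-# A minimal sketch of calling the endpoints exposed above from another machine
-# with gradio_client (assumes this Space is live and the signatures match):
-#
-#   from gradio_client import Client
-#   client = Client("mikeee/chatglm2-6b-4bit")
-#   # "tr" endpoint takes (input_text, max_length, top_p, temperature)
-#   print(client.predict("Hello", 4096, 0.85, 0.2, api_name="/tr"))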
diff --git a/spaces/mshukor/UnIVAL/fairseq/tests/test_online_backtranslation.py b/spaces/mshukor/UnIVAL/fairseq/tests/test_online_backtranslation.py
deleted file mode 100644
index 0ae7e773da0ff838b3c8151bc14b84a6a9238a72..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/tests/test_online_backtranslation.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import tempfile
-import unittest
-from pathlib import Path
-from typing import Any, Dict, Sequence
-
-import fairseq.data.indexed_dataset as indexed_dataset
-import fairseq.options
-import fairseq.tasks.online_backtranslation as obt
-import torch
-from tests import utils
-
-
-def mk_sample(tokens: Sequence[int], batch_size: int = 2) -> Dict[str, Any]:
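-    """Build a dummy fairseq batch: `tokens` stacked batch_size times."""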
-    batch = torch.stack([torch.tensor(tokens, dtype=torch.long)] * batch_size)
-    sample = {
-        "net_input": {
-            "src_tokens": batch,
-            "prev_output_tokens": batch,
-            "src_lengths": torch.tensor([len(tokens)] * batch_size, dtype=torch.long),
-        },
-        "target": batch[:, 1:],
-    }
-    return sample
-
-
-def mk_dataset(num_samples: int, max_len: int, output: Path):
-    output.parent.mkdir(exist_ok=True)
-    idx = indexed_dataset.IndexedDatasetBuilder(str(output))
-    data = torch.randint(5, 100, (num_samples, max_len))
-    lengths = torch.randint(3, max_len, (num_samples,))
-    for d, l in zip(data, lengths):
-        d[0] = 0
-        idx.add_item(d[:l])
-    idx.finalize(output.with_suffix(".idx"))
-    assert output.exists()
-    assert output.with_suffix(".idx").exists()
-
-
-class OnlineBacktranslationTest(unittest.TestCase):
-
-    tmp_dir = Path(tempfile.mkdtemp(suffix="OnlineBacktranslationTest"))
-
-    @classmethod
-    def obt_task(
-        cls, languages: Sequence[str], data: Path = None, language_mapping: str = None
-    ):
-        dict_path = cls.tmp_dir / "dict.txt"
-        if not dict_path.exists():
-            dictionary = utils.dummy_dictionary(100)
-            dictionary.save(str(dict_path))
-
-        if data is not None:
-            (data / "dict.txt").write_text(dict_path.read_text())
-        else:
-            data = cls.tmp_dir
-        assert len(languages) >= 2
-
-        kwargs = {
-            "arch": "transformer",
-            # --max-sentences=1 for better predictability of batches
-            "max_sentences": 1,
-            # Use characteristic dimensions
-            "encoder_layers": 3,
-            "encoder_embed_dim": 12,
-            "encoder_ffn_embed_dim": 14,
-            "encoder_attention_heads": 4,
-            "decoder_layers": 3,
-            "decoder_embed_dim": 12,
-            "decoder_output_dim": 12,
-            "decoder_ffn_embed_dim": 14,
-            "decoder_attention_heads": 4,
-            # Disable dropout so we have comparable tests.
-            "dropout": 0,
-            "attention_dropout": 0,
-            "activation_dropout": 0,
-            "encoder_layerdrop": 0,
-        }
-
-        args = fairseq.options.get_args(
-            data,
-            task="online_backtranslation",
-            mono_langs=",".join(languages),
-            valid_lang_pairs=f"{languages[0]}-{languages[1]}",
-            tokens_per_sample=256,
-            language_mapping=language_mapping,
-            **kwargs,
-        )
-        task = obt.OnlineBackTranslationTask.setup_task(args)
-        # we need to build the model to have the correct dictionary
-        model = task.build_model(task.args)
-        return task, model
-
-    def tmp_path(self, test_case: str) -> Path:
-        return Path(tempfile.mkdtemp(test_case, dir=self.tmp_dir))
-
-    def test_lang_tokens(self):
-        task, model = self.obt_task(["en", "ro", "zh"])
-        assert obt._lang_token("en") in task.dictionary
-        assert obt._lang_token("ro") in task.dictionary
-        assert obt._lang_token("zh") in task.dictionary
-
-        en_bos = obt._lang_token_index(task.common_dict, "en")
-        assert "en" == task.common_dict[en_bos].strip("_")
-        zh_bos = obt._lang_token_index(task.common_dict, "zh")
-        assert "zh" == task.common_dict[zh_bos].strip("_")
-        zh_sample = mk_sample([zh_bos, 16, 14, 12, 10])
-
-        # we expect to receive the bos token for translation
-        assert task.get_bos_token_from_sample(zh_sample) == en_bos
-
-    def test_backtranslate_sample(self):
-        task, model = self.obt_task(["en", "ro", "zh"])
-
-        en_bos = obt._lang_token_index(task.common_dict, "en")
-        zh_bos = obt._lang_token_index(task.common_dict, "zh")
-        sample = mk_sample([zh_bos, 16, 14, 12, 10])
-
-        task.backtranslate_sample(sample, "zh", "en")
-        target_zh = list(sample["target"][0])
-        assert target_zh == [16, 14, 12, 10]  # original zh sentence
-        generated_en = sample["net_input"]["src_tokens"][0]
-        assert generated_en[0] == en_bos
-
-    def test_train_dataset(self):
-        data = self.tmp_path("test_train_dataset")
-        mk_dataset(20, 10, data / "en" / "train.bin")
-        mk_dataset(10, 10, data / "zh" / "train.bin")
-        task, model = self.obt_task(["en", "zh"], data)
-        task.load_dataset("train")
-
-        en_bos = obt._lang_token_index(task.common_dict, "en")
-        zh_bos = obt._lang_token_index(task.common_dict, "zh")
-
-        train = task.datasets["train"]
-        train.ordered_indices()
-        train.prefetch([0, 19])
-        sample_0 = train[0]
-        sample_19 = train[19]
-        self.assertEqual(
-            set(sample_0.keys()), {"en-BT", "en-DENOISE", "zh-BT", "zh-DENOISE"}
-        )
-        for sample in (sample_0, sample_19):
-            self.assertEqual(sample["en-BT"]["source"][0], en_bos)
-            # bt target isn't ready to look at.
-            self.assertEqual(sample["en-DENOISE"]["source"][0], en_bos)
-            # TODO: What could we check on the target side?
-
-        for i in range(10):
-            # Zh dataset is shorter, and is wrapped around En dataset.
-            train.prefetch([i, i + 10])
-            self.assertEqual(
-                list(train[i]["zh-DENOISE"]["source"]),
-                list(train[i + 10]["zh-DENOISE"]["source"]),
-            )
-            self.assertEqual(train[i]["zh-DENOISE"]["source"][0].item(), zh_bos)
-
-        # Sorted by increasing len
-        self.assertLess(
-            len(sample_0["en-BT"]["source"]), len(sample_19["en-BT"]["source"])
-        )
-
-    def test_valid_dataset(self):
-        data = self.tmp_path("test_valid_dataset")
-        mk_dataset(10, 21, data / "valid.en-zh.en.bin")
-        mk_dataset(10, 21, data / "valid.en-zh.zh.bin")
-
-        task, model = self.obt_task(["en", "zh"], data)
-        valid = task.load_dataset("valid")
-        en_bos = obt._lang_token_index(task.common_dict, "en")
-
-        assert valid is not None
-        valid.prefetch(range(10))
-        sample_0 = valid[0]
-        sample_9 = valid[9]
-        self.assertEqual(sample_0["id"], 0)
-        self.assertEqual(sample_9["id"], 9)
-        self.assertEqual(sample_0["source"][0], en_bos)
-        self.assertEqual(sample_9["source"][0], en_bos)
-        # TODO: Could we test the target side?
-
-    def assertFnMatch(self, fn, values):
-        for x, y in values.items():
-            fn_x = fn(x)
-            self.assertEqual(fn_x, y, f"Fn has wrong value: fn({x}) = {fn_x} != {y}")
-
-    def test_piecewise_linear_fn(self):
-        self.assertFnMatch(
-            obt.PiecewiseLinearFn.from_string("1.0"), {0: 1, 100: 1, 500: 1, 1000: 1}
-        )
-        self.assertFnMatch(
-            obt.PiecewiseLinearFn.from_string("0:1,1000:0"),
-            {0: 1, 500: 0.5, 1000: 0, 2000: 0},
-        )
-        self.assertFnMatch(
-            obt.PiecewiseLinearFn.from_string("0:0,1000:1"),
-            {0: 0, 500: 0.5, 1000: 1, 2000: 1},
-        )
-        self.assertFnMatch(
-            obt.PiecewiseLinearFn.from_string("0:0,1000:1,2000:0"),
-            {0: 0, 500: 0.5, 1000: 1, 1500: 0.5, 2000: 0, 3000: 0},
-        )
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3Planesoft 3D Screensavers Plus All In One 80 RePack.torrent.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3Planesoft 3D Screensavers Plus All In One 80 RePack.torrent.md
deleted file mode 100644
index bc3193ef5137aac96a28b8431c453578d7c2aa14..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3Planesoft 3D Screensavers Plus All In One 80 RePack.torrent.md	
+++ /dev/null
@@ -1,69 +0,0 @@
-<br />
-<h1>3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent: What Is It and How to Use It</h1>
-<p>Do you want to spice up your desktop with stunning 3D animations? Do you want to enjoy a variety of beautiful scenes without spending a fortune on expensive software? Do you want to download and install over 130 screensavers in one convenient package? If you answered yes to any of these questions, then you might be interested in <strong>3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent</strong>.</p>
-<h2>3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent</h2><br /><p><b><b>DOWNLOAD</b> &#10026;&#10026;&#10026; <a href="https://urlcod.com/2uI9MO">https://urlcod.com/2uI9MO</a></b></p><br /><br />
-<p>In this article, I will explain what 3Planesoft 3D Screensavers are, what the RePack.torrent file is, and how to use it. I will also compare 3Planesoft 3D Screensavers with other screensaver options and answer some common questions. By the end of this article, you will have a clear idea of whether 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent is worth downloading and installing on your computer.</p>
-<h2>What Are 3Planesoft 3D Screensavers?</h2>
-<p>3Planesoft 3D Screensavers are a collection of high-quality screensavers that display realistic 3D animations on your desktop. They are designed to enhance your visual experience and create a relaxing atmosphere. You can choose from a wide range of themes, such as nature, space, fantasy, holidays, animals, and more. Some examples of 3Planesoft 3D Screensavers are:</p>
-<ul>
-<li>The Lost Watch: A beautiful scene of a sunken watch in a river with fish and plants.</li>
-<li>Earth: A stunning view of our planet from space with clouds, stars, and satellites.</li>
-<li>Fireplace: A cozy fireplace with crackling flames and soothing sounds.</li>
-<li>Koi Fish: A serene pond with colorful koi fish and water lilies.</li>
-<li>Christmas Bells: A festive scene of a Christmas tree with bells, candles, and snowflakes.</li>
-</ul>
-<p>3Planesoft 3D Screensavers have many features and benefits that make them stand out from other screensavers. Some of them are:</p>
-<ul>
-<li>They are easy to install and use. You can access them from your desktop or from the Windows screensaver settings.</li>
-<li>They are customizable. You can adjust the resolution, sound, music, brightness, and other parameters to suit your preferences.</li>
-<li>They are compatible with Windows XP, Vista, 7, 8, and 10. They also support multiple monitors and widescreen displays.</li>
-<li>They are updated regularly. You can get new screensavers and updates for free from the official website.</li>
-<li>They are affordable. You can buy individual screensavers for $9.99 each or get the whole collection for $99. However, you can also download them for free using the RePack.torrent file that I will explain later.</li>
-</ul>
-<h2>What Is a RePack.torrent File?</h2>
-<p>A torrent file is a small file that contains information about a larger file or a group of files that you want to download from the internet. It does not contain the actual files, but rather the metadata, such as the file names, sizes, locations, and checksums. To download the files using a torrent file, you need a BitTorrent client, which is a software that connects you to other users who have the same torrent file and share the files with each other. This way, you can download faster and more efficiently than using a direct download link.</p>
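-<p>To make this concrete, here is a minimal, illustrative Python sketch of reading that metadata yourself with a hand-rolled bencode decoder. The file name <code>example.torrent</code> is hypothetical, and in practice you would use an established library; this is only to show that a torrent file holds names, sizes, and checksums rather than the content itself:</p>
-```python
-# Minimal bencode decoder: a .torrent file is bencoded metadata, not content.
-def bdecode(data: bytes, i: int = 0):
-    """Decode one bencoded value starting at index i; return (value, next_i)."""
-    c = data[i:i + 1]
-    if c == b"i":                        # integer: i<digits>e
-        end = data.index(b"e", i)
-        return int(data[i + 1:end]), end + 1
-    if c == b"l":                        # list: l<items>e
-        items, i = [], i + 1
-        while data[i:i + 1] != b"e":
-            item, i = bdecode(data, i)
-            items.append(item)
-        return items, i + 1
-    if c == b"d":                        # dictionary: d<key><value>...e
-        d, i = {}, i + 1
-        while data[i:i + 1] != b"e":
-            key, i = bdecode(data, i)
-            d[key], i = bdecode(data, i)
-        return d, i + 1
-    colon = data.index(b":", i)          # byte string: <length>:<bytes>
-    length = int(data[i:colon])
-    return data[colon + 1:colon + 1 + length], colon + 1 + length
-
-with open("example.torrent", "rb") as f:     # hypothetical file name
-    meta, _ = bdecode(f.read())
-info = meta.get(b"info", {})                 # metadata only: names, sizes, hashes
-print(info.get(b"name"), info.get(b"length"))
-```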
-<p>A RePack.torrent file is a special type of torrent file that contains compressed and repacked versions of original files. This means that the files have been reduced in size and modified to remove unnecessary or unwanted components, such as languages, subtitles, extras, etc. The purpose of repacking is to save disk space and bandwidth while maintaining the quality and functionality of the files.</p>
-<p>3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent is a RePack.torrent file that contains all the 130+ screensavers from 3Planesoft in one package. The total size of the package is about 2.8 GB, which is much smaller than the original size of over 4 GB. The RePack.torrent file also includes a crack that allows you to use the screensavers without buying them or registering them.</p>
-<h2>How to Download and Install 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent</h2>
-<p>If you want to download and install 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent on your computer, you need to follow these steps:</p>
-<ol>
-<li>Download a BitTorrent client if you don't have one already. There are many free and reliable options available online, such as uTorrent, BitTorrent, or qBittorrent. Install the BitTorrent client on your computer and launch it.</li>
-<li>Download the RePack.torrent file from a trusted source. There are many sources where you can download the RePack.torrent file, but not all of them are trustworthy. Some may contain malware, viruses, or fake files that can harm your computer or compromise your privacy. Therefore, you should always be careful and use a reliable torrent site that has good reviews and ratings from other users. You should also use a VPN to hide your IP address and encrypt your traffic when torrenting, as some ISPs may block or throttle your connection or monitor your activity. A VPN can also help you bypass geo-restrictions and access torrent sites that are blocked in your region.</li>
-<li>Once you have downloaded the RePack.torrent file, open it with your BitTorrent client. It will start downloading the files from the peers who have them. Depending on the speed of your connection and the number of seeders, this may take some time. You can check the progress and status of your download on your BitTorrent client.</li>
-<li>After the download is complete, you will have a folder with all the screensavers and a crack file. To install the screensavers, run the setup.exe file and follow the instructions. You can choose which screensavers you want to install or install them all at once. The installation process is simple and fast.</li>
-<li>To activate the screensavers, copy the crack file and paste it into the installation folder, replacing the original file. This will remove the registration requirement and allow you to use the screensavers for free.</li>
-<li>Scan the files with an antivirus program before opening them. Even if you download them from a trusted source, there is always a risk of getting infected with malware or viruses when torrenting. Therefore, it is advisable to scan the files with an antivirus program before opening them. This will ensure that your computer is safe and secure.</li>
-</ol>
-<h2>How to Use 3Planesoft 3D Screensavers</h2>
-<p>Now that you have downloaded and installed 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent, you can start using them on your desktop. Here are some tips on how to use 3Planesoft 3D Screensavers:</p>
-<ul>
-<li>To select a screensaver from the library, right-click on your desktop and choose Personalize. Then click on Screen Saver and select 3Planesoft Screensaver Manager from the drop-down menu. You will see a list of all the screensavers that you have installed. You can preview them, change their settings, or set them as your default screensaver.</li>
-<li>To set a custom video or GIF as a screensaver, right-click on your desktop and choose Personalize. Then click on Screen Saver and select Video Screensaver from the drop-down menu. You will see a window where you can browse and select any video or GIF file from your computer or from online sources. You can also adjust the volume, loop mode, and playback speed of the video or GIF.</li>
-<li>To adjust the settings of a screensaver, right-click on your desktop and choose Personalize. Then click on Screen Saver and select the screensaver that you want to modify from the drop-down menu. You will see a window where you can change various parameters of the screensaver, such as resolution, sound, music, brightness, etc.</li>
-<li>To uninstall a screensaver, go to Control Panel > Programs > Programs and Features and find 3Planesoft Screensaver Manager in the list of installed programs. Right-click on it and choose Uninstall. Follow the instructions to remove the screensaver from your computer.</li>
-</ul> <h2>Comparison of 3Planesoft 3D Screensavers with Other Screensavers</h2>
-<p>3Planesoft 3D Screensavers are not the only screensaver option that you can use on your desktop. There are many other alternatives that you can try, such as DeskScapes, Screensaver Wonder, and Lively Wallpaper. Each of them has its own pros and cons, depending on your needs and preferences. Here is a table that compares some of the main features and differences of these screensaver options:</p>
-<p></p>
-
-| Feature | 3Planesoft 3D Screensavers | DeskScapes | Screensaver Wonder | Lively Wallpaper |
-| --- | --- | --- | --- | --- |
-| Number of screensavers | Over 130 | Over 60 | Unlimited (customizable) | Over 100 |
-| Quality of animations | High (3D) | High (3D) | Low (2D) | High (4K) |
-| Customization options | Moderate (resolution, sound, music, brightness, etc.) | High (effects, filters, colors, etc.) | High (images, videos, sounds, texts, etc.) | Moderate (resolution, sound, volume, etc.) |
-| Compatibility with Windows versions | XP, Vista, 7, 8, 10 | 7, 8, 10 | XP, Vista, 7, 8, 10 | 7, 8, 10 |
-| Price | $9.99 per screensaver or $99 for the whole collection (or free with RePack.torrent) | $9.99 for the software or $29.99 for the Object Desktop suite | $34.85 for the software or $49.95 for the Gold Bundle suite | Free (open source) |
-
-<p>As you can see from the table, each screensaver option has its own advantages and disadvantages. Some may offer more variety and quality than others, but they may also cost more or require more system resources. Some may be more customizable and flexible than others, but they may also be more complicated or less secure. Ultimately, the choice depends on your personal taste and budget.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent is a great option for anyone who wants to enjoy stunning 3D animations on their desktop. It offers a large collection of screensavers that cover various themes and genres. It also allows you to download and install them for free using a torrent file that reduces their size and removes their registration requirement. However, you should also be aware of the potential risks of downloading torrents from untrusted sources and scan them with an antivirus program before opening them.</p>
-<p>If you are interested in trying out 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent, you can follow the steps that I have explained in this article. You can also compare it with other screensaver options that I have mentioned and see which one suits you better. I hope that this article has been helpful and informative for you.</p>
-<p>Now that you have learned everything you need to know about 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent, why not give it a try and see for yourself how amazing it is? You won't regret it!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent:</p>
-<h4>Where can I download the RePack.torrent file?</h4>
-<p>You can download the RePack.torrent file from various torrent sites online. However, not all of them are trustworthy and safe. Some may contain malware, viruses, or fake files that can harm your computer or compromise your privacy. Therefore, you should always use a reliable torrent site that has good reviews and ratings from other users. You should also use a VPN to hide your IP address and encrypt your traffic when torrenting.</p>
-<h4>How can I update the screensavers?</h4>
-<p>You can update the screensavers by downloading the latest version of the RePack.torrent file from the same source that you downloaded it from before. Then you can run the setup.exe file again and install the updates over the existing screensavers. Alternatively, you can download individual updates from the official website of 3Planesoft and install them manually.</p>
-<h4 > How can I fix common issues with the screensavers?</h4>
-<p>Some common issues that you may encounter with the screensavers are:</p>
-<ul>
-<li>The screensavers do not run or display properly. This may be due to incompatible graphics drivers, outdated DirectX, or insufficient system resources. To fix this, you can update your graphics drivers, install the latest version of DirectX, or close other programs that may be using too much CPU or RAM.</li>
-<li>The screensavers crash or freeze. This may be due to corrupted files, malware infection, or conflicting software. To fix this, you can scan the files with an antivirus program, reinstall the screensavers, or disable any software that may interfere with the screensavers, such as antivirus, firewall, or screen recording programs.</li>
-<li>The screensavers do not activate automatically. This may be due to incorrect settings, power options, or mouse movements. To fix this, you can check the settings of the screensaver manager and make sure that the screensaver is enabled and the wait time is set correctly. You can also check the power options of your computer and make sure that the display is not set to turn off or sleep after a certain period of inactivity. You can also avoid moving your mouse or pressing any keys when you want the screensaver to activate.</li>
-</ul>
-<h4>How can I contact support?</h4>
-<p>If you have any questions or issues that are not covered by this article or the FAQs, you can contact the support team of 3Planesoft by visiting their website and filling out a contact form. You can also send them an email at support@3planesoft.com. They will respond to your queries as soon as possible.</p>
-<h4>How can I share feedback?</h4>
-<p>If you have any feedback or suggestions for 3Planesoft 3D Screensavers Plus All in One 80 RePack.torrent, you can share them with the developers and other users by visiting their website and leaving a comment or a review. You can also join their forum and participate in discussions and polls. Your feedback is valuable and appreciated.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download The Episode 1.28 Full Movie Italian Dubbed In Torrent VERIFIED.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download The Episode 1.28 Full Movie Italian Dubbed In Torrent VERIFIED.md
deleted file mode 100644
index d43ba7b324ea2cd49e00cd30c9775e08130f8b42..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download The Episode 1.28 Full Movie Italian Dubbed In Torrent VERIFIED.md	
+++ /dev/null
@@ -1,31 +0,0 @@
-<br />
-<h1>Download The Episode 1.28 Full Movie Italian Dubbed In Torrent: How to Watch the Latest Sci-Fi Thriller Online</h1>
-
-<p>If you are a fan of sci-fi movies, you might have heard of The Episode 1.28, a new film that has been making waves in the international box office. The movie is set in a dystopian future where a mysterious virus has wiped out most of humanity, leaving behind a few survivors who are hunted by robotic drones. The only hope for the remaining humans is to find a hidden bunker where they can access a secret code that can stop the drones and save the world.</p>
-
-<p>The Episode 1.28 is a gripping and action-packed movie that will keep you on the edge of your seat. The movie features stunning visual effects, a compelling story, and a talented cast of actors. The movie has been praised by critics and audiences alike, and has received several awards and nominations.</p>
-<h2>Download The Episode 1.28 Full Movie Italian Dubbed In Torrent</h2><br /><p><b><b>Download Zip</b> &#10040; <a href="https://urlcod.com/2uIaQY">https://urlcod.com/2uIaQY</a></b></p><br /><br />
-
-<p>But what if you want to watch The Episode 1.28 in Italian? Unfortunately, the movie is not available in Italian cinemas or streaming platforms yet. However, there is a way to watch The Episode 1.28 full movie Italian dubbed in torrent online. Here is how you can do it:</p>
-
-<ol>
-<li>First, you need to download a torrent client software on your device. A torrent client is a program that allows you to download files from other users who are sharing them on the internet. Some of the most popular torrent clients are uTorrent, BitTorrent, and qBittorrent.</li>
-<li>Next, you need to find a reliable torrent site that has The Episode 1.28 full movie Italian dubbed in torrent file. A torrent file is a small file that contains information about the larger file that you want to download, such as its name, size, and location. Some of the best torrent sites for movies are The Pirate Bay, RARBG, and 1337x.</li>
-<li>Then, you need to download The Episode 1.28 full movie Italian dubbed in torrent file from the torrent site of your choice. You can do this by clicking on the download button or magnet link on the site. This will open your torrent client and start downloading the movie file.</li>
-<li>Finally, you need to wait until the download is complete. Depending on your internet speed and the size of the file, this may take from a few minutes to several hours. Once the download is done, you can open the movie file with your preferred media player and enjoy watching The Episode 1.28 full movie Italian dubbed in torrent online.</li>
-</ol>
-
-<p>That's it! You have successfully downloaded and watched The Episode 1.28 full movie Italian dubbed in torrent online. However, before you do this, you should be aware of some risks and precautions:</p>
-
-<ul>
-<li>Downloading and watching movies from torrent sites may be illegal in your country. You may face legal consequences if you are caught by authorities or copyright holders. Therefore, you should always check the laws and regulations in your country before using torrent sites.</li>
-<li>Downloading and watching movies from torrent sites may expose your device to malware and viruses. You may accidentally download malicious files that can harm your device or steal your personal information. Therefore, you should always use a reputable antivirus software and scan every file before opening it.</li>
-<li>Downloading and watching movies from torrent sites may affect your internet speed and bandwidth. You may experience slower browsing and streaming while downloading files from torrent sites. Therefore, you should always limit your download speed and upload speed in your torrent client settings.</li>
-</ul>
-
-<p>Downloading and watching movies from torrent sites is not recommended by us or by the creators of The Episode 1.28. We encourage you to support the original work by watching it legally when it becomes available in your region.</p>
-
-<p>However, if you still want to watch The Episode 1.28 full movie Italian dubbed in torrent online, we hope this article has helped you with that.</p>
-<p></p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Whatsapp For Blackberry 8520 Freel.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Whatsapp For Blackberry 8520 Freel.md
deleted file mode 100644
index 8a3b2bbefcc764816ce32f2d846ec30f7b33fad9..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Whatsapp For Blackberry 8520 Freel.md	
+++ /dev/null
@@ -1,38 +0,0 @@
-<br />
-<h1>How to Download Whatsapp for Blackberry 8520 Freel</h1>
-<p>Whatsapp is one of the most popular messaging apps in the world, with over 2 billion users. It allows you to send text messages, voice notes, photos, videos, documents, and more to your contacts for free. But what if you have an old Blackberry 8520 Freel device and you want to use Whatsapp on it? Is it possible?</p>
-<p>The answer is yes, but it's not very easy. Whatsapp officially stopped supporting Blackberry OS devices in 2016, and you can no longer download it from the Blackberry App World. However, there are some workarounds that you can try to install Whatsapp on your Blackberry 8520 Freel. Here are some of them:</p>
-<h2>Download Whatsapp For Blackberry 8520 Freel</h2><br /><p><b><b>Download File</b> >>> <a href="https://urlcod.com/2uIc83">https://urlcod.com/2uIc83</a></b></p><br /><br />
-<h2>Method 1: Use the Whatsapp website</h2>
-<p>One of the simplest ways to download Whatsapp for Blackberry 8520 Freel is to use the official Whatsapp website. Here are the steps:</p>
-<ol>
-<li>Open the browser on your Blackberry 8520 Freel and go to <a href="https://www.whatsapp.com/download">https://www.whatsapp.com/download</a> [^1^].</li>
-<li>Scroll down and find the section for Mobile and Tablet devices.</li>
-<li>Select Android as your operating system and tap on Download Now.</li>
-<li>You will be redirected to the Google Play Store page for Whatsapp. Tap on Install and wait for the download to finish.</li>
-<li>Once the installation is complete, open Whatsapp and follow the instructions to set up your account.</li>
-</ol>
-<p>Note that this method may not work on some Blackberry 8520 Freel devices, as they may not support the latest version of Whatsapp or the Google Play Store. If you encounter any errors or compatibility issues, try the next method.</p>
-<h2>Method 2: Use a third-party app store</h2>
-<p>Another way to download Whatsapp for Blackberry 8520 Freel is to use a third-party app store that still offers older versions of Whatsapp that are compatible with Blackberry OS. One of such app stores is APKPure. Here are the steps:</p>
-<ol>
-<li>Open the browser on your Blackberry 8520 Freel and go to <a href="https://apkpure.com/whatsapp-messenger/com.whatsapp">https://apkpure.com/whatsapp-messenger/com.whatsapp</a>.</li>
-<li>Scroll down and find the section for Previous versions.</li>
-<li>Select a version of Whatsapp that was released before December 31, 2016. For example, you can choose version 2.16.399.</li>
-<li>Tap on Download APK and wait for the file to be downloaded.</li>
-<li>Once the download is complete, open the file manager on your Blackberry 8520 Freel and locate the downloaded APK file.</li>
-<li>Tap on it and follow the instructions to install Whatsapp on your device.</li>
-<li>Open Whatsapp and set up your account as usual.</li>
-</ol>
-<p>Note that this method may expose your device to security risks, as you are downloading an unofficial and outdated version of Whatsapp from an unverified source. You may also experience some bugs or glitches while using Whatsapp on your Blackberry 8520 Freel. If you want a more reliable and safe method, try the next one.</p>
-<h2>Method 3: Use a Youtube tutorial</h2>
-<p>The last method to download Whatsapp for Blackberry 8520 Freel is to follow a Youtube tutorial that shows you how to install Whatsapp on your device step by step. One of such tutorials is this one: <a href="https://www.youtube.com/watch?v=6FgnaWCrMwI">https://www.youtube.com/watch?v=6FgnaWCrMwI</a> [^2^]. Here are the main steps:</p>
-<ol>
-<li>Download and install Blackberry Desktop Software on your computer.</li>
-<li>Connect your Blackberry 8520 Freel to your computer using a USB cable.</li>
-<li>Open Blackberry Desktop Software and backup your data.</li>
-<li>Download and install BBSAK (Blackberry Swiss Army Knife) on your computer.</li>
-<li>Open BBSAK and wipe your device.</li>
-</ol>
-<p></p><br />
-<br />
-<br />
\ No newline at end of file
diff --git "a/spaces/nickmuchi/fintweet-GPT-Search/pages/1_Tweets_Visualization_\360\237\224\216_.py" "b/spaces/nickmuchi/fintweet-GPT-Search/pages/1_Tweets_Visualization_\360\237\224\216_.py"
deleted file mode 100644
index 4494bf25383b2818e8d00b2d5641958035f4132b..0000000000000000000000000000000000000000
--- "a/spaces/nickmuchi/fintweet-GPT-Search/pages/1_Tweets_Visualization_\360\237\224\216_.py"
+++ /dev/null
@@ -1,100 +0,0 @@
-from variables import *
-import plotly_express as px
-from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
-import matplotlib.pyplot as plt
-import streamlit as st
-import numpy as np
-import pandas as pd
-import textwrap
-
-st.set_option('deprecation.showPyplotGlobalUse', False)
-
-#st.set_page_config(page_title="Earnings Sentiment Analysis", page_icon="📈")
-st.sidebar.header("Sentiment Analysis Visualization")
-st.markdown("## Sentiment Analysis and Density Graphs")
-
-max_word = st.sidebar.slider(label= "WordCloud Max Words", min_value=20, max_value=500, value=50)
-max_font = st.sidebar.slider(label = "WordCloud Max Font", min_value=50, max_value=350, value=50)
-
-
-stopwords = set(STOPWORDS)
-stopwords.update(['us', 'one', 'will', 'said', 'now', 'well', 'man', 'may',
-    'little', 'say', 'must', 'way', 'long', 'yet', 'mean',
-    'put', 'seem', 'asked', 'made', 'half', 'much',
-    'certainly', 'might', 'came','RT','amp'])
-
-def cloud(text, max_word, max_font, random):
-    '''Generate Word Cloud'''
-    
-    wc = WordCloud(background_color="white", colormap="hot", max_words=max_word,
-    stopwords=stopwords, max_font_size=max_font, random_state=random).generate(text)
-
-    return wc
-    
-try:
-
-    if 'tdf' in st.session_state:
-        
-        df = st.session_state['tdf']
-        # df['creation_date'] = pd.to_datetime(df['creation_date'], 
-        #                                format='%Y-%m-%d %H:%M:%S-%Z', 
-        #                                errors='coerce').dt.date
-
-        with st.container():
-            st.subheader('Sentiment Scatter Plot')
-            ## Display negative sentence locations
-            ht = df.tweet.apply(lambda txt: '<br>'.join(textwrap.wrap(txt, width=70)))
-            fig = px.scatter(df, y='sentiment', x='creation_time', color='sentiment', size='sentiment_confidence', hover_data=[ht,'topic','username'], \
-                             color_discrete_map={"Bearish":"firebrick","Neutral":"navajowhite","Bullish":"darkgreen"}, \
-                             title='Sentiment Score Distribution')
-        
-            fig.update_layout(
-            	showlegend=False,
-                autosize=True,
-                width=900,
-                height=500,
-                margin=dict(
-                    b=5,
-                    t=50,
-                    pad=2
-                )
-            )
-        
-            st.plotly_chart(fig)
-
-        with st.container():
-            st.subheader('Topic Distribution Scatter Plot')
-            ## Display negative sentence locations
-            ht = df.tweet.apply(lambda txt: '<br>'.join(textwrap.wrap(txt, width=70)))
-            fig = px.scatter(df, y='topic', x='creation_time', color='sentiment', size='topic_confidence', hover_data=[ht,'topic','username'],\
-                             color_discrete_map={"Bearish":"firebrick","Neutral":"navajowhite","Bullish":"darkgreen"},\
-                             title='Topic Score Distribution')
-        
-            fig.update_layout(
-            	showlegend=False,
-                autosize=True,
-                width=900,
-                height=500,
-                margin=dict(
-                    b=5,
-                    t=50,
-                    pad=2
-                )
-            )
-        
-            st.plotly_chart(fig)
-    
-        with st.container():
-            st.subheader('Sentiment WordCloud')
-            cleaned_tweets = " ".join(df['tweet'].tolist())
-            wc = cloud(cleaned_tweets, max_word, max_font, 35)
-            plt.imshow(wc, interpolation='bilinear')
-            plt.axis("off")
-            plt.show()
-            st.pyplot()
-        
-except (AttributeError, KeyError) as e:
-
-    print(e)
-
-    st.error('Tweets Error, please navigate to Home page and refresh tweet stream', icon="🚨")
\ No newline at end of file
diff --git a/spaces/nicolehuangyx/stabilityai-stable-diffusion-xl-base-1.0/README.md b/spaces/nicolehuangyx/stabilityai-stable-diffusion-xl-base-1.0/README.md
deleted file mode 100644
index 59ed6079edefde5157c9a6270553dbe3897073d2..0000000000000000000000000000000000000000
--- a/spaces/nicolehuangyx/stabilityai-stable-diffusion-xl-base-1.0/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Stabilityai Stable Diffusion Xl Base 1.0
-emoji: 📚
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/nkigumnov/banks-ethics-sentiment/README.md b/spaces/nkigumnov/banks-ethics-sentiment/README.md
deleted file mode 100644
index be6d0c8879ba46818e08af6f502fbaf7c95cb14c..0000000000000000000000000000000000000000
--- a/spaces/nkigumnov/banks-ethics-sentiment/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Banks Ethics Sentiment
-emoji: 🐠
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-# Banks Ethics Sentiment
-
-This project was built as part of HSE Hack 2023. It assesses how ethical a bank is.
diff --git a/spaces/nyx-ai/stylegan2-flax-tpu/training_utils.py b/spaces/nyx-ai/stylegan2-flax-tpu/training_utils.py
deleted file mode 100644
index 57732bc7a88d29403fce5165442d290b13f7a742..0000000000000000000000000000000000000000
--- a/spaces/nyx-ai/stylegan2-flax-tpu/training_utils.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import jax
-import jax.numpy as jnp
-from jaxlib.xla_extension import DeviceArray
-import flax
-from flax.optim import dynamic_scale as dynamic_scale_lib
-from flax.core import frozen_dict
-from flax.training import train_state
-from flax import struct
-import numpy as np
-from PIL import Image
-from urllib.request import Request, urlopen
-import urllib.error
-from typing import Any, Callable
-
-
-def sync_moving_stats(state):
-    """
-    Sync moving statistics across devices.
-
-    Args:
-        state (train_state.TrainState): Training state.
-
-    Returns:
-        (train_state.TrainState): Updated training state.
-    """
-    cross_replica_mean = jax.pmap(lambda x: jax.lax.pmean(x, 'x'), 'x')
-    return state.replace(moving_stats=cross_replica_mean(state.moving_stats))
-
-
-def update_generator_ema(state_G, params_ema_G, config, ema_beta=None):
-    """
-    Update exponentially moving average of the generator weights.
-    Moving stats and noise constants will be copied over.
-    
-    Args:
-        state_G (train_state.TrainState): Generator state.
-        params_ema_G (frozen_dict.FrozenDict): Parameters of the ema generator.
-        config (Any): Config object.
-        ema_beta (float): Beta parameter of the ema. If None, will be computed
-                          from config.ema_kimg and config.batch_size.
-
-    Returns:
-        (frozen_dict.FrozenDict): Updated parameters of the ema generator.
-    """
-    def _update_ema(src, trg, beta):
-        for name, src_child in src.items():
-            if isinstance(src_child, DeviceArray):
-                trg[name] = src[name] + beta * (trg[name] - src[name])
-            else:
-                _update_ema(src_child, trg[name], beta)
-    
-    if ema_beta is None:
-        ema_nimg = config.ema_kimg * 1000
-        ema_beta = 0.5 ** (config.batch_size / max(ema_nimg, 1e-8))
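-        # The EMA half-life is ema_nimg images: after ema_nimg / batch_size
-        # updates the old weights have decayed by 0.5. E.g. ema_kimg=10 and
-        # batch_size=32 (hypothetical values) give beta = 0.5 ** (32 / 10000) ≈ 0.9978.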
-
-    params_ema_G = params_ema_G.unfreeze()
-
-    # Copy over moving stats
-    params_ema_G['moving_stats']['mapping_network'] = state_G.moving_stats
-    params_ema_G['noise_consts']['synthesis_network'] = state_G.noise_consts 
-    
-    # Update exponentially moving average of the trainable parameters
-    _update_ema(state_G.params['mapping'], params_ema_G['params']['mapping_network'], ema_beta)
-    _update_ema(state_G.params['synthesis'], params_ema_G['params']['synthesis_network'], ema_beta)
-
-    params_ema_G = frozen_dict.freeze(params_ema_G)
-    return params_ema_G
-
-
-class TrainStateG(train_state.TrainState):
-    """
-    Generator train state for a single Optax optimizer.
-
-    Attributes:
-        apply_mapping (Callable): Apply function of the Mapping Network.
-        apply_synthesis (Callable): Apply function of the Synthesis Network.
-        dynamic_scale (dynamic_scale_lib.DynamicScale): Dynamic loss scaling for mixed precision gradients.
-        epoch (int): Current epoch.
-        moving_stats (Any): Moving average of the latent W. 
-        noise_consts (Any): Noise constants from synthesis layers.
-    """
-    apply_mapping: Callable = struct.field(pytree_node=False)
-    apply_synthesis: Callable = struct.field(pytree_node=False)
-    dynamic_scale_main: dynamic_scale_lib.DynamicScale
-    dynamic_scale_reg: dynamic_scale_lib.DynamicScale
-    epoch: int
-    moving_stats: Any=None
-    noise_consts: Any=None
-
-
-class TrainStateD(train_state.TrainState):
-    """
-    Discriminator train state for a single Optax optimizer.
-
-    Attributes:
-        dynamic_scale (dynamic_scale_lib.DynamicScale): Dynamic loss scaling for mixed precision gradients.
-        epoch (int): Current epoch.
-    """
-    dynamic_scale_main: dynamic_scale_lib.DynamicScale
-    dynamic_scale_reg: dynamic_scale_lib.DynamicScale
-    epoch: int
-
-
-def get_training_snapshot(image_real, image_gen, max_num=10):
-    """
-    Creates a snapshot of generated images and real images.
-    
-    Args:
-        images_real (DeviceArray): Batch of real images, shape [B, H, W, C].
-        images_gen (DeviceArray): Batch of generated images, shape [B, H, W, C].
-        max_num (int): Maximum number of images used for snapshot.
-
-    Returns:
-        (PIL.Image): Training snapshot. Top row: generated images, bottom row: real images.
-    """
-    if image_real.shape[0] > max_num:
-        image_real = image_real[:max_num]
-    if image_gen.shape[0] > max_num:
-        image_gen = image_gen[:max_num]
-
-    image_real = jnp.split(image_real, image_real.shape[0], axis=0)
-    image_gen = jnp.split(image_gen, image_gen.shape[0], axis=0)
-
-    image_real = [jnp.squeeze(x, axis=0) for x in image_real]
-    image_gen = [jnp.squeeze(x, axis=0) for x in image_gen]
-
-    image_real = jnp.concatenate(image_real, axis=1)
-    image_gen = jnp.concatenate(image_gen, axis=1)
-
-    image_gen = (image_gen - np.min(image_gen)) / (np.max(image_gen) - np.min(image_gen))
-    image_real = (image_real - np.min(image_real)) / (np.max(image_real) - np.min(image_real))
-    image = jnp.concatenate((image_gen, image_real), axis=0)
-    
-    image = np.uint8(image * 255)
-    if image.shape[-1] == 1:
-        image = np.repeat(image, 3, axis=-1)
-    return Image.fromarray(image)
-
-
-def get_eval_snapshot(image, max_num=10):
-    """
-    Creates a snapshot of generated images.
-
-    Args:
-        image (DeviceArray): Generated images, shape [B, H, W, C].
-
-    Returns:
-        (PIL.Image): Eval snapshot.
-    """
-    if image.shape[0] > max_num:
-        image = image[:max_num]
-
-    image = jnp.split(image, image.shape[0], axis=0)
-    image = [jnp.squeeze(x, axis=0) for x in image]
-    image = jnp.concatenate(image, axis=1)
-    image = (image - np.min(image)) / (np.max(image) - np.min(image))
-    image = np.uint8(image * 255)
-    if image.shape[-1] == 1:
-        image = np.repeat(image, 3, axis=-1)
-    return Image.fromarray(image)
-
-
-def get_vm_name():
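-    """Return this VM's instance id from the GCP metadata server, or None if unreachable."""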
-    gcp_metadata_url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/instance-id"
-    req = Request(gcp_metadata_url)
-    req.add_header('Metadata-Flavor', 'Google')
-    instance_id = None
-    try:
-        with urlopen(req) as url:
-            instance_id = url.read().decode()
-    except urllib.error.URLError:
-        # metadata.google.internal not reachable: use dev
-        pass
-    return instance_id
diff --git a/spaces/onnx/MNIST-Handwritten-Digit-Recognition/app.py b/spaces/onnx/MNIST-Handwritten-Digit-Recognition/app.py
deleted file mode 100644
index c92e253c8b843588590861b00812809097f36998..0000000000000000000000000000000000000000
--- a/spaces/onnx/MNIST-Handwritten-Digit-Recognition/app.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import onnx
-import numpy as np
-import onnxruntime as ort
-from PIL import Image
-import cv2
-import os
-import gradio as gr
-
-os.system("wget https://s3.amazonaws.com/onnx-model-zoo/synset.txt")
-
-
-with open('synset.txt', 'r') as f:
-    labels = [l.rstrip() for l in f]
-    
-os.system("wget https://github.com/onnx/models/raw/main/vision/classification/mnist/model/mnist-8.onnx")
-
-os.system("wget https://s3.amazonaws.com/model-server/inputs/kitten.jpg")
-
-
-
-model_path = 'shufflenet-v2-10.onnx'
-model = onnx.load(model_path)
-session = ort.InferenceSession(model.SerializeToString())
-
-def get_image(path):
-    with Image.open(path) as img:
-        img = np.array(img.convert('RGB'))
-    return img
-    
-def preprocess(img):
-    img = img / 255.
-    img = cv2.resize(img, (256, 256))
-    h, w = img.shape[0], img.shape[1]
-    y0 = (h - 224) // 2
-    x0 = (w - 224) // 2
-    img = img[y0 : y0+224, x0 : x0+224, :]
-    img = (img - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
-    img = np.transpose(img, axes=[2, 0, 1])
-    img = img.astype(np.float32)
-    img = np.expand_dims(img, axis=0)
-    return img
-
-def predict(path):
-    img = get_image(path)
-    img = preprocess(img)
-    ort_inputs = {session.get_inputs()[0].name: img}
-    preds = session.run(None, ort_inputs)[0]
-    preds = np.squeeze(preds)
-    a = np.argsort(preds)[::-1]  # descending: highest-scoring classes first
-    results = {}
-    for i in a[0:5]:
-        results[labels[i]] = float(preds[i])
-    return results
-       
-
-title="ShuffleNet-v2"
-description="ShuffleNet is a deep convolutional network for image classification. ShuffleNetV2 is an improved architecture that is the state-of-the-art in terms of speed and accuracy tradeoff used for image classification."
-
-examples=[['kitten.jpg']]
-gr.Interface(predict,gr.inputs.Image(type='filepath'),"label",title=title,description=description,examples=examples).launch(enable_queue=True,debug=True)
\ No newline at end of file
diff --git a/spaces/osanseviero/riiaa/app.py b/spaces/osanseviero/riiaa/app.py
deleted file mode 100644
index 0bb0d61ec26e8c1042e052a22f97022b8427bb45..0000000000000000000000000000000000000000
--- a/spaces/osanseviero/riiaa/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from PIL import Image
-import torch
-import gradio as gr
-
-model2 = torch.hub.load(
-    "AK391/animegan2-pytorch:main",
-    "generator",
-    pretrained=True,
-    progress=False
-)
-face2paint = torch.hub.load(
-    'AK391/animegan2-pytorch:main', 'face2paint', 
-    size=512,side_by_side=False
-)
-
-def inference(img):
-    out = face2paint(model2, img)
-    return out
-
-title = "AnimeGANv2"
-description = "Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
-article = "Github Repo Pytorch "
-examples=[['https://upload.wikimedia.org/wikipedia/commons/thumb/a/a8/Bill_Gates_2017_%28cropped%29.jpg/220px-Bill_Gates_2017_%28cropped%29.jpg']]
-
-demo = gr.Interface(
-    fn=inference, 
-    inputs=[gr.Image(type="pil")], 
-    outputs=gr.Image(type="pil"),
-    title=title,
-    description=description,
-    article=article,
-    examples=examples)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/oskarvanderwal/MT-bias-demo/README.md b/spaces/oskarvanderwal/MT-bias-demo/README.md
deleted file mode 100644
index a95e6ef36f99ca44213f5f2b4c833cc034fcd96b..0000000000000000000000000000000000000000
--- a/spaces/oskarvanderwal/MT-bias-demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Bias in MT
-emoji: 🌍
-colorFrom: yellow
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3
-app_file: app.py
-pinned: false
----
-
-A demo showing how gender bias could manifest in MT models when translating from Hungarian to English.
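For a sense of how such a demo probes bias: Hungarian third-person pronouns are gender-neutral, so an MT model must guess a gender when translating into English. A rough sketch using the `transformers` translation pipeline; the `Helsinki-NLP/opus-mt-hu-en` checkpoint is an assumption here, and the actual Space may use a different model:

```python
from transformers import pipeline

# Assumed checkpoint; the Space itself may rely on a different MT model.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-hu-en")

# The Hungarian pronoun is gender-neutral; English output must pick "he" or "she".
sentences = ["ő egy orvos.", "ő egy nővér."]  # "(they) are a doctor / a nurse"
for s in sentences:
    print(s, "->", translator(s)[0]["translation_text"])
```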
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docker/diffusers-pytorch-cuda/Dockerfile b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docker/diffusers-pytorch-cuda/Dockerfile
deleted file mode 100644
index fab3b70827653a959434cb24929f86e3bd8890e2..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docker/diffusers-pytorch-cuda/Dockerfile
+++ /dev/null
@@ -1,47 +0,0 @@
-FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
-LABEL maintainer="Hugging Face"
-LABEL repository="diffusers"
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-RUN apt update && \
-    apt install -y bash \
-                   build-essential \
-                   git \
-                   git-lfs \
-                   curl \
-                   ca-certificates \
-                   libsndfile1-dev \
-                   libgl1 \
-                   python3.8 \
-                   python3-pip \
-                   python3.8-venv && \
-    rm -rf /var/lib/apt/lists
-
-# make sure to use venv
-RUN python3 -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-
-# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip && \
-    python3 -m pip install --no-cache-dir \
-        torch \
-        torchvision \
-        torchaudio \
-        invisible_watermark && \
-    python3 -m pip install --no-cache-dir \
-        accelerate \
-        datasets \
-        hf-doc-builder \
-        huggingface-hub \
-        Jinja2 \
-        librosa \
-        numpy \
-        scipy \
-        tensorboard \
-        transformers \
-        omegaconf \
-        pytorch-lightning \
-        xformers
-
-CMD ["/bin/bash"]
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/other-modalities.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/other-modalities.md
deleted file mode 100644
index ec879c49b1060c7ade1a0eb7e82de87c95d1b957..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/other-modalities.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Using Diffusers with other modalities
-
-Diffusers is in the process of expanding to modalities other than images.
-
-Example type        | Colab | Pipeline |
-:-------------------------:|:-------------------------:|:-------------------------:|
-[Molecule conformation](https://www.nature.com/subjects/molecular-conformation#:~:text=Definition,to%20changes%20in%20their%20environment.) generation | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/geodiff_molecule_conformation.ipynb) | ❌
-
-More coming soon!
\ No newline at end of file
diff --git a/spaces/pikto/prodia/utils.py b/spaces/pikto/prodia/utils.py
deleted file mode 100644
index ead91d363542627776d40417382ffed5a6b53b45..0000000000000000000000000000000000000000
--- a/spaces/pikto/prodia/utils.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def keys(dictionary: dict):
-    return list(dictionary.keys())
-
-
-def split_numbers(numbers: str):
-    return [int(i) for i in numbers.split(",")]
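Both helpers are thin wrappers; their behavior at a glance (a self-contained sketch with illustrative values):

```python
def keys(dictionary: dict):
    return list(dictionary.keys())

def split_numbers(numbers: str):
    return [int(i) for i in numbers.split(",")]

print(keys({"a": 1, "b": 2}))    # ['a', 'b']
print(split_numbers("1, 2, 3"))  # [1, 2, 3]; int() tolerates surrounding spaces
```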
diff --git a/spaces/pkiage/time_series_autocorrelation_demo/docs/Makefile b/spaces/pkiage/time_series_autocorrelation_demo/docs/Makefile
deleted file mode 100644
index 0ad7fe6def55763746fcefd59613666d972729a2..0000000000000000000000000000000000000000
--- a/spaces/pkiage/time_series_autocorrelation_demo/docs/Makefile
+++ /dev/null
@@ -1,153 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/tool-time-series-autocorrelation-demo.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/tool-time-series-autocorrelation-demo.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/tool-time-series-autocorrelation-demo"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/tool-time-series-autocorrelation-demo"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/status_codes.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/status_codes.py
deleted file mode 100644
index 4bd072be9769748a852740d037d5c63021472c9d..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/status_codes.py
+++ /dev/null
@@ -1,128 +0,0 @@
-r"""
-The ``codes`` object defines a mapping from common names for HTTP statuses
-to their numerical codes, accessible either as attributes or as dictionary
-items.
-
-Example::
-
-    >>> import requests
-    >>> requests.codes['temporary_redirect']
-    307
-    >>> requests.codes.teapot
-    418
-    >>> requests.codes['\o/']
-    200
-
-Some codes have multiple names, and both upper- and lower-case versions of
-the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
-``codes.okay`` all correspond to the HTTP status code 200.
-"""
-
-from .structures import LookupDict
-
-_codes = {
-    # Informational.
-    100: ("continue",),
-    101: ("switching_protocols",),
-    102: ("processing",),
-    103: ("checkpoint",),
-    122: ("uri_too_long", "request_uri_too_long"),
-    200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
-    201: ("created",),
-    202: ("accepted",),
-    203: ("non_authoritative_info", "non_authoritative_information"),
-    204: ("no_content",),
-    205: ("reset_content", "reset"),
-    206: ("partial_content", "partial"),
-    207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
-    208: ("already_reported",),
-    226: ("im_used",),
-    # Redirection.
-    300: ("multiple_choices",),
-    301: ("moved_permanently", "moved", "\\o-"),
-    302: ("found",),
-    303: ("see_other", "other"),
-    304: ("not_modified",),
-    305: ("use_proxy",),
-    306: ("switch_proxy",),
-    307: ("temporary_redirect", "temporary_moved", "temporary"),
-    308: (
-        "permanent_redirect",
-        "resume_incomplete",
-        "resume",
-    ),  # "resume" and "resume_incomplete" to be removed in 3.0
-    # Client Error.
-    400: ("bad_request", "bad"),
-    401: ("unauthorized",),
-    402: ("payment_required", "payment"),
-    403: ("forbidden",),
-    404: ("not_found", "-o-"),
-    405: ("method_not_allowed", "not_allowed"),
-    406: ("not_acceptable",),
-    407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
-    408: ("request_timeout", "timeout"),
-    409: ("conflict",),
-    410: ("gone",),
-    411: ("length_required",),
-    412: ("precondition_failed", "precondition"),
-    413: ("request_entity_too_large",),
-    414: ("request_uri_too_large",),
-    415: ("unsupported_media_type", "unsupported_media", "media_type"),
-    416: (
-        "requested_range_not_satisfiable",
-        "requested_range",
-        "range_not_satisfiable",
-    ),
-    417: ("expectation_failed",),
-    418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
-    421: ("misdirected_request",),
-    422: ("unprocessable_entity", "unprocessable"),
-    423: ("locked",),
-    424: ("failed_dependency", "dependency"),
-    425: ("unordered_collection", "unordered"),
-    426: ("upgrade_required", "upgrade"),
-    428: ("precondition_required", "precondition"),
-    429: ("too_many_requests", "too_many"),
-    431: ("header_fields_too_large", "fields_too_large"),
-    444: ("no_response", "none"),
-    449: ("retry_with", "retry"),
-    450: ("blocked_by_windows_parental_controls", "parental_controls"),
-    451: ("unavailable_for_legal_reasons", "legal_reasons"),
-    499: ("client_closed_request",),
-    # Server Error.
-    500: ("internal_server_error", "server_error", "/o\\", "✗"),
-    501: ("not_implemented",),
-    502: ("bad_gateway",),
-    503: ("service_unavailable", "unavailable"),
-    504: ("gateway_timeout",),
-    505: ("http_version_not_supported", "http_version"),
-    506: ("variant_also_negotiates",),
-    507: ("insufficient_storage",),
-    509: ("bandwidth_limit_exceeded", "bandwidth"),
-    510: ("not_extended",),
-    511: ("network_authentication_required", "network_auth", "network_authentication"),
-}
-
-codes = LookupDict(name="status_codes")
-
-
-def _init():
-    for code, titles in _codes.items():
-        for title in titles:
-            setattr(codes, title, code)
-            if not title.startswith(("\\", "/")):
-                setattr(codes, title.upper(), code)
-
-    def doc(code):
-        names = ", ".join(f"``{n}``" for n in _codes[code])
-        return "* %d: %s" % (code, names)
-
-    global __doc__
-    __doc__ = (
-        __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
-        if __doc__ is not None
-        else None
-    )
-
-
-_init()
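In use, the resulting `codes` object resolves names either as attributes or as dictionary items (this is standard `requests` behavior):

```python
import requests

assert requests.codes.ok == 200
assert requests.codes["temporary_redirect"] == 307
assert requests.codes.TEAPOT == 418  # upper-case aliases exist for plain names
```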
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/filepost.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/filepost.py
deleted file mode 100644
index 36c9252c647e67bc7353c523152568b993c1331f..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/filepost.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import absolute_import
-
-import binascii
-import codecs
-import os
-from io import BytesIO
-
-from .fields import RequestField
-from .packages import six
-from .packages.six import b
-
-writer = codecs.lookup("utf-8")[3]
-
-
-def choose_boundary():
-    """
-    Our embarrassingly-simple replacement for mimetools.choose_boundary.
-    """
-    boundary = binascii.hexlify(os.urandom(16))
-    if not six.PY2:
-        boundary = boundary.decode("ascii")
-    return boundary
-
-
-def iter_field_objects(fields):
-    """
-    Iterate over fields.
-
-    Supports list of (k, v) tuples and dicts, and lists of
-    :class:`~urllib3.fields.RequestField`.
-
-    """
-    if isinstance(fields, dict):
-        i = six.iteritems(fields)
-    else:
-        i = iter(fields)
-
-    for field in i:
-        if isinstance(field, RequestField):
-            yield field
-        else:
-            yield RequestField.from_tuples(*field)
-
-
-def iter_fields(fields):
-    """
-    .. deprecated:: 1.6
-
-    Iterate over fields.
-
-    The addition of :class:`~urllib3.fields.RequestField` makes this function
-    obsolete. Instead, use :func:`iter_field_objects`, which returns
-    :class:`~urllib3.fields.RequestField` objects.
-
-    Supports list of (k, v) tuples and dicts.
-    """
-    if isinstance(fields, dict):
-        return ((k, v) for k, v in six.iteritems(fields))
-
-    return ((k, v) for k, v in fields)
-
-
-def encode_multipart_formdata(fields, boundary=None):
-    """
-    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
-
-    :param fields:
-        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
-
-    :param boundary:
-        If not specified, then a random boundary will be generated using
-        :func:`urllib3.filepost.choose_boundary`.
-    """
-    body = BytesIO()
-    if boundary is None:
-        boundary = choose_boundary()
-
-    for field in iter_field_objects(fields):
-        body.write(b("--%s\r\n" % (boundary)))
-
-        writer(body).write(field.render_headers())
-        data = field.data
-
-        if isinstance(data, int):
-            data = str(data)  # Backwards compatibility
-
-        if isinstance(data, six.text_type):
-            writer(body).write(data)
-        else:
-            body.write(data)
-
-        body.write(b"\r\n")
-
-    body.write(b("--%s--\r\n" % (boundary)))
-
-    content_type = str("multipart/form-data; boundary=%s" % boundary)
-
-    return body.getvalue(), content_type
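A quick call pattern for the encoder above, assuming a standalone `urllib3` install (here the module ships vendored inside pip); the field values are illustrative:

```python
from urllib3.filepost import encode_multipart_formdata

fields = {
    "name": "example",
    # (filename, data, content_type) tuples become file parts.
    "attachment": ("hello.txt", b"hello world", "text/plain"),
}
body, content_type = encode_multipart_formdata(fields)
print(content_type)  # multipart/form-data; boundary=<32 hex chars>
print(len(body))     # encoded payload, ready to send as an HTTP request body
```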
diff --git a/spaces/prof-freakenstein/anurag-bit-Ai-avatar-Generator/app.py b/spaces/prof-freakenstein/anurag-bit-Ai-avatar-Generator/app.py
deleted file mode 100644
index af53bbfe930321a80d99dc01924514d204c3e07f..0000000000000000000000000000000000000000
--- a/spaces/prof-freakenstein/anurag-bit-Ai-avatar-Generator/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/anurag-bit/Ai-avatar-Generator").launch()
\ No newline at end of file
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-cb68aa64.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-cb68aa64.css
deleted file mode 100644
index 6d7fa6f62af721fffb7f3366cc916cbe2c2b6113..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Example-cb68aa64.css
+++ /dev/null
@@ -1 +0,0 @@
-img.svelte-2xi6dn{max-width:100%;max-height:100%;border-radius:var(--radius-lg);max-width:none}.container.selected.svelte-5cqjmr{border-color:var(--border-color-accent)}.container.table.svelte-5cqjmr{margin:0 auto;border:2px solid var(--border-color-primary);border-radius:var(--radius-lg);width:var(--size-20);height:var(--size-20);object-fit:cover}.container.gallery.svelte-5cqjmr{border:2px solid var(--border-color-primary);height:var(--size-20);max-height:var(--size-20);object-fit:cover}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-1fe376d1.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-1fe376d1.js
deleted file mode 100644
index af8acc82ce44226fa2066538b03e6719ac0d4227..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-1fe376d1.js
+++ /dev/null
@@ -1,2 +0,0 @@
-const{SvelteComponent:c,append:u,attr:d,detach:g,element:o,init:v,insert:r,noop:f,safe_not_equal:y,set_data:m,text:b,toggle_class:i}=window.__gradio__svelte__internal;function h(a){let e,n;return{c(){e=o("div"),n=b(a[0]),d(e,"class","svelte-1ayixqk"),i(e,"table",a[1]==="table"),i(e,"gallery",a[1]==="gallery"),i(e,"selected",a[2])},m(t,l){r(t,e,l),u(e,n)},p(t,[l]){l&1&&m(n,t[0]),l&2&&i(e,"table",t[1]==="table"),l&2&&i(e,"gallery",t[1]==="gallery"),l&4&&i(e,"selected",t[2])},i:f,o:f,d(t){t&&g(e)}}}function q(a,e,n){let{value:t}=e,{type:l}=e,{selected:_=!1}=e;return a.$$set=s=>{"value"in s&&n(0,t=s.value),"type"in s&&n(1,l=s.type),"selected"in s&&n(2,_=s.selected)},[t,l,_]}class w extends c{constructor(e){super(),v(this,e,q,h,y,{value:0,type:1,selected:2})}}export{w as default};
-//# sourceMappingURL=Example-1fe376d1.js.map
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_matplotlib.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_matplotlib.py
deleted file mode 100644
index ac1c3455c3d9954512a6f0425e4dfc73b3595456..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_matplotlib.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-import subprocess
-import sys
-
-import pytest
-
-import matplotlib
-
-
-@pytest.mark.parametrize('version_str, version_tuple', [
-    ('3.5.0', (3, 5, 0, 'final', 0)),
-    ('3.5.0rc2', (3, 5, 0, 'candidate', 2)),
-    ('3.5.0.dev820+g6768ef8c4c', (3, 5, 0, 'alpha', 820)),
-    ('3.5.0.post820+g6768ef8c4c', (3, 5, 1, 'alpha', 820)),
-])
-def test_parse_to_version_info(version_str, version_tuple):
-    assert matplotlib._parse_to_version_info(version_str) == version_tuple
-
-
-@pytest.mark.skipif(sys.platform == "win32",
-                    reason="chmod() doesn't work as is on Windows")
-@pytest.mark.skipif(sys.platform != "win32" and os.geteuid() == 0,
-                    reason="chmod() doesn't work as root")
-def test_tmpconfigdir_warning(tmpdir):
-    """Test that a warning is emitted if a temporary configdir must be used."""
-    mode = os.stat(tmpdir).st_mode
-    try:
-        os.chmod(tmpdir, 0)
-        proc = subprocess.run(
-            [sys.executable, "-c", "import matplotlib"],
-            env={**os.environ, "MPLCONFIGDIR": str(tmpdir)},
-            stderr=subprocess.PIPE, text=True, check=True)
-        assert "set the MPLCONFIGDIR" in proc.stderr
-    finally:
-        os.chmod(tmpdir, mode)
-
-
-def test_importable_with_no_home(tmpdir):
-    subprocess.run(
-        [sys.executable, "-c",
-         "import pathlib; pathlib.Path.home = lambda *args: 1/0; "
-         "import matplotlib.pyplot"],
-        env={**os.environ, "MPLCONFIGDIR": str(tmpdir)}, check=True)
-
-
-def test_use_doc_standard_backends():
-    """
-    Test that the standard backends mentioned in the docstring of
-    matplotlib.use() are the same as in matplotlib.rcsetup.
-    """
-    def parse(key):
-        backends = []
-        for line in matplotlib.use.__doc__.split(key)[1].split('\n'):
-            if not line.strip():
-                break
-            backends += [e.strip() for e in line.split(',') if e]
-        return backends
-
-    assert (set(parse('- interactive backends:\n')) ==
-            set(matplotlib.rcsetup.interactive_bk))
-    assert (set(parse('- non-interactive backends:\n')) ==
-            set(matplotlib.rcsetup.non_interactive_bk))
-
-
-def test_importable_with__OO():
-    """
-    When using -OO or export PYTHONOPTIMIZE=2, docstrings are discarded,
-    this simple test may prevent something like issue #17970.
-    """
-    program = (
-        "import matplotlib as mpl; "
-        "import matplotlib.pyplot as plt; "
-        "import matplotlib.cbook as cbook; "
-        "import matplotlib.patches as mpatches"
-    )
-    cmd = [sys.executable, "-OO", "-c", program]
-    assert subprocess.call(cmd, env={**os.environ, "MPLBACKEND": ""}) == 0
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/defchararray.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/defchararray.py
deleted file mode 100644
index 11c5a30bff70ef4edfb9fc0dd616af9d99d9da39..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/defchararray.py
+++ /dev/null
@@ -1,2914 +0,0 @@
-"""
-This module contains a set of functions for vectorized string
-operations and methods.
-
-.. note::
-   The `chararray` class exists for backwards compatibility with
-   Numarray, it is not recommended for new development. Starting from numpy
-   1.4, if one needs arrays of strings, it is recommended to use arrays of
-   `dtype` `object_`, `bytes_` or `str_`, and use the free functions
-   in the `numpy.char` module for fast vectorized string operations.
-
-Some methods will only be available if the corresponding string method is
-available in your version of Python.
-
-The preferred alias for `defchararray` is `numpy.char`.
-
-"""
-import functools
-
-from .._utils import set_module
-from .numerictypes import (
-    bytes_, str_, integer, int_, object_, bool_, character)
-from .numeric import ndarray, compare_chararrays
-from .numeric import array as narray
-from numpy.core.multiarray import _vec_string
-from numpy.core import overrides
-from numpy.compat import asbytes
-import numpy
-
-__all__ = [
-    'equal', 'not_equal', 'greater_equal', 'less_equal',
-    'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
-    'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
-    'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
-    'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
-    'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
-    'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
-    'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
-    'array', 'asarray'
-    ]
-
-
-_globalvar = 0
-
-array_function_dispatch = functools.partial(
-    overrides.array_function_dispatch, module='numpy.char')
-
-
-def _is_unicode(arr):
-    """Returns True if arr is a string or a string array with a dtype that
-    represents a unicode string, otherwise returns False.
-
-    """
-    if (isinstance(arr, str) or
-            issubclass(numpy.asarray(arr).dtype.type, str)):
-        return True
-    return False
-
-
-def _to_bytes_or_str_array(result, output_dtype_like=None):
-    """
-    Helper function to cast a result back into an array
-    with the appropriate dtype if an object array must be used
-    as an intermediary.
-    """
-    ret = numpy.asarray(result.tolist())
-    dtype = getattr(output_dtype_like, 'dtype', None)
-    if dtype is not None:
-        return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)
-    return ret
-
-
-def _clean_args(*args):
-    """
-    Helper function for delegating arguments to Python string
-    functions.
-
-    Many of the Python string operations that have optional arguments
-    do not use 'None' to indicate a default value.  In these cases,
-    we need to remove all None arguments, and those following them.
-    """
-    newargs = []
-    for chk in args:
-        if chk is None:
-            break
-        newargs.append(chk)
-    return newargs
-
-def _get_num_chars(a):
-    """
-    Helper function that returns the number of characters per field in
-    a string or unicode array.  This is to abstract out the fact that
-    for a unicode array this is itemsize / 4.
-    """
-    if issubclass(a.dtype.type, str_):
-        return a.itemsize // 4
-    return a.itemsize
-
-
-def _binary_op_dispatcher(x1, x2):
-    return (x1, x2)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def equal(x1, x2):
-    """
-    Return (x1 == x2) element-wise.
-
-    Unlike `numpy.equal`, this comparison is performed by first
-    stripping whitespace characters from the end of the string.  This
-    behavior is provided for backward-compatibility with numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    not_equal, greater_equal, less_equal, greater, less
-    """
-    return compare_chararrays(x1, x2, '==', True)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def not_equal(x1, x2):
-    """
-    Return (x1 != x2) element-wise.
-
-    Unlike `numpy.not_equal`, this comparison is performed by first
-    stripping whitespace characters from the end of the string.  This
-    behavior is provided for backward-compatibility with numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    equal, greater_equal, less_equal, greater, less
-    """
-    return compare_chararrays(x1, x2, '!=', True)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def greater_equal(x1, x2):
-    """
-    Return (x1 >= x2) element-wise.
-
-    Unlike `numpy.greater_equal`, this comparison is performed by
-    first stripping whitespace characters from the end of the string.
-    This behavior is provided for backward-compatibility with
-    numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    equal, not_equal, less_equal, greater, less
-    """
-    return compare_chararrays(x1, x2, '>=', True)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def less_equal(x1, x2):
-    """
-    Return (x1 <= x2) element-wise.
-
-    Unlike `numpy.less_equal`, this comparison is performed by first
-    stripping whitespace characters from the end of the string.  This
-    behavior is provided for backward-compatibility with numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    equal, not_equal, greater_equal, greater, less
-    """
-    return compare_chararrays(x1, x2, '<=', True)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def greater(x1, x2):
-    """
-    Return (x1 > x2) element-wise.
-
-    Unlike `numpy.greater`, this comparison is performed by first
-    stripping whitespace characters from the end of the string.  This
-    behavior is provided for backward-compatibility with numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    equal, not_equal, greater_equal, less_equal, less
-    """
-    return compare_chararrays(x1, x2, '>', True)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def less(x1, x2):
-    """
-    Return (x1 < x2) element-wise.
-
-    Unlike `numpy.less`, this comparison is performed by first
-    stripping whitespace characters from the end of the string.  This
-    behavior is provided for backward-compatibility with numarray.
-
-    Parameters
-    ----------
-    x1, x2 : array_like of str or unicode
-        Input arrays of the same shape.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools.
-
-    See Also
-    --------
-    equal, not_equal, greater_equal, less_equal, greater
-    """
-    return compare_chararrays(x1, x2, '<', True)
-
-
-def _unary_op_dispatcher(a):
-    return (a,)
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def str_len(a):
-    """
-    Return len(a) element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of integers
-
-    See Also
-    --------
-    len
-
-    Examples
-    --------
-    >>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
-    >>> np.char.str_len(a)
-    array([23, 15])
-    >>> a = np.array([u'\u0420', u'\u043e'])
-    >>> np.char.str_len(a)
-    array([1, 1])
-    >>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']])
-    >>> np.char.str_len(a)
-    array([[5, 5], [1, 1]])
-    """
-    # Note: __len__, etc. currently return ints, which are not C-integers.
-    # Generally intp would be expected for lengths, although int is sufficient
-    # due to the dtype itemsize limitation.
-    return _vec_string(a, int_, '__len__')
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def add(x1, x2):
-    """
-    Return element-wise string concatenation for two arrays of str or unicode.
-
-    Arrays `x1` and `x2` must have the same shape.
-
-    Parameters
-    ----------
-    x1 : array_like of str or unicode
-        Input array.
-    x2 : array_like of str or unicode
-        Input array.
-
-    Returns
-    -------
-    add : ndarray
-        Output array of `bytes_` or `str_`, depending on input types
-        of the same shape as `x1` and `x2`.
-
-    """
-    arr1 = numpy.asarray(x1)
-    arr2 = numpy.asarray(x2)
-    out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
-
-    if type(arr1.dtype) != type(arr2.dtype):
-        # Enforce this for now.  The solution to it will be to implement add
-        # as a ufunc.  It never worked right on Python 3: bytes + unicode gave
-        # nonsense, unicode + bytes errored, and unicode + object used the
-        # object dtype itemsize as num chars (worked on short strings).
-        # bytes + void worked, but promoting void->bytes is dubious also.
-        raise TypeError(
-            "np.char.add() requires both arrays of the same dtype kind, but "
-            f"got dtypes: '{arr1.dtype}' and '{arr2.dtype}' (the few cases "
-            "where this used to work often lead to incorrect results).")
-
-    return _vec_string(arr1, type(arr1.dtype)(out_size), '__add__', (arr2,))
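-
-# A minimal usage sketch (assumed standard np.char behavior, not from this
-# module's docstring): the output dtype grows to fit the concatenation.
-#     >>> np.char.add(['hello ', 'hi '], ['world', 'there'])
-#     array(['hello world', 'hi there'], dtype='<U11')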
-
-def _multiply_dispatcher(a, i):
-    return (a,)
-
-
-@array_function_dispatch(_multiply_dispatcher)
-def multiply(a, i):
-    """
-    Return (a * i), that is string multiple concatenation,
-    element-wise.
-
-    Values in `i` less than 0 are treated as 0 (which yields an
-    empty string).
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    i : array_like of ints
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input types
-    
-    Examples
-    --------
-    >>> a = np.array(["a", "b", "c"])
-    >>> np.char.multiply(a, 3)
-    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
-    >>> i = np.array([1, 2, 3])
-    >>> np.char.multiply(a, i)
-    array(['a', 'bb', 'ccc'], dtype='<U3')
-    >>> np.char.multiply(np.array(['a']), i)
-    array(['a', 'aa', 'aaa'], dtype='<U3')
-    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
-    >>> np.char.multiply(a, 3)
-    array([['aaa', 'bbb', 'ccc'],
-           ['ddd', 'eee', 'fff']], dtype='<U3')
-    >>> np.char.multiply(a, i)
-    array([['a', 'bb', 'ccc'],
-           ['d', 'ee', 'fff']], dtype='<U3')
-    """
-    a_arr = numpy.asarray(a)
-    i_arr = numpy.asarray(i)
-    if not issubclass(i_arr.dtype.type, integer):
-        raise ValueError("Can only multiply by integers")
-    out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
-    return _vec_string(
-        a_arr, type(a_arr.dtype)(out_size), '__mul__', (i_arr,))
-
-
-def _mod_dispatcher(a, values):
-    return (a, values)
-
-
-@array_function_dispatch(_mod_dispatcher)
-def mod(a, values):
-    """
-    Return (a % i), that is pre-Python 2.6 string formatting
-    (interpolation), element-wise for a pair of array_likes of str
-    or unicode.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    values : array_like of values
-       These values will be element-wise interpolated into the string.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input types
-
-    See Also
-    --------
-    str.__mod__
-
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, '__mod__', (values,)), a)
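-
-# Usage sketch for mod (assumed standard behavior; illustrative values):
-#     >>> np.char.mod('%d degrees', np.array([0, 90, 180]))
-#     array(['0 degrees', '90 degrees', '180 degrees'], dtype='<U11')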
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def capitalize(a):
-    """
-    Return a copy of `a` with only the first character of each element
-    capitalized.
-
-    Calls `str.capitalize` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-        Input array of strings to capitalize.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input
-        types
-
-    See Also
-    --------
-    str.capitalize
-
-    Examples
-    --------
-    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
-    array(['a1b2', '1b2a', 'b2a1', '2a1b'],
-        dtype='|S4')
-    >>> np.char.capitalize(c)
-    array(['A1b2', '1b2a', 'B2a1', '2a1b'],
-        dtype='|S4')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'capitalize')
-
-
-def _center_dispatcher(a, width, fillchar=None):
-    return (a,)
-
-
-@array_function_dispatch(_center_dispatcher)
-def center(a, width, fillchar=' '):
-    """
-    Return a copy of `a` with its elements centered in a string of
-    length `width`.
-
-    Calls `str.center` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    width : int
-        The length of the resulting strings
-    fillchar : str or unicode, optional
-        The padding character to use (default is space).
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input
-        types
-
-    See Also
-    --------
-    str.center
-    
-    Notes
-    -----
-    This function is intended to work with arrays of strings.  The
-    fill character is not applied to numeric types.
-
-    Examples
-    --------
-    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c
-    array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
-    >>> np.char.center(c, width=9)
-    array(['   a1b2  ', '   1b2a  ', '   b2a1  ', '   2a1b  '], dtype='<U9')
-    >>> np.char.center(c, width=9, fillchar='*')
-    array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')
-    >>> np.char.center(c, width=1)
-    array(['a', '1', 'b', '2'], dtype='<U1')
-
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = int(numpy.max(width_arr.flat))
-    if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
-        fillchar = asbytes(fillchar)
-    return _vec_string(
-        a_arr, type(a_arr.dtype)(size), 'center', (width_arr, fillchar))
-
-
-def _count_dispatcher(a, sub, start=None, end=None):
-    return (a,)
-
-
-@array_function_dispatch(_count_dispatcher)
-def count(a, sub, start=0, end=None):
-    """
-    Returns an array with the number of non-overlapping occurrences of
-    substring `sub` in the range [`start`, `end`].
-
-    Calls `str.count` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sub : str or unicode
-       The substring to search for.
-
-    start, end : int, optional
-       Optional arguments `start` and `end` are interpreted as slice
-       notation to specify the range in which to count.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of ints.
-
-    See Also
-    --------
-    str.count
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> np.char.count(c, 'A')
-    array([3, 1, 1])
-    >>> np.char.count(c, 'aA')
-    array([3, 1, 0])
-    >>> np.char.count(c, 'A', start=1, end=4)
-    array([2, 1, 1])
-    >>> np.char.count(c, 'A', start=1, end=3)
-    array([1, 0, 0])
-
-    """
-    return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
-
-
-def _code_dispatcher(a, encoding=None, errors=None):
-    return (a,)
-
-
-@array_function_dispatch(_code_dispatcher)
-def decode(a, encoding=None, errors=None):
-    r"""
-    Calls ``bytes.decode`` element-wise.
-
-    The set of available codecs comes from the Python standard library,
-    and may be extended at runtime.  For more information, see the
-    :mod:`codecs` module.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    encoding : str, optional
-       The name of an encoding
-
-    errors : str, optional
-       Specifies how to handle encoding errors
-
-    Returns
-    -------
-    out : ndarray
-
-    See Also
-    --------
-    :py:meth:`bytes.decode`
-
-    Notes
-    -----
-    The type of the result will depend on the encoding specified.
-
-    Examples
-    --------
-    >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
-    ...               b'\x81\x82\xc2\xc1\xc2\x82\x81'])
-    >>> c
-    array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
-           b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
-    >>> np.char.decode(c, encoding='cp037')
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
-
-
-@array_function_dispatch(_code_dispatcher)
-def encode(a, encoding=None, errors=None):
-    """
-    Calls `str.encode` element-wise.
-
-    The set of available codecs comes from the Python standard library,
-    and may be extended at runtime. For more information, see the codecs
-    module.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    encoding : str, optional
-       The name of an encoding
-
-    errors : str, optional
-       Specifies how to handle encoding errors
-
-    Returns
-    -------
-    out : ndarray
-
-    See Also
-    --------
-    str.encode
-
-    Notes
-    -----
-    The type of the result will depend on the encoding specified.
-
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
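-
-# Usage sketch for encode (assumed behavior; the inverse of the decode
-# example above):
-#     >>> np.char.encode(np.array(['aAaAaA']), encoding='cp037')
-#     array([b'\x81\xc1\x81\xc1\x81\xc1'], dtype='|S6')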
-
-
-def _endswith_dispatcher(a, suffix, start=None, end=None):
-    return (a,)
-
-
-@array_function_dispatch(_endswith_dispatcher)
-def endswith(a, suffix, start=0, end=None):
-    """
-    Returns a boolean array which is `True` where the string element
-    in `a` ends with `suffix`, otherwise `False`.
-
-    Calls `str.endswith` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    suffix : str
-
-    start, end : int, optional
-        With optional `start`, test beginning at that position. With
-        optional `end`, stop comparing at that position.
-
-    Returns
-    -------
-    out : ndarray
-        Outputs an array of bools.
-
-    See Also
-    --------
-    str.endswith
-
-    Examples
-    --------
-    >>> s = np.array(['foo', 'bar'])
-    >>> s
-    array(['foo', 'bar'], dtype='<U3')
-    >>> np.char.endswith(s, 'ar')
-    array([False,  True])
-    >>> np.char.endswith(s, 'a', start=1, end=2)
-    array([False,  True])
-
-    """
-    return _vec_string(
-        a, bool_, 'endswith', [suffix, start] + _clean_args(end))
-
-
-def _expandtabs_dispatcher(a, tabsize=None):
-    return (a,)
-
-
-@array_function_dispatch(_expandtabs_dispatcher)
-def expandtabs(a, tabsize=8):
-    """
-    Return a copy of each string element where all tab characters are
-    replaced by one or more spaces.
-
-    Calls `str.expandtabs` element-wise.
-
-    Return a copy of each string element where all tab characters are
-    replaced by one or more spaces, depending on the current column
-    and the given `tabsize`. The column number is reset to zero after
-    each newline occurring in the string. This doesn't understand other
-    non-printing characters or escape sequences.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-        Input array
-    tabsize : int, optional
-        Replace tabs with `tabsize` number of spaces.  If not given defaults
-        to 8 spaces.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.expandtabs
-
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'expandtabs', (tabsize,)), a)
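-
-# Usage sketch for expandtabs (assumed standard behavior): the tab after
-# 'a' is padded out to the next multiple of tabsize.
-#     >>> np.char.expandtabs(np.array(['a\tb']), tabsize=4)
-#     array(['a   b'], dtype='<U5')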
-
-
-@array_function_dispatch(_count_dispatcher)
-def find(a, sub, start=0, end=None):
-    """
-    For each element, return the lowest index in the string where
-    substring `sub` is found.
-
-    Calls `str.find` element-wise.
-
-    For each element, return the lowest index in the string where
-    substring `sub` is found, such that `sub` is contained in the
-    range [`start`, `end`].
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-        Optional arguments `start` and `end` are interpreted as in
-        slice notation.
-
-    Returns
-    -------
-    out : ndarray or int
-        Output array of ints.  Returns -1 if `sub` is not found.
-
-    See Also
-    --------
-    str.find
-
-    Examples
-    --------
-    >>> a = np.array(["NumPy is a Python library"])
-    >>> np.char.find(a, "Python", start=0, end=None)
-    array([11])
-
-    """
-    return _vec_string(
-        a, int_, 'find', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_count_dispatcher)
-def index(a, sub, start=0, end=None):
-    """
-    Like `find`, but raises `ValueError` when the substring is not found.
-
-    Calls `str.index` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-
-    Returns
-    -------
-    out : ndarray
-        Output array of ints.
-
-    See Also
-    --------
-    find, str.find
-
-    Examples
-    --------
-    >>> a = np.array(["Computer Science"])
-    >>> np.char.index(a, "Science", start=0, end=None)
-    array([9])
-
-    """
-    return _vec_string(
-        a, int_, 'index', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isalnum(a):
-    """
-    Returns true for each element if all characters in the string are
-    alphanumeric and there is at least one character, false otherwise.
-
-    Calls `str.isalnum` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.isalnum
-    """
-    return _vec_string(a, bool_, 'isalnum')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isalpha(a):
-    """
-    Returns true for each element if all characters in the string are
-    alphabetic and there is at least one character, false otherwise.
-
-    Calls `str.isalpha` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.isalpha
-    """
-    return _vec_string(a, bool_, 'isalpha')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isdigit(a):
-    """
-    Returns true for each element if all characters in the string are
-    digits and there is at least one character, false otherwise.
-
-    Calls `str.isdigit` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.isdigit
-
-    Examples
-    --------
-    >>> a = np.array(['a', 'b', '0'])
-    >>> np.char.isdigit(a)
-    array([False, False,  True])
-    >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
-    >>> np.char.isdigit(a)
-    array([[False, False,  True], [False,  True,  True]])
-    """
-    return _vec_string(a, bool_, 'isdigit')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def islower(a):
-    """
-    Returns true for each element if all cased characters in the
-    string are lowercase and there is at least one cased character,
-    false otherwise.
-
-    Calls `str.islower` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.islower
-    """
-    return _vec_string(a, bool_, 'islower')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isspace(a):
-    """
-    Returns true for each element if there are only whitespace
-    characters in the string and there is at least one character,
-    false otherwise.
-
-    Calls `str.isspace` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.isspace
-    """
-    return _vec_string(a, bool_, 'isspace')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def istitle(a):
-    """
-    Returns true for each element if the element is a titlecased
-    string and there is at least one character, false otherwise.
-
-    Call `str.istitle` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.istitle
-    """
-    return _vec_string(a, bool_, 'istitle')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isupper(a):
-    """
-    Return true for each element if all cased characters in the
-    string are uppercase and there is at least one character, false
-    otherwise.
-
-    Call `str.isupper` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See Also
-    --------
-    str.isupper
-
-    Examples
-    --------
-    >>> str = "GHC"
-    >>> np.char.isupper(str)
-    array(True)     
-    >>> a = np.array(["hello", "HELLO", "Hello"])
-    >>> np.char.isupper(a)
-    array([False,  True, False]) 
-
-    """
-    return _vec_string(a, bool_, 'isupper')
-
-
-def _join_dispatcher(sep, seq):
-    return (sep, seq)
-
-
-@array_function_dispatch(_join_dispatcher)
-def join(sep, seq):
-    """
-    Return a string which is the concatenation of the strings in the
-    sequence `seq`.
-
-    Calls `str.join` element-wise.
-
-    Parameters
-    ----------
-    sep : array_like of str or unicode
-    seq : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input types
-
-    See Also
-    --------
-    str.join
-
-    Examples
-    --------
-    >>> np.char.join('-', 'osd')
-    array('o-s-d', dtype='<U5')
-
-    >>> np.char.join(['-', '.'], ['ghc', 'osd'])
-    array(['g-h-c', 'o.s.d'], dtype='<U5')
-
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(sep, object_, 'join', (seq,)), seq)
-
-
-
-def _just_dispatcher(a, width, fillchar=None):
-    return (a,)
-
-
-@array_function_dispatch(_just_dispatcher)
-def ljust(a, width, fillchar=' '):
-    """
-    Return an array with the elements of `a` left-justified in a
-    string of length `width`.
-
-    Calls `str.ljust` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    width : int
-        The length of the resulting strings
-    fillchar : str or unicode, optional
-        The character to use for padding
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.ljust
-
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = int(numpy.max(width_arr.flat))
-    if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
-        fillchar = asbytes(fillchar)
-    return _vec_string(
-        a_arr, type(a_arr.dtype)(size), 'ljust', (width_arr, fillchar))
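-
-# Usage sketch for ljust (assumed standard behavior; mirrors the center
-# example above):
-#     >>> np.char.ljust(np.array(['aAaAaA', '  aA  ', 'abBABba']), width=9, fillchar='*')
-#     array(['aAaAaA***', '  aA  ***', 'abBABba**'], dtype='<U9')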
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def lower(a):
-    """
-    Return an array with the elements converted to lowercase.
-
-    Call `str.lower` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.lower
-
-    Examples
-    --------
-    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
-    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
-    >>> np.char.lower(c)
-    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'lower')
-
-
-def _strip_dispatcher(a, chars=None):
-    return (a,)
-
-
-@array_function_dispatch(_strip_dispatcher)
-def lstrip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the leading characters
-    removed.
-
-    Calls `str.lstrip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like, {str, unicode}
-        Input array.
-
-    chars : {str, unicode}, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a prefix; rather, all combinations of its values are
-        stripped.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.lstrip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-
-    The 'a' in the second element, c[1], is not stripped, because it is
-    preceded by leading whitespace.
-
-    >>> np.char.lstrip(c, 'a')
-    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')
-
-
-    >>> np.char.lstrip(c, 'A') # leaves c unchanged
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
-    ... # XXX: is this a regression? This used to return True
-    ... # np.char.lstrip(c,'') does not modify c at all.
-    False
-    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
-    True
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
-
-
-def _partition_dispatcher(a, sep):
-    return (a,)
-
-
-@array_function_dispatch(_partition_dispatcher)
-def partition(a, sep):
-    """
-    Partition each element in `a` around `sep`.
-
-    Calls `str.partition` element-wise.
-
-    For each element in `a`, split the element at the first
-    occurrence of `sep`, and return 3 strings containing the part
-    before the separator, the separator itself, and the part after
-    the separator. If the separator is not found, return 3 strings
-    containing the string itself, followed by two empty strings.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array
-    sep : {str, unicode}
-        Separator to split each string element in `a`.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type.
-        The output array will have an extra dimension with 3
-        elements per input element.
-
-    See Also
-    --------
-    str.partition
-
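-    Examples
-    --------
-    For illustration (one extra trailing dimension of size 3 is added,
-    as described above):
-
-    >>> x = np.array(["Numpy is nice!"])
-    >>> np.char.partition(x, " ")
-    array([['Numpy', ' ', 'is nice!']], dtype='<U8')
-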
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'partition', (sep,)), a)
-
-
-def _replace_dispatcher(a, old, new, count=None):
-    return (a,)
-
-
-@array_function_dispatch(_replace_dispatcher)
-def replace(a, old, new, count=None):
-    """
-    For each element in `a`, return a copy of the string with all
-    occurrences of substring `old` replaced by `new`.
-
-    Calls `str.replace` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    old, new : str or unicode
-
-    count : int, optional
-        If the optional argument `count` is given, only the first
-        `count` occurrences are replaced.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.replace
-
-    Examples
-    --------
-    >>> a = np.array(["That is a mango", "Monkeys eat mangos"])
-    >>> np.char.replace(a, 'mango', 'banana')
-    array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')
-
-    >>> a = np.array(["The dish is fresh", "This is it"])
-    >>> np.char.replace(a, 'is', 'was')
-    array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'replace', [old, new] + _clean_args(count)), a)
-
-
-@array_function_dispatch(_count_dispatcher)
-def rfind(a, sub, start=0, end=None):
-    """
-    For each element in `a`, return the highest index in the string
-    where substring `sub` is found, such that `sub` is contained
-    within [`start`, `end`].
-
-    Calls `str.rfind` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-        Optional arguments `start` and `end` are interpreted as in
-        slice notation.
-
-    Returns
-    -------
-    out : ndarray
-       Output array of ints.  Return -1 on failure.
-
-    See Also
-    --------
-    str.rfind
-
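-    Examples
-    --------
-    For example (a failed search returns -1):
-
-    >>> a = np.array(["Computer Science"])
-    >>> np.char.rfind(a, "Science")
-    array([9])
-    >>> np.char.rfind(a, "Physics")
-    array([-1])
-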
-    """
-    return _vec_string(
-        a, int_, 'rfind', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_count_dispatcher)
-def rindex(a, sub, start=0, end=None):
-    """
-    Like `rfind`, but raises `ValueError` when the substring `sub` is
-    not found.
-
-    Calls `str.rindex` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-
-    Returns
-    -------
-    out : ndarray
-       Output array of ints.
-
-    See Also
-    --------
-    rfind, str.rindex
-
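-    Examples
-    --------
-    For example (unlike `rfind`, a failed search raises ``ValueError``):
-
-    >>> a = np.array(["Computer Science"])
-    >>> np.char.rindex(a, "Science")
-    array([9])
-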
-    """
-    return _vec_string(
-        a, int_, 'rindex', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_just_dispatcher)
-def rjust(a, width, fillchar=' '):
-    """
-    Return an array with the elements of `a` right-justified in a
-    string of length `width`.
-
-    Calls `str.rjust` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    width : int
-        The length of the resulting strings
-    fillchar : str or unicode, optional
-        The character to use for padding
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.rjust
-
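-    Examples
-    --------
-    A short sketch (outputs follow ``str.rjust`` element-wise):
-
-    >>> a = np.array(['1', '22', '333'])
-    >>> np.char.rjust(a, 5, fillchar='0')
-    array(['00001', '00022', '00333'], dtype='<U5')
-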
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = int(numpy.max(width_arr.flat))
-    if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
-        fillchar = asbytes(fillchar)
-    return _vec_string(
-        a_arr, type(a_arr.dtype)(size), 'rjust', (width_arr, fillchar))
-
-
-@array_function_dispatch(_partition_dispatcher)
-def rpartition(a, sep):
-    """
-    Partition (split) each element around the right-most separator.
-
-    Calls `str.rpartition` element-wise.
-
-    For each element in `a`, split the element as the last
-    occurrence of `sep`, and return 3 strings containing the part
-    before the separator, the separator itself, and the part after
-    the separator. If the separator is not found, return 3 strings
-    containing the string itself, followed by two empty strings.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-        Input array
-    sep : str or unicode
-        Right-most separator to split each element in array.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of string or unicode, depending on input
-        type.  The output array will have an extra dimension with
-        3 elements per input element.
-
-    See Also
-    --------
-    str.rpartition
-
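-    Examples
-    --------
-    For illustration, splitting on the right-most ``'A'``:
-
-    >>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> np.char.rpartition(a, 'A')
-    array([['aAaAa', 'A', ''],
-           ['  a', 'A', '  '],
-           ['abB', 'A', 'Bba']], dtype='<U5')
-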
-    """
-    return _to_bytes_or_str_array(
-        _vec_string(a, object_, 'rpartition', (sep,)), a)
-
-
-def _split_dispatcher(a, sep=None, maxsplit=None):
-    return (a,)
-
-
-@array_function_dispatch(_split_dispatcher)
-def rsplit(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.rsplit` element-wise.
-
-    Except for splitting from the right, `rsplit`
-    behaves like `split`.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-        If `sep` is not specified or None, any whitespace string
-        is a separator.
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done,
-        the rightmost ones.
-
-    Returns
-    -------
-    out : ndarray
-       Array of list objects
-
-    See Also
-    --------
-    str.rsplit, split
-
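-    Examples
-    --------
-    For example, splitting each element on ``'A'``:
-
-    >>> a = np.array(['aAaAaA', 'abBABba'])
-    >>> np.char.rsplit(a, 'A')
-    array([list(['a', 'a', 'a', '']), list(['abB', 'Bba'])], dtype=object)
-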
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
-
-
-def _strip_dispatcher(a, chars=None):
-    return (a,)
-
-
-@array_function_dispatch(_strip_dispatcher)
-def rstrip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the trailing
-    characters removed.
-
-    Calls `str.rstrip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-       The `chars` argument is a string specifying the set of
-       characters to be removed. If omitted or None, the `chars`
-       argument defaults to removing whitespace. The `chars` argument
-       is not a suffix; rather, all combinations of its values are
-       stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.rstrip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
-    array([b'aAaAaA', b'abBABba'], dtype='|S7')
-    >>> np.char.rstrip(c, b'a')
-    array([b'aAaAaA', b'abBABb'], dtype='|S7')
-    >>> np.char.rstrip(c, b'A')
-    array([b'aAaAa', b'abBABba'], dtype='|S7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
-
-
-@array_function_dispatch(_split_dispatcher)
-def split(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.split` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-       If `sep` is not specified or None, any whitespace string is a
-       separator.
-
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See Also
-    --------
-    str.split, rsplit
-
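-    Examples
-    --------
-    For example (the result is an object array of Python lists):
-
-    >>> a = np.array(['1,2,3', '4,5'])
-    >>> np.char.split(a, ',')
-    array([list(['1', '2', '3']), list(['4', '5'])], dtype=object)
-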
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'split', [sep] + _clean_args(maxsplit))
-
-
-def _splitlines_dispatcher(a, keepends=None):
-    return (a,)
-
-
-@array_function_dispatch(_splitlines_dispatcher)
-def splitlines(a, keepends=None):
-    """
-    For each element in `a`, return a list of the lines in the
-    element, breaking at line boundaries.
-
-    Calls `str.splitlines` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    keepends : bool, optional
-        Line breaks are not included in the resulting list unless
-        keepends is given and true.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See Also
-    --------
-    str.splitlines
-
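-    Examples
-    --------
-    A minimal example:
-
-    >>> a = np.array(['ab\ncd', 'ef'])
-    >>> np.char.splitlines(a)
-    array([list(['ab', 'cd']), list(['ef'])], dtype=object)
-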
-    """
-    return _vec_string(
-        a, object_, 'splitlines', _clean_args(keepends))
-
-
-def _startswith_dispatcher(a, prefix, start=None, end=None):
-    return (a,)
-
-
-@array_function_dispatch(_startswith_dispatcher)
-def startswith(a, prefix, start=0, end=None):
-    """
-    Returns a boolean array which is `True` where the string element
-    in `a` starts with `prefix`, otherwise `False`.
-
-    Calls `str.startswith` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    prefix : str
-
-    start, end : int, optional
-        With optional `start`, test beginning at that position. With
-        optional `end`, stop comparing at that position.
-
-    Returns
-    -------
-    out : ndarray
-        Array of booleans
-
-    See Also
-    --------
-    str.startswith
-
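-    Examples
-    --------
-    For example (outputs follow ``str.startswith`` element-wise):
-
-    >>> s = np.array(['foo', 'bar'])
-    >>> np.char.startswith(s, 'fo')
-    array([ True, False])
-    >>> np.char.startswith(s, 'o', start=1, end=2)
-    array([ True, False])
-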
-    """
-    return _vec_string(
-        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
-
-
-@array_function_dispatch(_strip_dispatcher)
-def strip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the leading and
-    trailing characters removed.
-
-    Calls `str.strip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-       The `chars` argument is a string specifying the set of
-       characters to be removed. If omitted or None, the `chars`
-       argument defaults to removing whitespace. The `chars` argument
-       is not a prefix or suffix; rather, all combinations of its
-       values are stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.strip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> np.char.strip(c)
-    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
-    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
-    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
-    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
-    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def swapcase(a):
-    """
-    Return element-wise a copy of the string with
-    uppercase characters converted to lowercase and vice versa.
-
-    Calls `str.swapcase` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.swapcase
-
-    Examples
-    --------
-    >>> c = np.array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], 'S5'); c
-    array([b'a1B c', b'1b Ca', b'b Ca1', b'cA1b'], dtype='|S5')
-    >>> np.char.swapcase(c)
-    array([b'A1b C', b'1B cA', b'B cA1', b'Ca1B'], dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def title(a):
-    """
-    Return element-wise title cased version of string or unicode.
-
-    Title case words start with uppercase characters, all remaining cased
-    characters are lowercase.
-
-    Calls `str.title` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.title
-
-    Examples
-    --------
-    >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'ca1b'], 'S5'); c
-    array([b'a1b c', b'1b ca', b'b ca1', b'ca1b'], dtype='|S5')
-    >>> np.char.title(c)
-    array([b'A1B C', b'1B Ca', b'B Ca1', b'Ca1B'], dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'title')
-
-
-def _translate_dispatcher(a, table, deletechars=None):
-    return (a,)
-
-
-@array_function_dispatch(_translate_dispatcher)
-def translate(a, table, deletechars=None):
-    """
-    For each element in `a`, return a copy of the string where all
-    characters occurring in the optional argument `deletechars` are
-    removed, and the remaining characters have been mapped through the
-    given translation table.
-
-    Calls `str.translate` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    table : str of length 256
-
-    deletechars : str
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.translate
-
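-    Examples
-    --------
-    For illustration, using a table built with ``str.maketrans`` (for
-    Unicode arrays the table is an ordinal mapping):
-
-    >>> a = np.array(['a1b c', '1bca', 'bca1'])
-    >>> np.char.translate(a, str.maketrans('abc', 'xyz'))
-    array(['x1y z', '1yzx', 'yzx1'], dtype='<U5')
-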
-    """
-    a_arr = numpy.asarray(a)
-    if issubclass(a_arr.dtype.type, str_):
-        return _vec_string(
-            a_arr, a_arr.dtype, 'translate', (table,))
-    else:
-        return _vec_string(
-            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def upper(a):
-    """
-    Return an array with the elements converted to uppercase.
-
-    Calls `str.upper` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.upper
-
-    Examples
-    --------
-    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
-    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
-    >>> np.char.upper(c)
-    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'upper')
-
-
-def _zfill_dispatcher(a, width):
-    return (a,)
-
-
-@array_function_dispatch(_zfill_dispatcher)
-def zfill(a, width):
-    """
-    Return the numeric string left-filled with zeros
-
-    Calls `str.zfill` element-wise.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-    width : int
-        Width of string to left-fill elements in `a`.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See Also
-    --------
-    str.zfill
-
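-    Examples
-    --------
-    For example:
-
-    >>> np.char.zfill(['1', '-1', '+1'], 3)
-    array(['001', '-01', '+01'], dtype='<U3')
-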
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = int(numpy.max(width_arr.flat))
-    return _vec_string(
-        a_arr, type(a_arr.dtype)(size), 'zfill', (width_arr,))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isnumeric(a):
-    """
-    For each element, return True if there are only numeric
-    characters in the element.
-
-    Calls `str.isnumeric` element-wise.
-
-    Numeric characters include digit characters, and all characters
-    that have the Unicode numeric value property, e.g. ``U+2155,
-    VULGAR FRACTION ONE FIFTH``.
-
-    Parameters
-    ----------
-    a : array_like, unicode
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, bool
-        Array of booleans of same shape as `a`.
-
-    See Also
-    --------
-    str.isnumeric
-
-    Examples
-    --------
-    >>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII'])
-    array([ True, False, False, False, False])
-
-    """
-    if not _is_unicode(a):
-        raise TypeError("isnumeric is only available for Unicode strings and arrays")
-    return _vec_string(a, bool_, 'isnumeric')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isdecimal(a):
-    """
-    For each element, return True if there are only decimal
-    characters in the element.
-
-    Calls `str.isdecimal` element-wise.
-
-    Decimal characters include digit characters, and all characters
-    that can be used to form decimal-radix numbers,
-    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
-
-    Parameters
-    ----------
-    a : array_like, unicode
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, bool
-        Array of booleans identical in shape to `a`.
-
-    See Also
-    --------
-    str.isdecimal
-
-    Examples
-    --------
-    >>> np.char.isdecimal(['12345', '4.99', '123ABC', ''])
-    array([ True, False, False, False])
-
-    """ 
-    if not _is_unicode(a):
-        raise TypeError(
-            "isdecimal is only available for Unicode strings and arrays")
-    return _vec_string(a, bool_, 'isdecimal')
-
-
-@set_module('numpy')
-class chararray(ndarray):
-    """
-    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
-              strides=None, order=None)
-
-    Provides a convenient view on arrays of string and unicode values.
-
-    .. note::
-       The `chararray` class exists for backwards compatibility with
-       Numarray, it is not recommended for new development. Starting from numpy
-       1.4, if one needs arrays of strings, it is recommended to use arrays of
-       `dtype` `object_`, `bytes_` or `str_`, and use the free functions
-       in the `numpy.char` module for fast vectorized string operations.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-      1) values automatically have whitespace removed from the end
-         when indexed
-
-      2) comparison operators automatically remove whitespace from the
-         end when comparing values
-
-      3) vectorized string operations are provided as methods
-         (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
-
-    chararrays should be created using `numpy.char.array` or
-    `numpy.char.asarray`, rather than this constructor directly.
-
-    This constructor creates the array, using `buffer` (with `offset`
-    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
-    constructs a new array with `strides` in "C order", unless both
-    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
-    is in "Fortran order".
-
-    Methods
-    -------
-    astype
-    argsort
-    copy
-    count
-    decode
-    dump
-    dumps
-    encode
-    endswith
-    expandtabs
-    fill
-    find
-    flatten
-    getfield
-    index
-    isalnum
-    isalpha
-    isdecimal
-    isdigit
-    islower
-    isnumeric
-    isspace
-    istitle
-    isupper
-    item
-    join
-    ljust
-    lower
-    lstrip
-    nonzero
-    put
-    ravel
-    repeat
-    replace
-    reshape
-    resize
-    rfind
-    rindex
-    rjust
-    rsplit
-    rstrip
-    searchsorted
-    setfield
-    setflags
-    sort
-    split
-    splitlines
-    squeeze
-    startswith
-    strip
-    swapaxes
-    swapcase
-    take
-    title
-    tofile
-    tolist
-    tostring
-    translate
-    transpose
-    upper
-    view
-    zfill
-
-    Parameters
-    ----------
-    shape : tuple
-        Shape of the array.
-    itemsize : int, optional
-        Length of each array element, in number of characters. Default is 1.
-    unicode : bool, optional
-        Are the array elements of type unicode (True) or string (False).
-        Default is False.
-    buffer : object exposing the buffer interface or str, optional
-        Memory address of the start of the array data.  Default is None,
-        in which case a new array is created.
-    offset : int, optional
-        Byte offset from the start of the data in `buffer` at which the
-        array starts. Default is 0. Needs to be >= 0.
-    strides : array_like of ints, optional
-        Strides for the array (see `ndarray.strides` for full description).
-        Default is None.
-    order : {'C', 'F'}, optional
-        The order in which the array data is stored in memory: 'C' ->
-        "row major" order (the default), 'F' -> "column major"
-        (Fortran) order.
-
-    Examples
-    --------
-    >>> charar = np.chararray((3, 3))
-    >>> charar[:] = 'a'
-    >>> charar
-    chararray([[b'a', b'a', b'a'],
-               [b'a', b'a', b'a'],
-               [b'a', b'a', b'a']], dtype='|S1')
-
-    >>> charar = np.chararray(charar.shape, itemsize=5)
-    >>> charar[:] = 'abc'
-    >>> charar
-    chararray([[b'abc', b'abc', b'abc'],
-               [b'abc', b'abc', b'abc'],
-               [b'abc', b'abc', b'abc']], dtype='|S5')
-
-    """
-    def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
-                offset=0, strides=None, order='C'):
-        global _globalvar
-
-        if unicode:
-            dtype = str_
-        else:
-            dtype = bytes_
-
-        # force itemsize to be a Python int, since using NumPy integer
-        # types results in itemsize.itemsize being used as the size of
-        # strings in the new array.
-        itemsize = int(itemsize)
-
-        if isinstance(buffer, str):
-            # unicode objects do not have the buffer interface
-            filler = buffer
-            buffer = None
-        else:
-            filler = None
-
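-        # _globalvar temporarily disables the dtype check in
-        # __array_finalize__ while the new chararray is constructed.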
-        _globalvar = 1
-        if buffer is None:
-            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
-                                   order=order)
-        else:
-            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
-                                   buffer=buffer,
-                                   offset=offset, strides=strides,
-                                   order=order)
-        if filler is not None:
-            self[...] = filler
-        _globalvar = 0
-        return self
-
-    def __array_finalize__(self, obj):
-        # The 'b' dtype char is a special case because it is used for
-        # reconstructing.
-        if not _globalvar and self.dtype.char not in 'SUbc':
-            raise ValueError("Can only create a chararray from string data.")
-
-    def __getitem__(self, obj):
-        val = ndarray.__getitem__(self, obj)
-
-        if isinstance(val, character):
-            temp = val.rstrip()
-            if len(temp) == 0:
-                val = ''
-            else:
-                val = temp
-
-        return val
-
-    # IMPLEMENTATION NOTE: Most of the methods of this class are
-    # direct delegations to the free functions in this module.
-    # However, those that return an array of strings should instead
-    # return a chararray, so some extra wrapping is required.
-
-    def __eq__(self, other):
-        """
-        Return (self == other) element-wise.
-
-        See Also
-        --------
-        equal
-        """
-        return equal(self, other)
-
-    def __ne__(self, other):
-        """
-        Return (self != other) element-wise.
-
-        See Also
-        --------
-        not_equal
-        """
-        return not_equal(self, other)
-
-    def __ge__(self, other):
-        """
-        Return (self >= other) element-wise.
-
-        See Also
-        --------
-        greater_equal
-        """
-        return greater_equal(self, other)
-
-    def __le__(self, other):
-        """
-        Return (self <= other) element-wise.
-
-        See Also
-        --------
-        less_equal
-        """
-        return less_equal(self, other)
-
-    def __gt__(self, other):
-        """
-        Return (self > other) element-wise.
-
-        See Also
-        --------
-        greater
-        """
-        return greater(self, other)
-
-    def __lt__(self, other):
-        """
-        Return (self < other) element-wise.
-
-        See Also
-        --------
-        less
-        """
-        return less(self, other)
-
-    def __add__(self, other):
-        """
-        Return (self + other), that is string concatenation,
-        element-wise for a pair of array_likes of str or unicode.
-
-        See Also
-        --------
-        add
-        """
-        return asarray(add(self, other))
-
-    def __radd__(self, other):
-        """
-        Return (other + self), that is string concatenation,
-        element-wise for a pair of array_likes of `bytes_` or `str_`.
-
-        See Also
-        --------
-        add
-        """
-        return asarray(add(numpy.asarray(other), self))
-
-    def __mul__(self, i):
-        """
-        Return (self * i), that is string multiple concatenation,
-        element-wise.
-
-        See Also
-        --------
-        multiply
-        """
-        return asarray(multiply(self, i))
-
-    def __rmul__(self, i):
-        """
-        Return (self * i), that is string multiple concatenation,
-        element-wise.
-
-        See Also
-        --------
-        multiply
-        """
-        return asarray(multiply(self, i))
-
-    def __mod__(self, i):
-        """
-        Return (self % i), that is pre-Python 2.6 string formatting
-        (interpolation), element-wise for a pair of array_likes of `bytes_`
-        or `str_`.
-
-        See Also
-        --------
-        mod
-        """
-        return asarray(mod(self, i))
-
-    def __rmod__(self, other):
-        return NotImplemented
-
-    def argsort(self, axis=-1, kind=None, order=None):
-        """
-        Return the indices that sort the array lexicographically.
-
-        For full documentation see `numpy.argsort`, for which this method is
-        in fact merely a "thin wrapper."
-
-        Examples
-        --------
-        >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
-        >>> c = c.view(np.chararray); c
-        chararray([b'a1b c', b'1b ca', b'b ca1', b'Ca1b'], dtype='|S5')
-        >>> c[c.argsort()]
-        chararray([b'1b ca', b'Ca1b', b'a1b c', b'b ca1'], dtype='|S5')
-
-        """
-        return self.__array__().argsort(axis, kind, order)
-    argsort.__doc__ = ndarray.argsort.__doc__
-
-    def capitalize(self):
-        """
-        Return a copy of `self` with only the first character of each element
-        capitalized.
-
-        See Also
-        --------
-        char.capitalize
-
-        """
-        return asarray(capitalize(self))
-
-    def center(self, width, fillchar=' '):
-        """
-        Return a copy of `self` with its elements centered in a
-        string of length `width`.
-
-        See Also
-        --------
-        center
-        """
-        return asarray(center(self, width, fillchar))
-
-    def count(self, sub, start=0, end=None):
-        """
-        Returns an array with the number of non-overlapping occurrences of
-        substring `sub` in the range [`start`, `end`].
-
-        See Also
-        --------
-        char.count
-
-        """
-        return count(self, sub, start, end)
-
-    def decode(self, encoding=None, errors=None):
-        """
-        Calls ``bytes.decode`` element-wise.
-
-        See Also
-        --------
-        char.decode
-
-        """
-        return decode(self, encoding, errors)
-
-    def encode(self, encoding=None, errors=None):
-        """
-        Calls `str.encode` element-wise.
-
-        See Also
-        --------
-        char.encode
-
-        """
-        return encode(self, encoding, errors)
-
-    def endswith(self, suffix, start=0, end=None):
-        """
-        Returns a boolean array which is `True` where the string element
-        in `self` ends with `suffix`, otherwise `False`.
-
-        See Also
-        --------
-        char.endswith
-
-        """
-        return endswith(self, suffix, start, end)
-
-    def expandtabs(self, tabsize=8):
-        """
-        Return a copy of each string element where all tab characters are
-        replaced by one or more spaces.
-
-        See Also
-        --------
-        char.expandtabs
-
-        """
-        return asarray(expandtabs(self, tabsize))
-
-    def find(self, sub, start=0, end=None):
-        """
-        For each element, return the lowest index in the string where
-        substring `sub` is found.
-
-        See Also
-        --------
-        char.find
-
-        """
-        return find(self, sub, start, end)
-
-    def index(self, sub, start=0, end=None):
-        """
-        Like `find`, but raises `ValueError` when the substring is not found.
-
-        See Also
-        --------
-        char.index
-
-        """
-        return index(self, sub, start, end)
-
-    def isalnum(self):
-        """
-        Returns true for each element if all characters in the string
-        are alphanumeric and there is at least one character, false
-        otherwise.
-
-        See Also
-        --------
-        char.isalnum
-
-        """
-        return isalnum(self)
-
-    def isalpha(self):
-        """
-        Returns true for each element if all characters in the string
-        are alphabetic and there is at least one character, false
-        otherwise.
-
-        See Also
-        --------
-        char.isalpha
-
-        """
-        return isalpha(self)
-
-    def isdigit(self):
-        """
-        Returns true for each element if all characters in the string are
-        digits and there is at least one character, false otherwise.
-
-        See Also
-        --------
-        char.isdigit
-
-        """
-        return isdigit(self)
-
-    def islower(self):
-        """
-        Returns true for each element if all cased characters in the
-        string are lowercase and there is at least one cased character,
-        false otherwise.
-
-        See Also
-        --------
-        char.islower
-
-        """
-        return islower(self)
-
-    def isspace(self):
-        """
-        Returns true for each element if there are only whitespace
-        characters in the string and there is at least one character,
-        false otherwise.
-
-        See Also
-        --------
-        char.isspace
-
-        """
-        return isspace(self)
-
-    def istitle(self):
-        """
-        Returns true for each element if the element is a titlecased
-        string and there is at least one character, false otherwise.
-
-        See Also
-        --------
-        char.istitle
-
-        """
-        return istitle(self)
-
-    def isupper(self):
-        """
-        Returns true for each element if all cased characters in the
-        string are uppercase and there is at least one character, false
-        otherwise.
-
-        See Also
-        --------
-        char.isupper
-
-        """
-        return isupper(self)
-
-    def join(self, seq):
-        """
-        Return a string which is the concatenation of the strings in the
-        sequence `seq`.
-
-        See Also
-        --------
-        char.join
-
-        """
-        return join(self, seq)
-
-    def ljust(self, width, fillchar=' '):
-        """
-        Return an array with the elements of `self` left-justified in a
-        string of length `width`.
-
-        See Also
-        --------
-        char.ljust
-
-        """
-        return asarray(ljust(self, width, fillchar))
-
-    def lower(self):
-        """
-        Return an array with the elements of `self` converted to
-        lowercase.
-
-        See Also
-        --------
-        char.lower
-
-        """
-        return asarray(lower(self))
-
-    def lstrip(self, chars=None):
-        """
-        For each element in `self`, return a copy with the leading characters
-        removed.
-
-        See Also
-        --------
-        char.lstrip
-
-        """
-        return asarray(lstrip(self, chars))
-
-    def partition(self, sep):
-        """
-        Partition each element in `self` around `sep`.
-
-        See Also
-        --------
-        partition
-        """
-        return asarray(partition(self, sep))
-
-    def replace(self, old, new, count=None):
-        """
-        For each element in `self`, return a copy of the string with all
-        occurrences of substring `old` replaced by `new`.
-
-        See Also
-        --------
-        char.replace
-
-        """
-        return asarray(replace(self, old, new, count))
-
-    def rfind(self, sub, start=0, end=None):
-        """
-        For each element in `self`, return the highest index in the string
-        where substring `sub` is found, such that `sub` is contained
-        within [`start`, `end`].
-
-        See Also
-        --------
-        char.rfind
-
-        """
-        return rfind(self, sub, start, end)
-
-    def rindex(self, sub, start=0, end=None):
-        """
-        Like `rfind`, but raises `ValueError` when the substring `sub` is
-        not found.
-
-        See Also
-        --------
-        char.rindex
-
-        """
-        return rindex(self, sub, start, end)
-
-    def rjust(self, width, fillchar=' '):
-        """
-        Return an array with the elements of `self`
-        right-justified in a string of length `width`.
-
-        See Also
-        --------
-        char.rjust
-
-        """
-        return asarray(rjust(self, width, fillchar))
-
-    def rpartition(self, sep):
-        """
-        Partition each element in `self` around `sep`.
-
-        See Also
-        --------
-        rpartition
-        """
-        return asarray(rpartition(self, sep))
-
-    def rsplit(self, sep=None, maxsplit=None):
-        """
-        For each element in `self`, return a list of the words in
-        the string, using `sep` as the delimiter string.
-
-        See Also
-        --------
-        char.rsplit
-
-        """
-        return rsplit(self, sep, maxsplit)
-
-    def rstrip(self, chars=None):
-        """
-        For each element in `self`, return a copy with the trailing
-        characters removed.
-
-        See Also
-        --------
-        char.rstrip
-
-        """
-        return asarray(rstrip(self, chars))
-
-    def split(self, sep=None, maxsplit=None):
-        """
-        For each element in `self`, return a list of the words in the
-        string, using `sep` as the delimiter string.
-
-        See Also
-        --------
-        char.split
-
-        """
-        return split(self, sep, maxsplit)
-
-    def splitlines(self, keepends=None):
-        """
-        For each element in `self`, return a list of the lines in the
-        element, breaking at line boundaries.
-
-        See Also
-        --------
-        char.splitlines
-
-        """
-        return splitlines(self, keepends)
-
-    def startswith(self, prefix, start=0, end=None):
-        """
-        Returns a boolean array which is `True` where the string element
-        in `self` starts with `prefix`, otherwise `False`.
-
-        See Also
-        --------
-        char.startswith
-
-        """
-        return startswith(self, prefix, start, end)
-
-    def strip(self, chars=None):
-        """
-        For each element in `self`, return a copy with the leading and
-        trailing characters removed.
-
-        See Also
-        --------
-        char.strip
-
-        """
-        return asarray(strip(self, chars))
-
-    def swapcase(self):
-        """
-        For each element in `self`, return a copy of the string with
-        uppercase characters converted to lowercase and vice versa.
-
-        See Also
-        --------
-        char.swapcase
-
-        """
-        return asarray(swapcase(self))
-
-    def title(self):
-        """
-        For each element in `self`, return a titlecased version of the
-        string: words start with uppercase characters, all remaining cased
-        characters are lowercase.
-
-        See Also
-        --------
-        char.title
-
-        """
-        return asarray(title(self))
-
-    def translate(self, table, deletechars=None):
-        """
-        For each element in `self`, return a copy of the string where
-        all characters occurring in the optional argument
-        `deletechars` are removed, and the remaining characters have
-        been mapped through the given translation table.
-
-        See Also
-        --------
-        char.translate
-
-        """
-        return asarray(translate(self, table, deletechars))
-
-    def upper(self):
-        """
-        Return an array with the elements of `self` converted to
-        uppercase.
-
-        See Also
-        --------
-        char.upper
-
-        """
-        return asarray(upper(self))
-
-    def zfill(self, width):
-        """
-        Return the numeric string left-filled with zeros in a string of
-        length `width`.
-
-        See Also
-        --------
-        char.zfill
-
-        """
-        return asarray(zfill(self, width))
-
-    def isnumeric(self):
-        """
-        For each element in `self`, return True if there are only
-        numeric characters in the element.
-
-        See Also
-        --------
-        char.isnumeric
-
-        """
-        return isnumeric(self)
-
-    def isdecimal(self):
-        """
-        For each element in `self`, return True if there are only
-        decimal characters in the element.
-
-        See Also
-        --------
-        char.isdecimal
-
-        """
-        return isdecimal(self)
-
-
-@set_module("numpy.char")
-def array(obj, itemsize=None, copy=True, unicode=None, order=None):
-    """
-    Create a `chararray`.
-
-    .. note::
-       The `chararray` class is provided for numarray backward-compatibility.
-       New code (not concerned with numarray compatibility) should use
-       arrays of type `bytes_` or `str_` and use the free functions
-       in :mod:`numpy.char <numpy.core.defchararray>` for fast
-       vectorized string operations instead.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-      1) values automatically have whitespace removed from the end
-         when indexed
-
-      2) comparison operators automatically remove whitespace from the
-         end when comparing values
-
-      3) vectorized string operations are provided as methods
-         (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
-
-    Parameters
-    ----------
-    obj : array of str or unicode-like
-
-    itemsize : int, optional
-        `itemsize` is the number of characters per scalar in the
-        resulting array.  If `itemsize` is None, and `obj` is an
-        object array or a Python list, the `itemsize` will be
-        automatically determined.  If `itemsize` is provided and `obj`
-        is of type str or unicode, then the `obj` string will be
-        chunked into `itemsize` pieces.
-
-    copy : bool, optional
-        If true (default), then the object is copied.  Otherwise, a copy
-        will only be made if __array__ returns a copy, if obj is a
-        nested sequence, or if a copy is needed to satisfy any of the other
-        requirements (`itemsize`, unicode, `order`, etc.).
-
-    unicode : bool, optional
-        When true, the resulting `chararray` can contain Unicode
-        characters, when false only 8-bit characters.  If unicode is
-        None and `obj` is one of the following:
-
-          - a `chararray`,
-          - an ndarray of type `str` or `unicode`
-          - a Python str or unicode object,
-
-        then the unicode setting of the output array will be
-        automatically determined.
-
-    order : {'C', 'F', 'A'}, optional
-        Specify the order of the array.  If order is 'C' (default), then the
-        array will be in C-contiguous order (last-index varies the
-        fastest).  If order is 'F', then the returned array
-        will be in Fortran-contiguous order (first-index varies the
-        fastest).  If order is 'A', then the returned array may
-        be in any order (either C-, Fortran-contiguous, or even
-        discontiguous).
-    """
-    if isinstance(obj, (bytes, str)):
-        if unicode is None:
-            if isinstance(obj, str):
-                unicode = True
-            else:
-                unicode = False
-
-        if itemsize is None:
-            itemsize = len(obj)
-        shape = len(obj) // itemsize
-
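-        # For instance, np.char.array('abcabc', itemsize=3) yields a
-        # length-2 chararray whose elements are both 'abc'.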
-        return chararray(shape, itemsize=itemsize, unicode=unicode,
-                         buffer=obj, order=order)
-
-    if isinstance(obj, (list, tuple)):
-        obj = numpy.asarray(obj)
-
-    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
-        # If we just have a vanilla chararray, create a chararray
-        # view around it.
-        if not isinstance(obj, chararray):
-            obj = obj.view(chararray)
-
-        if itemsize is None:
-            itemsize = obj.itemsize
-            # itemsize is in 8-bit chars, so for Unicode, we need
-            # to divide by the size of a single Unicode character,
-            # which for NumPy is always 4
-            if issubclass(obj.dtype.type, str_):
-                itemsize //= 4
-
-        if unicode is None:
-            if issubclass(obj.dtype.type, str_):
-                unicode = True
-            else:
-                unicode = False
-
-        if unicode:
-            dtype = str_
-        else:
-            dtype = bytes_
-
-        if order is not None:
-            obj = numpy.asarray(obj, order=order)
-        if (copy or
-                (itemsize != obj.itemsize) or
-                (not unicode and isinstance(obj, str_)) or
-                (unicode and isinstance(obj, bytes_))):
-            obj = obj.astype((dtype, int(itemsize)))
-        return obj
-
-    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
-        if itemsize is None:
-            # Since no itemsize was specified, convert the input array to
-            # a list so the ndarray constructor will automatically
-            # determine the itemsize for us.
-            obj = obj.tolist()
-            # Fall through to the default case
-
-    if unicode:
-        dtype = str_
-    else:
-        dtype = bytes_
-
-    if itemsize is None:
-        val = narray(obj, dtype=dtype, order=order, subok=True)
-    else:
-        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
-    return val.view(chararray)
-
-
-@set_module("numpy.char")
-def asarray(obj, itemsize=None, unicode=None, order=None):
-    """
-    Convert the input to a `chararray`, copying the data only if
-    necessary.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-      1) values automatically have whitespace removed from the end
-         when indexed
-
-      2) comparison operators automatically remove whitespace from the
-         end when comparing values
-
-      3) vectorized string operations are provided as methods
-         (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
-
-    Parameters
-    ----------
-    obj : array of str or unicode-like
-
-    itemsize : int, optional
-        `itemsize` is the number of characters per scalar in the
-        resulting array.  If `itemsize` is None, and `obj` is an
-        object array or a Python list, the `itemsize` will be
-        automatically determined.  If `itemsize` is provided and `obj`
-        is of type str or unicode, then the `obj` string will be
-        chunked into `itemsize` pieces.
-
-    unicode : bool, optional
-        When true, the resulting `chararray` can contain Unicode
-        characters, when false only 8-bit characters.  If unicode is
-        None and `obj` is one of the following:
-
-          - a `chararray`,
-          - an ndarray of type `str` or `unicode`
-          - a Python str or unicode object,
-
-        then the unicode setting of the output array will be
-        automatically determined.
-
-    order : {'C', 'F'}, optional
-        Specify the order of the array.  If order is 'C' (default), then the
-        array will be in C-contiguous order (last-index varies the
-        fastest).  If order is 'F', then the returned array
-        will be in Fortran-contiguous order (first-index varies the
-        fastest).
-    """
-    return array(obj, itemsize, copy=False,
-                 unicode=unicode, order=order)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/range.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/range.py
deleted file mode 100644
index 1e8a3851b406e9a811fe4f2f07b283b039cb1731..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/range.py
+++ /dev/null
@@ -1,1149 +0,0 @@
-from __future__ import annotations
-
-from collections.abc import (
-    Hashable,
-    Iterator,
-)
-from datetime import timedelta
-import operator
-from sys import getsizeof
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    cast,
-)
-
-import numpy as np
-
-from pandas._libs import (
-    index as libindex,
-    lib,
-)
-from pandas._libs.algos import unique_deltas
-from pandas._libs.lib import no_default
-from pandas.compat.numpy import function as nv
-from pandas.util._decorators import (
-    cache_readonly,
-    doc,
-)
-
-from pandas.core.dtypes.common import (
-    ensure_platform_int,
-    ensure_python_int,
-    is_float,
-    is_integer,
-    is_scalar,
-    is_signed_integer_dtype,
-)
-from pandas.core.dtypes.generic import ABCTimedeltaIndex
-
-from pandas.core import ops
-import pandas.core.common as com
-from pandas.core.construction import extract_array
-import pandas.core.indexes.base as ibase
-from pandas.core.indexes.base import (
-    Index,
-    maybe_extract_name,
-)
-from pandas.core.ops.common import unpack_zerodim_and_defer
-
-if TYPE_CHECKING:
-    from pandas._typing import (
-        Axis,
-        Dtype,
-        NaPosition,
-        Self,
-        npt,
-    )
-_empty_range = range(0)
-_dtype_int64 = np.dtype(np.int64)
-
-
-class RangeIndex(Index):
-    """
-    Immutable Index implementing a monotonic integer range.
-
-    RangeIndex is a memory-saving special case of an Index limited to representing
-    monotonic ranges with a 64-bit dtype. Using RangeIndex may in some instances
-    improve computing speed.
-
-    This is the default index type used
-    by DataFrame and Series when no explicit index is provided by the user.
-
-    Parameters
-    ----------
-    start : int (default: 0), range, or other RangeIndex instance
-        If int and "stop" is not given, interpreted as "stop" instead.
-    stop : int (default: 0)
-    step : int (default: 1)
-    dtype : np.int64
-        Unused, accepted for homogeneity with other index types.
-    copy : bool, default False
-        Unused, accepted for homogeneity with other index types.
-    name : object, optional
-        Name to be stored in the index.
-
-    Attributes
-    ----------
-    start
-    stop
-    step
-
-    Methods
-    -------
-    from_range
-
-    See Also
-    --------
-    Index : The base pandas Index type.
-
-    Examples
-    --------
-    >>> list(pd.RangeIndex(5))
-    [0, 1, 2, 3, 4]
-
-    >>> list(pd.RangeIndex(-2, 4))
-    [-2, -1, 0, 1, 2, 3]
-
-    >>> list(pd.RangeIndex(0, 10, 2))
-    [0, 2, 4, 6, 8]
-
-    >>> list(pd.RangeIndex(2, -10, -3))
-    [2, -1, -4, -7]
-
-    >>> list(pd.RangeIndex(0))
-    []
-
-    >>> list(pd.RangeIndex(1, 0))
-    []
-    """
-
-    _typ = "rangeindex"
-    _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
-    _range: range
-    _values: np.ndarray
-
-    @property
-    def _engine_type(self) -> type[libindex.Int64Engine]:
-        return libindex.Int64Engine
-
-    # --------------------------------------------------------------------
-    # Constructors
-
-    def __new__(
-        cls,
-        start=None,
-        stop=None,
-        step=None,
-        dtype: Dtype | None = None,
-        copy: bool = False,
-        name: Hashable | None = None,
-    ) -> RangeIndex:
-        cls._validate_dtype(dtype)
-        name = maybe_extract_name(name, start, cls)
-
-        # RangeIndex
-        if isinstance(start, RangeIndex):
-            return start.copy(name=name)
-        elif isinstance(start, range):
-            return cls._simple_new(start, name=name)
-
-        # validate the arguments
-        if com.all_none(start, stop, step):
-            raise TypeError("RangeIndex(...) must be called with integers")
-
-        start = ensure_python_int(start) if start is not None else 0
-
-        if stop is None:
-            start, stop = 0, start
-        else:
-            stop = ensure_python_int(stop)
-
-        step = ensure_python_int(step) if step is not None else 1
-        if step == 0:
-            raise ValueError("Step must not be zero")
-
-        rng = range(start, stop, step)
-        return cls._simple_new(rng, name=name)
-
-    @classmethod
-    def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self:
-        """
-        Create :class:`pandas.RangeIndex` from a ``range`` object.
-
-        Returns
-        -------
-        RangeIndex
-
-        Examples
-        --------
-        >>> pd.RangeIndex.from_range(range(5))
-        RangeIndex(start=0, stop=5, step=1)
-
-        >>> pd.RangeIndex.from_range(range(2, -10, -3))
-        RangeIndex(start=2, stop=-10, step=-3)
-        """
-        if not isinstance(data, range):
-            raise TypeError(
-                f"{cls.__name__}(...) must be called with object coercible to a "
-                f"range, {repr(data)} was passed"
-            )
-        cls._validate_dtype(dtype)
-        return cls._simple_new(data, name=name)
-
-    #  error: Argument 1 of "_simple_new" is incompatible with supertype "Index";
-    #  supertype defines the argument type as
-    #  "Union[ExtensionArray, ndarray[Any, Any]]"  [override]
-    @classmethod
-    def _simple_new(  # type: ignore[override]
-        cls, values: range, name: Hashable | None = None
-    ) -> Self:
-        result = object.__new__(cls)
-
-        assert isinstance(values, range)
-
-        result._range = values
-        result._name = name
-        result._cache = {}
-        result._reset_identity()
-        result._references = None
-        return result
-
-    @classmethod
-    def _validate_dtype(cls, dtype: Dtype | None) -> None:
-        if dtype is None:
-            return
-
-        validation_func, expected = cls._dtype_validation_metadata
-        if not validation_func(dtype):
-            raise ValueError(
-                f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
-            )
-
-    # --------------------------------------------------------------------
-
-    # error: Return type "Type[Index]" of "_constructor" incompatible with return
-    # type "Type[RangeIndex]" in supertype "Index"
-    @cache_readonly
-    def _constructor(self) -> type[Index]:  # type: ignore[override]
-        """return the class to use for construction"""
-        return Index
-
-    # error: Signature of "_data" incompatible with supertype "Index"
-    @cache_readonly
-    def _data(self) -> np.ndarray:  # type: ignore[override]
-        """
-        An int array that for performance reasons is created only when needed.
-
-        The constructed array is saved in ``_cache``.
-        """
-        return np.arange(self.start, self.stop, self.step, dtype=np.int64)
-
-    def _get_data_as_items(self):
-        """return a list of tuples of start, stop, step"""
-        rng = self._range
-        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
-
-    def __reduce__(self):
-        d = {"name": self._name}
-        d.update(dict(self._get_data_as_items()))
-        return ibase._new_Index, (type(self), d), None
-
-    # --------------------------------------------------------------------
-    # Rendering Methods
-
-    def _format_attrs(self):
-        """
-        Return a list of tuples of the (attr, formatted_value)
-        """
-        attrs = self._get_data_as_items()
-        if self._name is not None:
-            attrs.append(("name", ibase.default_pprint(self._name)))
-        return attrs
-
-    def _format_data(self, name=None):
-        # we are formatting thru the attributes
-        return None
-
-    def _format_with_header(self, header: list[str], na_rep: str) -> list[str]:
-        # Equivalent to Index implementation, but faster
-        if not len(self._range):
-            return header
-        first_val_str = str(self._range[0])
-        last_val_str = str(self._range[-1])
-        max_length = max(len(first_val_str), len(last_val_str))
-
-        return header + [f"{x:<{max_length}}" for x in self._range]
-
-    # --------------------------------------------------------------------
-
-    @property
-    def start(self) -> int:
-        """
-        The value of the `start` parameter (``0`` if this was not supplied).
-
-        Examples
-        --------
-        >>> idx = pd.RangeIndex(5)
-        >>> idx.start
-        0
-
-        >>> idx = pd.RangeIndex(2, -10, -3)
-        >>> idx.start
-        2
-        """
-        # GH 25710
-        return self._range.start
-
-    @property
-    def stop(self) -> int:
-        """
-        The value of the `stop` parameter.
-
-        Examples
-        --------
-        >>> idx = pd.RangeIndex(5)
-        >>> idx.stop
-        5
-
-        >>> idx = pd.RangeIndex(2, -10, -3)
-        >>> idx.stop
-        -10
-        """
-        return self._range.stop
-
-    @property
-    def step(self) -> int:
-        """
-        The value of the `step` parameter (``1`` if this was not supplied).
-
-        Examples
-        --------
-        >>> idx = pd.RangeIndex(5)
-        >>> idx.step
-        1
-
-        >>> idx = pd.RangeIndex(2, -10, -3)
-        >>> idx.step
-        -3
-
-        Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if
-        not supplied.
-
-        >>> idx = pd.RangeIndex(1, 0)
-        >>> idx.step
-        1
-        """
-        # GH 25710
-        return self._range.step
-
-    @cache_readonly
-    def nbytes(self) -> int:
-        """
-        Return the number of bytes in the underlying data.
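-
-        Examples
-        --------
-        The result depends only on the three stored integers, not on the
-        length of the range (a sketch; exact byte counts are
-        platform-dependent):
-
-        >>> pd.RangeIndex(0, 1_000_000).nbytes == pd.RangeIndex(0, 10).nbytes
-        True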
-        """
-        rng = self._range
-        return getsizeof(rng) + sum(
-            getsizeof(getattr(rng, attr_name))
-            for attr_name in ["start", "stop", "step"]
-        )
-
-    def memory_usage(self, deep: bool = False) -> int:
-        """
-        Memory usage of my values
-
-        Parameters
-        ----------
-        deep : bool
-            Introspect the data deeply, interrogate
-            `object` dtypes for system-level memory consumption
-
-        Returns
-        -------
-        bytes used
-
-        Notes
-        -----
-        Memory usage does not include memory consumed by elements that
-        are not components of the array if deep=False
-
-        See Also
-        --------
-        numpy.ndarray.nbytes
-        """
-        return self.nbytes
-
-    @property
-    def dtype(self) -> np.dtype:
-        return _dtype_int64
-
-    @property
-    def is_unique(self) -> bool:
-        """return if the index has unique values"""
-        return True
-
-    @cache_readonly
-    def is_monotonic_increasing(self) -> bool:
-        return self._range.step > 0 or len(self) <= 1
-
-    @cache_readonly
-    def is_monotonic_decreasing(self) -> bool:
-        return self._range.step < 0 or len(self) <= 1
-
-    def __contains__(self, key: Any) -> bool:
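-        # hash() raises TypeError for unhashable keys, so membership
-        # tests fail loudly instead of silently returning False.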
-        hash(key)
-        try:
-            key = ensure_python_int(key)
-        except TypeError:
-            return False
-        return key in self._range
-
-    @property
-    def inferred_type(self) -> str:
-        return "integer"
-
-    # --------------------------------------------------------------------
-    # Indexing Methods
-
-    @doc(Index.get_loc)
-    def get_loc(self, key):
-        if is_integer(key) or (is_float(key) and key.is_integer()):
-            new_key = int(key)
-            try:
-                return self._range.index(new_key)
-            except ValueError as err:
-                raise KeyError(key) from err
-        if isinstance(key, Hashable):
-            raise KeyError(key)
-        self._check_indexing_error(key)
-        raise KeyError(key)
-
-    def _get_indexer(
-        self,
-        target: Index,
-        method: str | None = None,
-        limit: int | None = None,
-        tolerance=None,
-    ) -> npt.NDArray[np.intp]:
-        if com.any_not_none(method, tolerance, limit):
-            return super()._get_indexer(
-                target, method=method, tolerance=tolerance, limit=limit
-            )
-
-        if self.step > 0:
-            start, stop, step = self.start, self.stop, self.step
-        else:
-            # GH 28678: work on reversed range for simplicity
-            reverse = self._range[::-1]
-            start, stop, step = reverse.start, reverse.stop, reverse.step
-
-        target_array = np.asarray(target)
-        locs = target_array - start
-        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
-        locs[~valid] = -1
-        locs[valid] = locs[valid] / step
-
-        if step != self.step:
-            # We reversed this range: transform to original locs
-            locs[valid] = len(self) - 1 - locs[valid]
-        return ensure_platform_int(locs)
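The vectorized lookup above maps target values to positions with pure arithmetic: for an increasing `range(start, stop, step)`, a value `v` sits at position `(v - start) // step` exactly when `(v - start) % step == 0` and `start <= v < stop`. A standalone NumPy sketch of that check (the numbers are illustrative, not from the source):

```python
import numpy as np

# Models an increasing range(10, 30, 4), i.e. values 10, 14, 18, 22, 26.
start, stop, step = 10, 30, 4
target = np.array([14, 15, 26, 30])

locs = target - start
valid = (locs % step == 0) & (locs >= 0) & (target < stop)
locs[~valid] = -1                  # -1 marks "not found", as pandas indexers do
locs[valid] = locs[valid] // step
print(locs)                        # [ 1 -1  4 -1]
```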
-
-    @cache_readonly
-    def _should_fallback_to_positional(self) -> bool:
-        """
-        Should an integer key be treated as positional?
-        """
-        return False
-
-    # --------------------------------------------------------------------
-
-    def tolist(self) -> list[int]:
-        return list(self._range)
-
-    @doc(Index.__iter__)
-    def __iter__(self) -> Iterator[int]:
-        yield from self._range
-
-    @doc(Index._shallow_copy)
-    def _shallow_copy(self, values, name: Hashable = no_default):
-        name = self._name if name is no_default else name
-
-        if values.dtype.kind == "f":
-            return Index(values, name=name, dtype=np.float64)
-        # GH 46675 & 43885: If values is equally spaced, return a
-        # more memory-compact RangeIndex instead of Index with 64-bit dtype
-        unique_diffs = unique_deltas(values)
-        if len(unique_diffs) == 1 and unique_diffs[0] != 0:
-            diff = unique_diffs[0]
-            new_range = range(values[0], values[-1] + diff, diff)
-            return type(self)._simple_new(new_range, name=name)
-        else:
-            return self._constructor._simple_new(values, name=name)
-
-    def _view(self) -> Self:
-        result = type(self)._simple_new(self._range, name=self._name)
-        result._cache = self._cache
-        return result
-
-    @doc(Index.copy)
-    def copy(self, name: Hashable | None = None, deep: bool = False) -> Self:
-        name = self._validate_names(name=name, deep=deep)[0]
-        new_index = self._rename(name=name)
-        return new_index
-
-    def _minmax(self, meth: str):
-        no_steps = len(self) - 1
-        if no_steps == -1:
-            return np.nan
-        elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
-            return self.start
-
-        return self.start + self.step * no_steps
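Because the index is an arithmetic progression of known length, `_minmax` never scans any data: an empty range has no extreme (NaN), the start is the min (increasing) or the max (decreasing), and the other extreme is `start + step * (len - 1)`. A minimal standalone sketch of the same closed form:

```python
import math

def range_minmax(rng: range, meth: str) -> float:
    # Standalone version of the logic above; "min"/"max" selects the reduction.
    no_steps = len(rng) - 1
    if no_steps == -1:
        return math.nan                       # empty range: no min/max
    if (meth == "min" and rng.step > 0) or (meth == "max" and rng.step < 0):
        return rng.start                      # first element is the extreme
    return rng.start + rng.step * no_steps    # otherwise the last element is

assert range_minmax(range(2, -10, -3), "max") == 2    # 2, -1, -4, -7
assert range_minmax(range(2, -10, -3), "min") == -7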
-
-    def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
-        """The minimum value of the RangeIndex"""
-        nv.validate_minmax_axis(axis)
-        nv.validate_min(args, kwargs)
-        return self._minmax("min")
-
-    def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
-        """The maximum value of the RangeIndex"""
-        nv.validate_minmax_axis(axis)
-        nv.validate_max(args, kwargs)
-        return self._minmax("max")
-
-    def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
-        """
-        Returns the indices that would sort the index and its
-        underlying data.
-
-        Returns
-        -------
-        np.ndarray[np.intp]
-
-        See Also
-        --------
-        numpy.ndarray.argsort
-        """
-        ascending = kwargs.pop("ascending", True)  # EA compat
-        kwargs.pop("kind", None)  # e.g. "mergesort" is irrelevant
-        nv.validate_argsort(args, kwargs)
-
-        if self._range.step > 0:
-            result = np.arange(len(self), dtype=np.intp)
-        else:
-            result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
-
-        if not ascending:
-            result = result[::-1]
-        return result
-
-    def factorize(
-        self,
-        sort: bool = False,
-        use_na_sentinel: bool = True,
-    ) -> tuple[npt.NDArray[np.intp], RangeIndex]:
-        codes = np.arange(len(self), dtype=np.intp)
-        uniques = self
-        if sort and self.step < 0:
-            codes = codes[::-1]
-            uniques = uniques[::-1]
-        return codes, uniques
-
-    def equals(self, other: object) -> bool:
-        """
-        Determines if two Index objects contain the same elements.
-        """
-        if isinstance(other, RangeIndex):
-            return self._range == other._range
-        return super().equals(other)
-
-    def sort_values(
-        self,
-        return_indexer: bool = False,
-        ascending: bool = True,
-        na_position: NaPosition = "last",
-        key: Callable | None = None,
-    ):
-        if key is not None:
-            return super().sort_values(
-                return_indexer=return_indexer,
-                ascending=ascending,
-                na_position=na_position,
-                key=key,
-            )
-        else:
-            sorted_index = self
-            inverse_indexer = False
-            if ascending:
-                if self.step < 0:
-                    sorted_index = self[::-1]
-                    inverse_indexer = True
-            else:
-                if self.step > 0:
-                    sorted_index = self[::-1]
-                    inverse_indexer = True
-
-        if return_indexer:
-            if inverse_indexer:
-                rng = range(len(self) - 1, -1, -1)
-            else:
-                rng = range(len(self))
-            return sorted_index, RangeIndex(rng)
-        else:
-            return sorted_index
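Since a RangeIndex is always monotonic, sorting reduces to an optional reversal, and both the sorted index and the indexer stay RangeIndex objects. A quick illustration (reprs as produced by recent pandas versions):

```python
import pandas as pd

idx = pd.RangeIndex(10, 0, -2)     # 10, 8, 6, 4, 2
sorted_idx, indexer = idx.sort_values(return_indexer=True)
print(sorted_idx)                  # RangeIndex(start=2, stop=12, step=2)
print(indexer)                     # RangeIndex(start=4, stop=-1, step=-1)
```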
-
-    # --------------------------------------------------------------------
-    # Set Operations
-
-    def _intersection(self, other: Index, sort: bool = False):
-        # caller is responsible for checking self and other are both non-empty
-
-        if not isinstance(other, RangeIndex):
-            return super()._intersection(other, sort=sort)
-
-        first = self._range[::-1] if self.step < 0 else self._range
-        second = other._range[::-1] if other.step < 0 else other._range
-
-        # check whether intervals intersect
-        # deals with in- and decreasing ranges
-        int_low = max(first.start, second.start)
-        int_high = min(first.stop, second.stop)
-        if int_high <= int_low:
-            return self._simple_new(_empty_range)
-
-        # Method hint: linear Diophantine equation
-        # solve intersection problem
-        # performance hint: for identical step sizes, could use
-        # cheaper alternative
-        gcd, s, _ = self._extended_gcd(first.step, second.step)
-
-        # check whether element sets intersect
-        if (first.start - second.start) % gcd:
-            return self._simple_new(_empty_range)
-
-        # calculate parameters for the RangeIndex describing the
-        # intersection disregarding the lower bounds
-        tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
-        new_step = first.step * second.step // gcd
-        new_range = range(tmp_start, int_high, new_step)
-        new_index = self._simple_new(new_range)
-
-        # adjust index to limiting interval
-        new_start = new_index._min_fitting_element(int_low)
-        new_range = range(new_start, new_index.stop, new_index.step)
-        new_index = self._simple_new(new_range)
-
-        if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
-            new_index = new_index[::-1]
-
-        if sort is None:
-            new_index = new_index.sort_values()
-
-        return new_index
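Concretely, the intersection of two arithmetic progressions is itself arithmetic, with step `lcm(step1, step2)`; the Diophantine solve only locates a common starting point. A small check using pandas directly (values chosen for illustration):

```python
import pandas as pd

left = pd.RangeIndex(0, 30, 4)           # 0, 4, 8, ..., 28
right = pd.RangeIndex(2, 30, 6)          # 2, 8, 14, 20, 26
print(list(left.intersection(right)))    # [8, 20] -- step is lcm(4, 6) = 12
```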
-
-    def _min_fitting_element(self, lower_limit: int) -> int:
-        """Returns the smallest element greater than or equal to the limit"""
-        no_steps = -(-(lower_limit - self.start) // abs(self.step))
-        return self.start + abs(self.step) * no_steps
-
-    def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
-        """
-        Extended Euclidean algorithm to solve Bezout's identity:
-           a*x + b*y = gcd(a, b)
-        Finds one particular solution for x, y: s, t
-        Returns: gcd, s, t
-        """
-        s, old_s = 0, 1
-        t, old_t = 1, 0
-        r, old_r = b, a
-        while r:
-            quotient = old_r // r
-            old_r, r = r, old_r - quotient * r
-            old_s, s = s, old_s - quotient * s
-            old_t, t = t, old_t - quotient * t
-        return old_r, old_s, old_t
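A worked example of the helper, copied out as a standalone function so the Bezout coefficients are visible:

```python
def extended_gcd(a: int, b: int) -> tuple[int, int, int]:
    # Same algorithm as above: returns (g, s, t) with a*s + b*t == g == gcd(a, b).
    s, old_s = 0, 1
    t, old_t = 1, 0
    r, old_r = b, a
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return old_r, old_s, old_t

g, s, t = extended_gcd(6, 4)
assert (g, s, t) == (2, 1, -1) and 6 * s + 4 * t == g
```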
-
-    def _range_in_self(self, other: range) -> bool:
-        """Check if other range is contained in self"""
-        # https://stackoverflow.com/a/32481015
-        if not other:
-            return True
-        if not self._range:
-            return False
-        if len(other) > 1 and other.step % self._range.step:
-            return False
-        return other.start in self._range and other[-1] in self._range
-
-    def _union(self, other: Index, sort: bool | None):
-        """
-        Form the union of two Index objects and sorts if possible
-
-        Parameters
-        ----------
-        other : Index or array-like
-
-        sort : bool or None, default None
-            Whether to sort (monotonically increasing) the resulting index.
-            ``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted
-            ``Index`` with an int64 dtype if not.
-            ``sort=False`` can return a ``RangeIndex`` if self is monotonically
-            increasing and other is fully contained in self. Otherwise, returns
-            an unsorted ``Index`` with an int64 dtype.
-
-        Returns
-        -------
-        union : Index
-        """
-        if isinstance(other, RangeIndex):
-            if sort in (None, True) or (
-                sort is False and self.step > 0 and self._range_in_self(other._range)
-            ):
-                # GH 47557: Can still return a RangeIndex
-                # if other range in self and sort=False
-                start_s, step_s = self.start, self.step
-                end_s = self.start + self.step * (len(self) - 1)
-                start_o, step_o = other.start, other.step
-                end_o = other.start + other.step * (len(other) - 1)
-                if self.step < 0:
-                    start_s, step_s, end_s = end_s, -step_s, start_s
-                if other.step < 0:
-                    start_o, step_o, end_o = end_o, -step_o, start_o
-                if len(self) == 1 and len(other) == 1:
-                    step_s = step_o = abs(self.start - other.start)
-                elif len(self) == 1:
-                    step_s = step_o
-                elif len(other) == 1:
-                    step_o = step_s
-                start_r = min(start_s, start_o)
-                end_r = max(end_s, end_o)
-                if step_o == step_s:
-                    if (
-                        (start_s - start_o) % step_s == 0
-                        and (start_s - end_o) <= step_s
-                        and (start_o - end_s) <= step_s
-                    ):
-                        return type(self)(start_r, end_r + step_s, step_s)
-                    if (
-                        (step_s % 2 == 0)
-                        and (abs(start_s - start_o) == step_s / 2)
-                        and (abs(end_s - end_o) == step_s / 2)
-                    ):
-                        # e.g. range(0, 10, 2) and range(1, 11, 2)
-                        #  but not range(0, 20, 4) and range(1, 21, 4) GH#44019
-                        return type(self)(start_r, end_r + step_s / 2, step_s / 2)
-
-                elif step_o % step_s == 0:
-                    if (
-                        (start_o - start_s) % step_s == 0
-                        and (start_o + step_s >= start_s)
-                        and (end_o - step_s <= end_s)
-                    ):
-                        return type(self)(start_r, end_r + step_s, step_s)
-                elif step_s % step_o == 0:
-                    if (
-                        (start_s - start_o) % step_o == 0
-                        and (start_s + step_o >= start_o)
-                        and (end_s - step_o <= end_o)
-                    ):
-                        return type(self)(start_r, end_r + step_o, step_o)
-
-        return super()._union(other, sort=sort)
-
-    def _difference(self, other, sort=None):
-        # optimized set operation if we have another RangeIndex
-        self._validate_sort_keyword(sort)
-        self._assert_can_do_setop(other)
-        other, result_name = self._convert_can_do_setop(other)
-
-        if not isinstance(other, RangeIndex):
-            return super()._difference(other, sort=sort)
-
-        if sort is not False and self.step < 0:
-            return self[::-1]._difference(other)
-
-        res_name = ops.get_op_result_name(self, other)
-
-        first = self._range[::-1] if self.step < 0 else self._range
-        overlap = self.intersection(other)
-        if overlap.step < 0:
-            overlap = overlap[::-1]
-
-        if len(overlap) == 0:
-            return self.rename(name=res_name)
-        if len(overlap) == len(self):
-            return self[:0].rename(res_name)
-
-        # overlap.step will always be a multiple of self.step (see _intersection)
-
-        if len(overlap) == 1:
-            if overlap[0] == self[0]:
-                return self[1:]
-
-            elif overlap[0] == self[-1]:
-                return self[:-1]
-
-            elif len(self) == 3 and overlap[0] == self[1]:
-                return self[::2]
-
-            else:
-                return super()._difference(other, sort=sort)
-
-        elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]:
-            # e.g. range(-8, 20, 7) and range(13, -9, -3)
-            return self[1:-1]
-
-        if overlap.step == first.step:
-            if overlap[0] == first.start:
-                # The difference is everything after the intersection
-                new_rng = range(overlap[-1] + first.step, first.stop, first.step)
-            elif overlap[-1] == first[-1]:
-                # The difference is everything before the intersection
-                new_rng = range(first.start, overlap[0], first.step)
-            elif overlap._range == first[1:-1]:
-                # e.g. range(4) and range(1, 3)
-                step = len(first) - 1
-                new_rng = first[::step]
-            else:
-                # The difference is not range-like
-                # e.g. range(1, 10, 1) and range(3, 7, 1)
-                return super()._difference(other, sort=sort)
-
-        else:
-            # We must have len(self) > 1, because we ruled out above
-            #  len(overlap) == 0 and len(overlap) == len(self)
-            assert len(self) > 1
-
-            if overlap.step == first.step * 2:
-                if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]):
-                    # e.g. range(1, 10, 1) and range(1, 10, 2)
-                    new_rng = first[1::2]
-
-                elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]):
-                    # e.g. range(1, 10, 1) and range(2, 10, 2)
-                    new_rng = first[::2]
-
-                else:
-                    # We can get here with  e.g. range(20) and range(0, 10, 2)
-                    return super()._difference(other, sort=sort)
-
-            else:
-                # e.g. range(10) and range(0, 10, 3)
-                return super()._difference(other, sort=sort)
-
-        new_index = type(self)._simple_new(new_rng, name=res_name)
-        if first is not self._range:
-            new_index = new_index[::-1]
-
-        return new_index
-
-    def symmetric_difference(
-        self, other, result_name: Hashable | None = None, sort=None
-    ):
-        if not isinstance(other, RangeIndex) or sort is not None:
-            return super().symmetric_difference(other, result_name, sort)
-
-        left = self.difference(other)
-        right = other.difference(self)
-        result = left.union(right)
-
-        if result_name is not None:
-            result = result.rename(result_name)
-        return result
-
-    # --------------------------------------------------------------------
-
-    # error: Return type "Index" of "delete" incompatible with return type
-    #  "RangeIndex" in supertype "Index"
-    def delete(self, loc) -> Index:  # type: ignore[override]
-        # In some cases we can retain RangeIndex, see also
-        #  DatetimeTimedeltaMixin._get_delete_freq
-        if is_integer(loc):
-            if loc in (0, -len(self)):
-                return self[1:]
-            if loc in (-1, len(self) - 1):
-                return self[:-1]
-            if len(self) == 3 and loc in (1, -2):
-                return self[::2]
-
-        elif lib.is_list_like(loc):
-            slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
-
-            if isinstance(slc, slice):
-                # defer to RangeIndex._difference, which is optimized to return
-                #  a RangeIndex whenever possible
-                other = self[slc]
-                return self.difference(other, sort=False)
-
-        return super().delete(loc)
-
-    def insert(self, loc: int, item) -> Index:
-        if len(self) and (is_integer(item) or is_float(item)):
-            # We can retain RangeIndex if inserting at the beginning or end,
-            #  or right in the middle.
-            rng = self._range
-            if loc == 0 and item == self[0] - self.step:
-                new_rng = range(rng.start - rng.step, rng.stop, rng.step)
-                return type(self)._simple_new(new_rng, name=self._name)
-
-            elif loc == len(self) and item == self[-1] + self.step:
-                new_rng = range(rng.start, rng.stop + rng.step, rng.step)
-                return type(self)._simple_new(new_rng, name=self._name)
-
-            elif len(self) == 2 and item == self[0] + self.step / 2:
-                # e.g. inserting 1 into [0, 2]
-                step = int(self.step / 2)
-                new_rng = range(self.start, self.stop, step)
-                return type(self)._simple_new(new_rng, name=self._name)
-
-        return super().insert(loc, item)
-
-    def _concat(self, indexes: list[Index], name: Hashable) -> Index:
-        """
-        Overriding parent method for the case of all RangeIndex instances.
-
-        When all members of "indexes" are of type RangeIndex: result will be
-        RangeIndex if possible, Index with an int64 dtype otherwise. E.g.:
-        indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
-        indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')
-        """
-        if not all(isinstance(x, RangeIndex) for x in indexes):
-            return super()._concat(indexes, name)
-
-        elif len(indexes) == 1:
-            return indexes[0]
-
-        rng_indexes = cast(list[RangeIndex], indexes)
-
-        start = step = next_ = None
-
-        # Filter the empty indexes
-        non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
-
-        for obj in non_empty_indexes:
-            rng = obj._range
-
-            if start is None:
-                # This is set by the first non-empty index
-                start = rng.start
-                if step is None and len(rng) > 1:
-                    step = rng.step
-            elif step is None:
-                # First non-empty index had only one element
-                if rng.start == start:
-                    values = np.concatenate([x._values for x in rng_indexes])
-                    result = self._constructor(values)
-                    return result.rename(name)
-
-                step = rng.start - start
-
-            non_consecutive = (step != rng.step and len(rng) > 1) or (
-                next_ is not None and rng.start != next_
-            )
-            if non_consecutive:
-                result = self._constructor(
-                    np.concatenate([x._values for x in rng_indexes])
-                )
-                return result.rename(name)
-
-            if step is not None:
-                next_ = rng[-1] + step
-
-        if non_empty_indexes:
-            # Get the stop value from "next" or alternatively
-            # from the last non-empty index
-            stop = non_empty_indexes[-1].stop if next_ is None else next_
-            return RangeIndex(start, stop, step).rename(name)
-
-        # Here all "indexes" had 0 length, i.e. were empty.
-        # In this case return an empty range index.
-        return RangeIndex(0, 0).rename(name)
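The docstring's two cases, shown end to end (a hedged illustration; the exact repr depends on the pandas version):

```python
import pandas as pd

a = pd.concat([pd.Series(1, index=pd.RangeIndex(3)),
               pd.Series(2, index=pd.RangeIndex(3, 6))])
b = pd.concat([pd.Series(1, index=pd.RangeIndex(3)),
               pd.Series(2, index=pd.RangeIndex(4, 6))])
print(a.index)   # consecutive pieces:     RangeIndex(start=0, stop=6, step=1)
print(b.index)   # non-consecutive pieces: Index([0, 1, 2, 4, 5], dtype='int64')
```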
-
-    def __len__(self) -> int:
-        """
-        return the length of the RangeIndex
-        """
-        return len(self._range)
-
-    @property
-    def size(self) -> int:
-        return len(self)
-
-    def __getitem__(self, key):
-        """
-        Conserve RangeIndex type for scalar and slice keys.
-        """
-        if isinstance(key, slice):
-            return self._getitem_slice(key)
-        elif is_integer(key):
-            new_key = int(key)
-            try:
-                return self._range[new_key]
-            except IndexError as err:
-                raise IndexError(
-                    f"index {key} is out of bounds for axis 0 with size {len(self)}"
-                ) from err
-        elif is_scalar(key):
-            raise IndexError(
-                "only integers, slices (`:`), "
-                "ellipsis (`...`), numpy.newaxis (`None`) "
-                "and integer or boolean "
-                "arrays are valid indices"
-            )
-        return super().__getitem__(key)
-
-    def _getitem_slice(self, slobj: slice) -> Self:
-        """
-        Fastpath for __getitem__ when we know we have a slice.
-        """
-        res = self._range[slobj]
-        return type(self)._simple_new(res, name=self._name)
-
-    @unpack_zerodim_and_defer("__floordiv__")
-    def __floordiv__(self, other):
-        if is_integer(other) and other != 0:
-            if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
-                start = self.start // other
-                step = self.step // other
-                stop = start + len(self) * step
-                new_range = range(start, stop, step or 1)
-                return self._simple_new(new_range, name=self._name)
-            if len(self) == 1:
-                start = self.start // other
-                new_range = range(start, start + 1, 1)
-                return self._simple_new(new_range, name=self._name)
-
-        return super().__floordiv__(other)
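The fast path above fires only when start and step are both divisible by the divisor, so the result is still an exact range; otherwise it falls back to elementwise division. For example:

```python
import pandas as pd

idx = pd.RangeIndex(0, 20, 4)     # 0, 4, 8, 12, 16
print(idx // 2)   # stays a range: RangeIndex(start=0, stop=10, step=2)
print(idx // 3)   # falls back:    Index([0, 1, 2, 4, 5], dtype='int64')
```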
-
-    # --------------------------------------------------------------------
-    # Reductions
-
-    def all(self, *args, **kwargs) -> bool:
-        return 0 not in self._range
-
-    def any(self, *args, **kwargs) -> bool:
-        return any(self._range)
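Both reductions exploit the fact that `0` is the only falsy integer: `all()` is just a membership test for `0`, and `any()` is False only when every element is `0` (an empty range, or one containing only `0`). For instance:

```python
import pandas as pd

assert not pd.RangeIndex(0, 5).all()    # contains 0
assert pd.RangeIndex(1, 5).all() and pd.RangeIndex(1, 5).any()
```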
-
-    # --------------------------------------------------------------------
-
-    def _cmp_method(self, other, op):
-        if isinstance(other, RangeIndex) and self._range == other._range:
-            # Both are immutable, so if the ._range attributes are equal a shortcut is possible
-            return super()._cmp_method(self, op)
-        return super()._cmp_method(other, op)
-
-    def _arith_method(self, other, op):
-        """
-        Parameters
-        ----------
-        other : Any
-        op : callable that accepts 2 params
-            perform the binary op
-        """
-
-        if isinstance(other, ABCTimedeltaIndex):
-            # Defer to TimedeltaIndex implementation
-            return NotImplemented
-        elif isinstance(other, (timedelta, np.timedelta64)):
-            # GH#19333 is_integer evaluated True on timedelta64,
-            # so we need to catch these explicitly
-            return super()._arith_method(other, op)
-        elif lib.is_np_dtype(getattr(other, "dtype", None), "m"):
-            # Must be an np.ndarray; GH#22390
-            return super()._arith_method(other, op)
-
-        if op in [
-            operator.pow,
-            ops.rpow,
-            operator.mod,
-            ops.rmod,
-            operator.floordiv,
-            ops.rfloordiv,
-            divmod,
-            ops.rdivmod,
-        ]:
-            return super()._arith_method(other, op)
-
-        step: Callable | None = None
-        if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
-            step = op
-
-        # TODO: if other is a RangeIndex we may have more efficient options
-        right = extract_array(other, extract_numpy=True, extract_range=True)
-        left = self
-
-        try:
-            # apply if we have an override
-            if step:
-                with np.errstate(all="ignore"):
-                    rstep = step(left.step, right)
-
-                # we don't have a representable op
-                # so return a base index
-                if not is_integer(rstep) or not rstep:
-                    raise ValueError
-
-            # GH#53255
-            else:
-                rstep = -left.step if op == ops.rsub else left.step
-
-            with np.errstate(all="ignore"):
-                rstart = op(left.start, right)
-                rstop = op(left.stop, right)
-
-            res_name = ops.get_op_result_name(self, other)
-            result = type(self)(rstart, rstop, rstep, name=res_name)
-
-            # for compat with numpy / Index with int64 dtype
-            # even if we can represent as a RangeIndex, return
-            # as a float64 Index if we have float-like descriptors
-            if not all(is_integer(x) for x in [rstart, rstop, rstep]):
-                result = result.astype("float64")
-
-            return result
-
-        except (ValueError, TypeError, ZeroDivisionError):
-            # test_arithmetic_explicit_conversions
-            return super()._arith_method(other, op)
-
-    def take(
-        self,
-        indices,
-        axis: Axis = 0,
-        allow_fill: bool = True,
-        fill_value=None,
-        **kwargs,
-    ):
-        if kwargs:
-            nv.validate_take((), kwargs)
-        if is_scalar(indices):
-            raise TypeError("Expected indices to be array-like")
-        indices = ensure_platform_int(indices)
-
-        # raise an exception if allow_fill is True and fill_value is not None
-        self._maybe_disallow_fill(allow_fill, fill_value, indices)
-
-        if len(indices) == 0:
-            taken = np.array([], dtype=self.dtype)
-        else:
-            ind_max = indices.max()
-            if ind_max >= len(self):
-                raise IndexError(
-                    f"index {ind_max} is out of bounds for axis 0 with size {len(self)}"
-                )
-            ind_min = indices.min()
-            if ind_min < -len(self):
-                raise IndexError(
-                    f"index {ind_min} is out of bounds for axis 0 with size {len(self)}"
-                )
-            taken = indices.astype(self.dtype, casting="safe")
-            if ind_min < 0:
-                taken %= len(self)
-            if self.step != 1:
-                taken *= self.step
-            if self.start != 0:
-                taken += self.start
-
-        # _constructor so RangeIndex -> Index with an int64 dtype
-        return self._constructor._simple_new(taken, name=self.name)
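The position-to-value arithmetic in `take` never materializes the index's values: after wrapping negative positions, each position is scaled by `step` and shifted by `start`. A standalone sketch with illustrative numbers:

```python
import numpy as np

start, step, n = 10, 3, 5                   # models RangeIndex(10, 25, 3)
indices = np.array([0, 4, -1])
indices = np.where(indices < 0, indices + n, indices)   # wrap negatives
print(indices * step + start)               # [10 22 22]
```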
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_cov_corr.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_cov_corr.py
deleted file mode 100644
index b2d5d1ee090ac5ba58a24741ca349a422af52315..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_cov_corr.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import math
-
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import (
-    Series,
-    isna,
-)
-import pandas._testing as tm
-
-
-class TestSeriesCov:
-    def test_cov(self, datetime_series):
-        # full overlap
-        tm.assert_almost_equal(
-            datetime_series.cov(datetime_series), datetime_series.std() ** 2
-        )
-
-        # partial overlap
-        tm.assert_almost_equal(
-            datetime_series[:15].cov(datetime_series[5:]),
-            datetime_series[5:15].std() ** 2,
-        )
-
-        # No overlap
-        assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
-
-        # all NA
-        cp = datetime_series[:10].copy()
-        cp[:] = np.nan
-        assert isna(cp.cov(cp))
-
-        # min_periods
-        assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
-
-        ts1 = datetime_series[:15].reindex(datetime_series.index)
-        ts2 = datetime_series[5:].reindex(datetime_series.index)
-        assert isna(ts1.cov(ts2, min_periods=12))
-
-    @pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
-    @pytest.mark.parametrize("dtype", ["float64", "Float64"])
-    def test_cov_ddof(self, test_ddof, dtype):
-        # GH#34611
-        np_array1 = np.random.default_rng(2).random(10)
-        np_array2 = np.random.default_rng(2).random(10)
-
-        s1 = Series(np_array1, dtype=dtype)
-        s2 = Series(np_array2, dtype=dtype)
-
-        result = s1.cov(s2, ddof=test_ddof)
-        expected = np.cov(np_array1, np_array2, ddof=test_ddof)[0][1]
-        assert math.isclose(expected, result)
-
-
-class TestSeriesCorr:
-    @pytest.mark.parametrize("dtype", ["float64", "Float64"])
-    def test_corr(self, datetime_series, dtype):
-        stats = pytest.importorskip("scipy.stats")
-
-        datetime_series = datetime_series.astype(dtype)
-
-        # full overlap
-        tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
-
-        # partial overlap
-        tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
-
-        assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
-
-        ts1 = datetime_series[:15].reindex(datetime_series.index)
-        ts2 = datetime_series[5:].reindex(datetime_series.index)
-        assert isna(ts1.corr(ts2, min_periods=12))
-
-        # No overlap
-        assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
-
-        # all NA
-        cp = datetime_series[:10].copy()
-        cp[:] = np.nan
-        assert isna(cp.corr(cp))
-
-        A = tm.makeTimeSeries()
-        B = tm.makeTimeSeries()
-        result = A.corr(B)
-        expected, _ = stats.pearsonr(A, B)
-        tm.assert_almost_equal(result, expected)
-
-    def test_corr_rank(self):
-        stats = pytest.importorskip("scipy.stats")
-
-        # kendall and spearman
-        A = tm.makeTimeSeries()
-        B = tm.makeTimeSeries()
-        A[-5:] = A[:5]
-        result = A.corr(B, method="kendall")
-        expected = stats.kendalltau(A, B)[0]
-        tm.assert_almost_equal(result, expected)
-
-        result = A.corr(B, method="spearman")
-        expected = stats.spearmanr(A, B)[0]
-        tm.assert_almost_equal(result, expected)
-
-        # results from R
-        A = Series(
-            [
-                -0.89926396,
-                0.94209606,
-                -1.03289164,
-                -0.95445587,
-                0.76910310,
-                -0.06430576,
-                -2.09704447,
-                0.40660407,
-                -0.89926396,
-                0.94209606,
-            ]
-        )
-        B = Series(
-            [
-                -1.01270225,
-                -0.62210117,
-                -1.56895827,
-                0.59592943,
-                -0.01680292,
-                1.17258718,
-                -1.06009347,
-                -0.10222060,
-                -0.89076239,
-                0.89372375,
-            ]
-        )
-        kexp = 0.4319297
-        sexp = 0.5853767
-        tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
-        tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
-
-    def test_corr_invalid_method(self):
-        # GH PR #22298
-        s1 = Series(np.random.default_rng(2).standard_normal(10))
-        s2 = Series(np.random.default_rng(2).standard_normal(10))
-        msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
-        with pytest.raises(ValueError, match=msg):
-            s1.corr(s2, method="____")
-
-    def test_corr_callable_method(self, datetime_series):
-        # simple correlation example
-        # returns 1 if exact equality, 0 otherwise
-        my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
-
-        # simple example
-        s1 = Series([1, 2, 3, 4, 5])
-        s2 = Series([5, 4, 3, 2, 1])
-        expected = 0
-        tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
-
-        # full overlap
-        tm.assert_almost_equal(
-            datetime_series.corr(datetime_series, method=my_corr), 1.0
-        )
-
-        # partial overlap
-        tm.assert_almost_equal(
-            datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
-        )
-
-        # No overlap
-        assert np.isnan(
-            datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
-        )
-
-        # dataframe example
-        df = pd.DataFrame([s1, s2])
-        expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
-        tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_pct_change.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_pct_change.py
deleted file mode 100644
index 9727ef3d5c27c2a4f695df5fffd30323766c93e5..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_pct_change.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import (
-    Series,
-    date_range,
-)
-import pandas._testing as tm
-
-
-class TestSeriesPctChange:
-    def test_pct_change(self, datetime_series):
-        msg = (
-            "The 'fill_method' keyword being not None and the 'limit' keyword in "
-            "Series.pct_change are deprecated"
-        )
-
-        rs = datetime_series.pct_change(fill_method=None)
-        tm.assert_series_equal(rs, datetime_series / datetime_series.shift(1) - 1)
-
-        rs = datetime_series.pct_change(2)
-        filled = datetime_series.ffill()
-        tm.assert_series_equal(rs, filled / filled.shift(2) - 1)
-
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            rs = datetime_series.pct_change(fill_method="bfill", limit=1)
-        filled = datetime_series.bfill(limit=1)
-        tm.assert_series_equal(rs, filled / filled.shift(1) - 1)
-
-        rs = datetime_series.pct_change(freq="5D")
-        filled = datetime_series.ffill()
-        tm.assert_series_equal(
-            rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
-        )
-
-    def test_pct_change_with_duplicate_axis(self):
-        # GH#28664
-        common_idx = date_range("2019-11-14", periods=5, freq="D")
-        result = Series(range(5), common_idx).pct_change(freq="B")
-
-        # the reason that the expected should be like this is documented at PR 28681
-        expected = Series([np.nan, np.inf, np.nan, np.nan, 3.0], common_idx)
-
-        tm.assert_series_equal(result, expected)
-
-    def test_pct_change_shift_over_nas(self):
-        s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
-
-        msg = "The default fill_method='pad' in Series.pct_change is deprecated"
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            chg = s.pct_change()
-
-        expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
-        tm.assert_series_equal(chg, expected)
-
-    @pytest.mark.parametrize(
-        "freq, periods, fill_method, limit",
-        [
-            ("5B", 5, None, None),
-            ("3B", 3, None, None),
-            ("3B", 3, "bfill", None),
-            ("7B", 7, "pad", 1),
-            ("7B", 7, "bfill", 3),
-            ("14B", 14, None, None),
-        ],
-    )
-    def test_pct_change_periods_freq(
-        self, freq, periods, fill_method, limit, datetime_series
-    ):
-        msg = (
-            "The 'fill_method' keyword being not None and the 'limit' keyword in "
-            "Series.pct_change are deprecated"
-        )
-
-        # GH#7292
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            rs_freq = datetime_series.pct_change(
-                freq=freq, fill_method=fill_method, limit=limit
-            )
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            rs_periods = datetime_series.pct_change(
-                periods, fill_method=fill_method, limit=limit
-            )
-        tm.assert_series_equal(rs_freq, rs_periods)
-
-        empty_ts = Series(index=datetime_series.index, dtype=object)
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            rs_freq = empty_ts.pct_change(
-                freq=freq, fill_method=fill_method, limit=limit
-            )
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            rs_periods = empty_ts.pct_change(
-                periods, fill_method=fill_method, limit=limit
-            )
-        tm.assert_series_equal(rs_freq, rs_periods)
-
-
-@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])
-def test_pct_change_with_duplicated_indices(fill_method):
-    # GH30463
-    s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)
-
-    warn = None if fill_method is None else FutureWarning
-    msg = (
-        "The 'fill_method' keyword being not None and the 'limit' keyword in "
-        "Series.pct_change are deprecated"
-    )
-    with tm.assert_produces_warning(warn, match=msg):
-        result = s.pct_change(fill_method=fill_method)
-
-    expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)
-    tm.assert_series_equal(result, expected)
-
-
-def test_pct_change_no_warning_na_beginning():
-    # GH#54981
-    ser = Series([None, None, 1, 2, 3])
-    result = ser.pct_change()
-    expected = Series([np.nan, np.nan, np.nan, 1, 0.5])
-    tm.assert_series_equal(result, expected)
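The identity these tests lean on, stated directly: `pct_change(n)` is `value / value.shift(n) - 1` after any requested filling. A minimal check (rounding only to tidy the floats):

```python
import pandas as pd

s = pd.Series([100.0, 110.0, 121.0])        # +10% each step
assert s.pct_change(1).equals(s / s.shift(1) - 1)
assert s.pct_change().round(10).tolist()[1:] == [0.1, 0.1]
```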
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/locations/_distutils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/locations/_distutils.py
deleted file mode 100644
index 2ec79e65bea5df7f379451a50b7cc9fe6ce0832f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/locations/_distutils.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""Locations where we look for configs, install stuff, etc"""
-
-# The following comment should be removed at some point in the future.
-# mypy: strict-optional=False
-
-import logging
-import os
-import sys
-from distutils.cmd import Command as DistutilsCommand
-from distutils.command.install import SCHEME_KEYS
-from distutils.command.install import install as distutils_install_command
-from distutils.sysconfig import get_python_lib
-from typing import Dict, List, Optional, Tuple, Union, cast
-
-from pip._internal.models.scheme import Scheme
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.virtualenv import running_under_virtualenv
-
-from .base import get_major_minor_version
-
-logger = logging.getLogger(__name__)
-
-
-def distutils_scheme(
-    dist_name: str,
-    user: bool = False,
-    home: str = None,
-    root: str = None,
-    isolated: bool = False,
-    prefix: str = None,
-    *,
-    ignore_config_files: bool = False,
-) -> Dict[str, str]:
-    """
-    Return a distutils install scheme
-    """
-    from distutils.dist import Distribution
-
-    dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name}
-    if isolated:
-        dist_args["script_args"] = ["--no-user-cfg"]
-
-    d = Distribution(dist_args)
-    if not ignore_config_files:
-        try:
-            d.parse_config_files()
-        except UnicodeDecodeError:
-            # Typeshed does not include find_config_files() for some reason.
-            paths = d.find_config_files()  # type: ignore
-            logger.warning(
-                "Ignore distutils configs in %s due to encoding errors.",
-                ", ".join(os.path.basename(p) for p in paths),
-            )
-    obj: Optional[DistutilsCommand] = None
-    obj = d.get_command_obj("install", create=True)
-    assert obj is not None
-    i = cast(distutils_install_command, obj)
-    # NOTE: setting user or home has the side-effect of creating the home dir
-    # or user base for installations during finalize_options().
-    # Ideally, we'd prefer a scheme class that has no side-effects.
-    assert not (user and prefix), f"user={user} prefix={prefix}"
-    assert not (home and prefix), f"home={home} prefix={prefix}"
-    i.user = user or i.user
-    if user or home:
-        i.prefix = ""
-    i.prefix = prefix or i.prefix
-    i.home = home or i.home
-    i.root = root or i.root
-    i.finalize_options()
-
-    scheme = {}
-    for key in SCHEME_KEYS:
-        scheme[key] = getattr(i, "install_" + key)
-
-    # install_lib specified in setup.cfg should install *everything*
-    # into there (i.e. it takes precedence over both purelib and
-    # platlib).  Note, i.install_lib is *always* set after
-    # finalize_options(); we only want to override here if the user
-    # has explicitly requested it hence going back to the config
-    if "install_lib" in d.get_option_dict("install"):
-        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
-
-    if running_under_virtualenv():
-        if home:
-            prefix = home
-        elif user:
-            prefix = i.install_userbase  # type: ignore
-        else:
-            prefix = i.prefix
-        scheme["headers"] = os.path.join(
-            prefix,
-            "include",
-            "site",
-            f"python{get_major_minor_version()}",
-            dist_name,
-        )
-
-        if root is not None:
-            path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1]
-            scheme["headers"] = os.path.join(root, path_no_drive[1:])
-
-    return scheme
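For orientation, the stdlib exposes the same kind of path scheme directly; `sysconfig` is the modern, non-deprecated counterpart of these distutils lookups (the key names shown are sysconfig's, which differ slightly from `SCHEME_KEYS`):

```python
import sysconfig

# Print the install locations for the currently running interpreter.
for key in ("purelib", "platlib", "scripts", "data", "include"):
    print(f"{key:8} -> {sysconfig.get_paths()[key]}")
```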
-
-
-def get_scheme(
-    dist_name: str,
-    user: bool = False,
-    home: Optional[str] = None,
-    root: Optional[str] = None,
-    isolated: bool = False,
-    prefix: Optional[str] = None,
-) -> Scheme:
-    """
-    Get the "scheme" corresponding to the input parameters. The distutils
-    documentation provides the context for the available schemes:
-    https://docs.python.org/3/install/index.html#alternate-installation
-
-    :param dist_name: the name of the package to retrieve the scheme for, used
-        in the headers scheme path
-    :param user: indicates to use the "user" scheme
-    :param home: indicates to use the "home" scheme and provides the base
-        directory for the same
-    :param root: root under which other directories are re-based
-    :param isolated: equivalent to --no-user-cfg, i.e. do not consider
-        ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for
-        scheme paths
-    :param prefix: indicates to use the "prefix" scheme and provides the
-        base directory for the same
-    """
-    scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix)
-    return Scheme(
-        platlib=scheme["platlib"],
-        purelib=scheme["purelib"],
-        headers=scheme["headers"],
-        scripts=scheme["scripts"],
-        data=scheme["data"],
-    )
-
-
-def get_bin_prefix() -> str:
-    # XXX: In old virtualenv versions, sys.prefix can contain '..' components,
-    # so we need to call normpath to eliminate them.
-    prefix = os.path.normpath(sys.prefix)
-    if WINDOWS:
-        bin_py = os.path.join(prefix, "Scripts")
-        # buildout uses 'bin' on Windows too?
-        if not os.path.exists(bin_py):
-            bin_py = os.path.join(prefix, "bin")
-        return bin_py
-    # Forcing to use /usr/local/bin for standard macOS framework installs
-    # Also log to ~/Library/Logs/ for use with the Console.app log viewer
-    if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/":
-        return "/usr/local/bin"
-    return os.path.join(prefix, "bin")
-
-
-def get_purelib() -> str:
-    return get_python_lib(plat_specific=False)
-
-
-def get_platlib() -> str:
-    return get_python_lib(plat_specific=True)
-
-
-def get_prefixed_libs(prefix: str) -> Tuple[str, str]:
-    return (
-        get_python_lib(plat_specific=False, prefix=prefix),
-        get_python_lib(plat_specific=True, prefix=prefix),
-    )
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/floscript.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/floscript.py
deleted file mode 100644
index 6cc2971a246ee382ef2f494df34ceb68676695d2..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/floscript.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-    pygments.lexers.floscript
-    ~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Lexer for FloScript
-
-    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-from pygments.lexer import RegexLexer, include, bygroups
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
-    Number, Punctuation, Whitespace
-
-__all__ = ['FloScriptLexer']
-
-
-class FloScriptLexer(RegexLexer):
-    """
-    For FloScript configuration language source code.
-
-    .. versionadded:: 2.4
-    """
-
-    name = 'FloScript'
-    url = 'https://github.com/ioflo/ioflo'
-    aliases = ['floscript', 'flo']
-    filenames = ['*.flo']
-
-    def innerstring_rules(ttype):
-        return [
-            # the old style '%s' % (...) string formatting
-            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
-             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
-            # backslashes, quotes and formatting signs must be parsed one at a time
-            (r'[^\\\'"%\n]+', ttype),
-            (r'[\'"\\]', ttype),
-            # unhandled string formatting sign
-            (r'%', ttype),
-            # newlines are an error (use "nl" state)
-        ]
-
-    tokens = {
-        'root': [
-            (r'\s+', Whitespace),
-
-            (r'[]{}:(),;[]', Punctuation),
-            (r'(\\)(\n)', bygroups(Text, Whitespace)),
-            (r'\\', Text),
-            (r'(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|'
-             r'and|not)\b', Operator.Word),
-            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
-            (r'(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|'
-             r'repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|'
-             r'copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|'
-             r'give|take)\b', Name.Builtin),
-            (r'(frame|framer|house)\b', Keyword),
-            ('"', String, 'string'),
-
-            include('name'),
-            include('numbers'),
-            (r'#.+$', Comment.Single),
-        ],
-        'string': [
-            ('[^"]+', String),
-            ('"', String, '#pop'),
-        ],
-        'numbers': [
-            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
-            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
-            (r'0[0-7]+j?', Number.Oct),
-            (r'0[bB][01]+', Number.Bin),
-            (r'0[xX][a-fA-F0-9]+', Number.Hex),
-            (r'\d+L', Number.Integer.Long),
-            (r'\d+j?', Number.Integer)
-        ],
-
-        'name': [
-            (r'@[\w.]+', Name.Decorator),
-            (r'[a-zA-Z_]\w*', Name),
-        ],
-    }
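A hedged usage sketch, wiring the lexer through pygments' standard `highlight()` entry point (the FloScript snippet itself is illustrative only):

```python
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.floscript import FloScriptLexer

source = "house example\n  framer main be active first start\n"
print(highlight(source, FloScriptLexer(), TerminalFormatter()))
```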
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pylab.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pylab.py
deleted file mode 100644
index f9d135d36e2b140e65e00efaddbc606a53f4383f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pylab.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from matplotlib.pylab import *
-import matplotlib.pylab
-__doc__ = matplotlib.pylab.__doc__
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/util.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/util.py
deleted file mode 100644
index 64f06dd4bce4d634946bb836d56eff78196e0b6d..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/util.py
+++ /dev/null
@@ -1,535 +0,0 @@
-"""distutils.util
-
-Miscellaneous utility functions -- anything that doesn't fit into
-one of the other *util.py modules.
-"""
-
-import os
-import re
-import importlib.util
-import string
-import sys
-from distutils.errors import DistutilsPlatformError
-from distutils.dep_util import newer
-from distutils.spawn import spawn
-from distutils import log
-from distutils.errors import DistutilsByteCompileError
-from .py35compat import _optim_args_from_interpreter_flags
-
-
-def get_host_platform():
-    """Return a string that identifies the current platform.  This is used mainly to
-    distinguish platform-specific build directories and platform-specific built
-    distributions.  Typically includes the OS name and version and the
-    architecture (as supplied by 'os.uname()'), although the exact information
-    included depends on the OS; e.g. on Linux, the kernel version isn't
-    particularly important.
-
-    Examples of returned values:
-       linux-i586
-       linux-alpha (?)
-       solaris-2.6-sun4u
-
-    Windows will return one of:
-       win-amd64 (64-bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
-       win32 (all others - specifically, sys.platform is returned)
-
-    For other non-POSIX platforms, currently just returns 'sys.platform'.
-
-    """
-    if os.name == 'nt':
-        if 'amd64' in sys.version.lower():
-            return 'win-amd64'
-        if '(arm)' in sys.version.lower():
-            return 'win-arm32'
-        if '(arm64)' in sys.version.lower():
-            return 'win-arm64'
-        return sys.platform
-
-    # Set for cross builds explicitly
-    if "_PYTHON_HOST_PLATFORM" in os.environ:
-        return os.environ["_PYTHON_HOST_PLATFORM"]
-
-    if os.name != "posix" or not hasattr(os, 'uname'):
-        # XXX what about the architecture? NT is Intel or Alpha,
-        # Mac OS is M68k or PPC, etc.
-        return sys.platform
-
-    # Try to distinguish various flavours of Unix
-
-    (osname, host, release, version, machine) = os.uname()
-
-    # Convert the OS name to lowercase, remove '/' characters, and translate
-    # spaces (for "Power Macintosh")
-    osname = osname.lower().replace('/', '')
-    machine = machine.replace(' ', '_')
-    machine = machine.replace('/', '-')
-
-    if osname[:5] == "linux":
-        # At least on Linux/Intel, 'machine' is the processor --
-        # i386, etc.
-        # XXX what about Alpha, SPARC, etc?
-        return  "%s-%s" % (osname, machine)
-    elif osname[:5] == "sunos":
-        if release[0] >= "5":           # SunOS 5 == Solaris 2
-            osname = "solaris"
-            release = "%d.%s" % (int(release[0]) - 3, release[2:])
-            # We can't use "platform.architecture()[0]" because a
-            # bootstrap problem. We use a dict to get an error
-            # if some suspicious happens.
-            bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
-            machine += ".%s" % bitness[sys.maxsize]
-        # fall through to standard osname-release-machine representation
-    elif osname[:3] == "aix":
-        from .py38compat import aix_platform
-        return aix_platform(osname, version, release)
-    elif osname[:6] == "cygwin":
-        osname = "cygwin"
-        rel_re = re.compile (r'[\d.]+', re.ASCII)
-        m = rel_re.match(release)
-        if m:
-            release = m.group()
-    elif osname[:6] == "darwin":
-        import _osx_support, distutils.sysconfig
-        osname, release, machine = _osx_support.get_platform_osx(
-                                        distutils.sysconfig.get_config_vars(),
-                                        osname, release, machine)
-
-    return "%s-%s-%s" % (osname, release, machine)
-
-def get_platform():
-    if os.name == 'nt':
-        TARGET_TO_PLAT = {
-            'x86' : 'win32',
-            'x64' : 'win-amd64',
-            'arm' : 'win-arm32',
-            'arm64': 'win-arm64',
-        }
-        return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform()
-    else:
-        return get_host_platform()
-
-
-if sys.platform == 'darwin':
-    _syscfg_macosx_ver = None # cache the version pulled from sysconfig
-MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET'
-
-def _clear_cached_macosx_ver():
-    """For testing only. Do not call."""
-    global _syscfg_macosx_ver
-    _syscfg_macosx_ver = None
-
-def get_macosx_target_ver_from_syscfg():
-    """Get the version of macOS latched in the Python interpreter configuration.
-    Returns the version as a string, or None if one cannot be obtained. Cached."""
-    global _syscfg_macosx_ver
-    if _syscfg_macosx_ver is None:
-        from distutils import sysconfig
-        ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or ''
-        if ver:
-            _syscfg_macosx_ver = ver
-    return _syscfg_macosx_ver
-
-def get_macosx_target_ver():
-    """Return the version of macOS for which we are building.
-
-    The target version defaults to the version in sysconfig latched at the time
-    the Python interpreter was built, unless overridden by an environment
-    variable. If neither source has a value, then None is returned."""
-
-    syscfg_ver = get_macosx_target_ver_from_syscfg()
-    env_ver = os.environ.get(MACOSX_VERSION_VAR)
-
-    if env_ver:
-        # Validate the overridden version against sysconfig's, if we have both.
-        # Ensure that the deployment target of the build process is not less
-        # than 10.3 if the interpreter was built for 10.3 or later.  This
-        # ensures extension modules are built with correct compatibility
-        # values, specifically LDSHARED which can use
-        # '-undefined dynamic_lookup' which only works on >= 10.3.
-        if syscfg_ver and split_version(syscfg_ver) >= [10, 3] and \
-            split_version(env_ver) < [10, 3]:
-            my_msg = ('$' + MACOSX_VERSION_VAR + ' mismatch: '
-                      'now "%s" but "%s" during configure; '
-                      'must use 10.3 or later'
-                      % (env_ver, syscfg_ver))
-            raise DistutilsPlatformError(my_msg)
-        return env_ver
-    return syscfg_ver
-
-
-def split_version(s):
-    """Convert a dot-separated string into a list of numbers for comparisons"""
-    return [int(n) for n in s.split('.')]
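The list-of-ints form exists because plain string comparison orders versions wrong; a quick check, assuming it runs next to the definition above:

```python
assert split_version("10.9") < split_version("10.15")   # [10, 9] < [10, 15]
assert not ("10.9" < "10.15")   # lexicographic: '9' > '1', so strings disagree
```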
-
-
-def convert_path (pathname):
-    """Return 'pathname' as a name that will work on the native filesystem,
-    i.e. split it on '/' and put it back together again using the current
-    directory separator.  Needed because filenames in the setup script are
-    always supplied in Unix style, and have to be converted to the local
-    convention before we can actually use them in the filesystem.  Raises
-    ValueError on non-Unix-ish systems if 'pathname' either starts or
-    ends with a slash.
-    """
-    if os.sep == '/':
-        return pathname
-    if not pathname:
-        return pathname
-    if pathname[0] == '/':
-        raise ValueError("path '%s' cannot be absolute" % pathname)
-    if pathname[-1] == '/':
-        raise ValueError("path '%s' cannot end with '/'" % pathname)
-
-    paths = pathname.split('/')
-    while '.' in paths:
-        paths.remove('.')
-    if not paths:
-        return os.curdir
-    return os.path.join(*paths)
-
-# convert_path ()
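-# Example: on Windows (os.sep == '\\') the Unix-style setup path is rejoined
-# with the native separator; on POSIX it is returned unchanged.
-# >>> convert_path('foo/bar/baz')   # on Windows
-# 'foo\\bar\\baz'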
-
-
-def change_root (new_root, pathname):
-    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
-    relative, this is equivalent to "os.path.join(new_root,pathname)".
-    Otherwise, it requires making 'pathname' relative and then joining the
-    two, which is tricky on DOS/Windows and Mac OS.
-    """
-    if os.name == 'posix':
-        if not os.path.isabs(pathname):
-            return os.path.join(new_root, pathname)
-        else:
-            return os.path.join(new_root, pathname[1:])
-
-    elif os.name == 'nt':
-        (drive, path) = os.path.splitdrive(pathname)
-        if path[0] == '\\':
-            path = path[1:]
-        return os.path.join(new_root, path)
-
-    else:
-        raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)
-
-
-_environ_checked = 0
-def check_environ ():
-    """Ensure that 'os.environ' has all the environment variables we
-    guarantee that users can use in config files, command-line options,
-    etc.  Currently this includes:
-      HOME - user's home directory (Unix only)
-      PLAT - description of the current platform, including hardware
-             and OS (see 'get_platform()')
-    """
-    global _environ_checked
-    if _environ_checked:
-        return
-
-    if os.name == 'posix' and 'HOME' not in os.environ:
-        try:
-            import pwd
-            os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
-        except (ImportError, KeyError):
-            # bpo-10496: if the current user identifier doesn't exist in the
-            # password database, do nothing
-            pass
-
-    if 'PLAT' not in os.environ:
-        os.environ['PLAT'] = get_platform()
-
-    _environ_checked = 1
-
-
-def subst_vars (s, local_vars):
-    """Perform shell/Perl-style variable substitution on 'string'.  Every
-    occurrence of '$' followed by a name is considered a variable, and
-    variable is substituted by the value found in the 'local_vars'
-    dictionary, or in 'os.environ' if it's not in 'local_vars'.
-    'os.environ' is first checked/augmented to guarantee that it contains
-    certain values: see 'check_environ()'.  Raise ValueError for any
-    variables not found in either 'local_vars' or 'os.environ'.
-    """
-    check_environ()
-    def _subst (match, local_vars=local_vars):
-        var_name = match.group(1)
-        if var_name in local_vars:
-            return str(local_vars[var_name])
-        else:
-            return os.environ[var_name]
-
-    try:
-        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
-    except KeyError as var:
-        raise ValueError("invalid variable '$%s'" % var)
-
-# subst_vars ()
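-# Example: values come from 'local_vars' first, then os.environ; names found
-# in neither raise ValueError.
-# >>> subst_vars('build/$plat/tmp', {'plat': 'linux-x86_64'})
-# 'build/linux-x86_64/tmp'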
-
-
-def grok_environment_error (exc, prefix="error: "):
-    # Function kept for backward compatibility.
-    # Used to try clever things with EnvironmentErrors,
-    # but nowadays str(exception) produces good messages.
-    return prefix + str(exc)
-
-
-# Needed by 'split_quoted()'
-_wordchars_re = _squote_re = _dquote_re = None
-def _init_regex():
-    global _wordchars_re, _squote_re, _dquote_re
-    _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
-    _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
-    _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-
-def split_quoted (s):
-    """Split a string up according to Unix shell-like rules for quotes and
-    backslashes.  In short: words are delimited by spaces, as long as those
-    spaces are not escaped by a backslash, or inside a quoted string.
-    Single and double quotes are equivalent, and the quote characters can
-    be backslash-escaped.  The backslash is stripped from any two-character
-    escape sequence, leaving only the escaped character.  The quote
-    characters are stripped from any quoted string.  Returns a list of
-    words.
-    """
-
-    # This is a nice algorithm for splitting up a single string, since it
-    # doesn't require character-by-character examination.  It was a little
-    # bit of a brain-bender to get it working right, though...
-    if _wordchars_re is None: _init_regex()
-
-    s = s.strip()
-    words = []
-    pos = 0
-
-    while s:
-        m = _wordchars_re.match(s, pos)
-        end = m.end()
-        if end == len(s):
-            words.append(s[:end])
-            break
-
-        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
-            words.append(s[:end])       # we definitely have a word delimiter
-            s = s[end:].lstrip()
-            pos = 0
-
-        elif s[end] == '\\':            # preserve whatever is being escaped;
-                                        # will become part of the current word
-            s = s[:end] + s[end+1:]
-            pos = end+1
-
-        else:
-            if s[end] == "'":           # slurp singly-quoted string
-                m = _squote_re.match(s, end)
-            elif s[end] == '"':         # slurp doubly-quoted string
-                m = _dquote_re.match(s, end)
-            else:
-                raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
-            if m is None:
-                raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
-            (beg, end) = m.span()
-            s = s[:beg] + s[beg+1:end-1] + s[end:]
-            pos = m.end() - 2
-
-        if pos >= len(s):
-            words.append(s)
-            break
-
-    return words
-
-# split_quoted ()
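-# Example of the quoting rules described above:
-# >>> split_quoted('this is "a test" of\\ escapes')
-# ['this', 'is', 'a test', 'of escapes']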
-
-
-def execute (func, args, msg=None, verbose=0, dry_run=0):
-    """Perform some action that affects the outside world (eg.  by
-    writing to the filesystem).  Such actions are special because they
-    are disabled by the 'dry_run' flag.  This method takes care of all
-    that bureaucracy for you; all you have to do is supply the
-    function to call and an argument tuple for it (to embody the
-    "external action" being performed), and an optional message to
-    print.
-    """
-    if msg is None:
-        msg = "%s%r" % (func.__name__, args)
-        if msg[-2:] == ',)':        # correct for singleton tuple
-            msg = msg[0:-2] + ')'
-
-    log.info(msg)
-    if not dry_run:
-        func(*args)
-
-
-def strtobool (val):
-    """Convert a string representation of truth to true (1) or false (0).
-
-    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
-    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
-    'val' is anything else.
-    """
-    val = val.lower()
-    if val in ('y', 'yes', 't', 'true', 'on', '1'):
-        return 1
-    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
-        return 0
-    else:
-        raise ValueError("invalid truth value %r" % (val,))
-
-
-def byte_compile (py_files,
-                  optimize=0, force=0,
-                  prefix=None, base_dir=None,
-                  verbose=1, dry_run=0,
-                  direct=None):
-    """Byte-compile a collection of Python source files to .pyc
-    files in a __pycache__ subdirectory.  'py_files' is a list
-    of files to compile; any files that don't end in ".py" are silently
-    skipped.  'optimize' must be one of the following:
-      0 - don't optimize
-      1 - normal optimization (like "python -O")
-      2 - extra optimization (like "python -OO")
-    If 'force' is true, all files are recompiled regardless of
-    timestamps.
-
-    The source filename encoded in each bytecode file defaults to the
-    filenames listed in 'py_files'; you can modify these with 'prefix' and
-    'basedir'.  'prefix' is a string that will be stripped off of each
-    source filename, and 'base_dir' is a directory name that will be
-    prepended (after 'prefix' is stripped).  You can supply either or both
-    (or neither) of 'prefix' and 'base_dir', as you wish.
-
-    If 'dry_run' is true, doesn't actually do anything that would
-    affect the filesystem.
-
-    Byte-compilation is either done directly in this interpreter process
-    with the standard py_compile module, or indirectly by writing a
-    temporary script and executing it.  Normally, you should let
-    'byte_compile()' figure out to use direct compilation or not (see
-    the source for details).  The 'direct' flag is used by the script
-    generated in indirect mode; unless you know what you're doing, leave
-    it set to None.
-    """
-
-    # Late import to fix a bootstrap issue: _posixsubprocess is built by
-    # setup.py, but setup.py uses distutils.
-    import subprocess
-
-    # nothing is done if sys.dont_write_bytecode is True
-    if sys.dont_write_bytecode:
-        raise DistutilsByteCompileError('byte-compiling is disabled.')
-
-    # First, if the caller didn't force us into direct or indirect mode,
-    # figure out which mode we should be in.  We take a conservative
-    # approach: choose direct mode *only* if the current interpreter is
-    # in debug mode and optimize is 0.  If we're not in debug mode (-O
-    # or -OO), we don't know which level of optimization this
-    # interpreter is running with, so we can't do direct
-    # byte-compilation and be certain that it's the right thing.  Thus,
-    # always compile indirectly if the current interpreter is in either
-    # optimize mode, or if either optimization level was requested by
-    # the caller.
-    if direct is None:
-        direct = (__debug__ and optimize == 0)
-
-    # "Indirect" byte-compilation: write a temporary script and then
-    # run it with the appropriate flags.
-    if not direct:
-        try:
-            from tempfile import mkstemp
-            (script_fd, script_name) = mkstemp(".py")
-        except ImportError:
-            from tempfile import mktemp
-            (script_fd, script_name) = None, mktemp(".py")
-        log.info("writing byte-compilation script '%s'", script_name)
-        if not dry_run:
-            if script_fd is not None:
-                script = os.fdopen(script_fd, "w")
-            else:
-                script = open(script_name, "w")
-
-            with script:
-                script.write("""\
-from distutils.util import byte_compile
-files = [
-""")
-
-                # XXX would be nice to write absolute filenames, just for
-                # safety's sake (script should be more robust in the face of
-                # chdir'ing before running it).  But this requires abspath'ing
-                # 'prefix' as well, and that breaks the hack in build_lib's
-                # 'byte_compile()' method that carefully tacks on a trailing
-                # slash (os.sep really) to make sure the prefix here is "just
-                # right".  This whole prefix business is rather delicate -- the
-                # problem is that it's really a directory, but I'm treating it
-                # as a dumb string, so trailing slashes and so forth matter.
-
-                #py_files = map(os.path.abspath, py_files)
-                #if prefix:
-                #    prefix = os.path.abspath(prefix)
-
-                script.write(",\n".join(map(repr, py_files)) + "]\n")
-                script.write("""
-byte_compile(files, optimize=%r, force=%r,
-             prefix=%r, base_dir=%r,
-             verbose=%r, dry_run=0,
-             direct=1)
-""" % (optimize, force, prefix, base_dir, verbose))
-
-        cmd = [sys.executable]
-        cmd.extend(_optim_args_from_interpreter_flags())
-        cmd.append(script_name)
-        spawn(cmd, dry_run=dry_run)
-        execute(os.remove, (script_name,), "removing %s" % script_name,
-                dry_run=dry_run)
-
-    # "Direct" byte-compilation: use the py_compile module to compile
-    # right here, right now.  Note that the script generated in indirect
-    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
-    # cross-process recursion.  Hey, it works!
-    else:
-        from py_compile import compile
-
-        for file in py_files:
-            if file[-3:] != ".py":
-                # This lets us be lazy and not filter filenames in
-                # the "install_lib" command.
-                continue
-
-            # Terminology from the py_compile module:
-            #   cfile - byte-compiled file
-            #   dfile - purported source filename (same as 'file' by default)
-            if optimize >= 0:
-                opt = '' if optimize == 0 else optimize
-                cfile = importlib.util.cache_from_source(
-                    file, optimization=opt)
-            else:
-                cfile = importlib.util.cache_from_source(file)
-            dfile = file
-            if prefix:
-                if file[:len(prefix)] != prefix:
-                    raise ValueError("invalid prefix: filename %r doesn't start with %r"
-                           % (file, prefix))
-                dfile = dfile[len(prefix):]
-            if base_dir:
-                dfile = os.path.join(base_dir, dfile)
-
-            cfile_base = os.path.basename(cfile)
-            if direct:
-                if force or newer(file, cfile):
-                    log.info("byte-compiling %s to %s", file, cfile_base)
-                    if not dry_run:
-                        compile(file, cfile, dfile)
-                else:
-                    log.debug("skipping byte-compilation of %s to %s",
-                              file, cfile_base)
-
-# byte_compile ()
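-# Editor's sketch (hypothetical paths): byte-compiling a built tree so the
-# source filename recorded in each .pyc points at the final install location:
-#   byte_compile(['build/lib/pkg/mod.py'], optimize=0,
-#                prefix='build/lib/', base_dir='/usr/lib/python3.9/site-packages')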
-
-def rfc822_escape (header):
-    """Return a version of the string escaped for inclusion in an
-    RFC-822 header, by ensuring there are 8 spaces after each newline.
-    """
-    lines = header.split('\n')
-    sep = '\n' + 8 * ' '
-    return sep.join(lines)
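-
-# Example:
-# >>> rfc822_escape('Summary\nSecond line')
-# 'Summary\n        Second line'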
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/__init__.py
deleted file mode 100644
index 8081f77b8812f3b42d7949daa4195d2c35dc70ac..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from ._monitor import TMonitor, TqdmSynchronisationWarning
-from ._tqdm_pandas import tqdm_pandas
-from .cli import main  # TODO: remove in v5.0.0
-from .gui import tqdm as tqdm_gui  # TODO: remove in v5.0.0
-from .gui import trange as tgrange  # TODO: remove in v5.0.0
-from .std import (
-    TqdmDeprecationWarning, TqdmExperimentalWarning, TqdmKeyError, TqdmMonitorWarning,
-    TqdmTypeError, TqdmWarning, tqdm, trange)
-from .version import __version__
-
-__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
-           'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
-           'TqdmTypeError', 'TqdmKeyError',
-           'TqdmWarning', 'TqdmDeprecationWarning',
-           'TqdmExperimentalWarning',
-           'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
-           '__version__']
-
-
-def tqdm_notebook(*args, **kwargs):  # pragma: no cover
-    """See tqdm.notebook.tqdm for full documentation"""
-    from warnings import warn
-
-    from .notebook import tqdm as _tqdm_notebook
-    warn("This function will be removed in tqdm==5.0.0\n"
-         "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`",
-         TqdmDeprecationWarning, stacklevel=2)
-    return _tqdm_notebook(*args, **kwargs)
-
-
-def tnrange(*args, **kwargs):  # pragma: no cover
-    """Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`."""
-    from warnings import warn
-
-    from .notebook import trange as _tnrange
-    warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`",
-         TqdmDeprecationWarning, stacklevel=2)
-    return _tnrange(*args, **kwargs)
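-
-# Editor's note: the supported replacements for these deprecated wrappers are
-#   from tqdm.notebook import tqdm, trange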
diff --git a/spaces/pscpeng/ChuanhuChatGPT/Dockerfile b/spaces/pscpeng/ChuanhuChatGPT/Dockerfile
deleted file mode 100644
index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000
--- a/spaces/pscpeng/ChuanhuChatGPT/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-FROM python:3.9
-LABEL maintainer="iskoldt"
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV my_api_key=empty
-ENV dockerrun=yes
-# Use shell form so the redirection and pipe run under /bin/sh; in the JSON
-# exec form, "2>&1" and "|" would be passed to python3 as literal arguments.
-CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
diff --git a/spaces/pyodide-demo/self-hosted/decorator.js b/spaces/pyodide-demo/self-hosted/decorator.js
deleted file mode 100644
index 04054baaebefe52ac62db68107a3b03fd90de87f..0000000000000000000000000000000000000000
--- a/spaces/pyodide-demo/self-hosted/decorator.js
+++ /dev/null
@@ -1 +0,0 @@
-var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="decorator.data";var REMOTE_PACKAGE_BASE="decorator.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","decorator-5.1.1-py3.9.egg-info",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:12749,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1570,2629,3805,5144,6183,7409,8656,9698,10998,12265],sizes:[1570,1059,1176,1339,1039,1226,1247,1042,1300,1267,484],successes:[1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with  -s LZ4=1  ?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_decorator.data")}Module["addRunDependency"]("datafile_decorator.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/decorator.py",start:0,end:16752,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/PKG-INFO",start:16752,end:20733,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/SOURCES.txt",start:20733,end:21112,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/dependency_links.txt",start:21112,end:21113,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/not-zip-safe",start:21113,end:21114,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/pbr.json",start:21114,end:21161,audio:0},{filename:"/lib/python3.9/site-packages/decorator-5.1.1-py3.9.egg-info/top_level.txt",start:21161,end:21171,audio:0}],remote_package_size:16845,package_uuid:"2d7adc49-e27a-4a37-9c55-95b577141b9f"})})();
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/En Office Professional Plus 2013 X64 Dvd 1123674.isol.md b/spaces/quidiaMuxgu/Expedit-SAM/En Office Professional Plus 2013 X64 Dvd 1123674.isol.md
deleted file mode 100644
index 2a8ee1c3fc262a044afa387fa4f0147dbb1f91fc..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/En Office Professional Plus 2013 X64 Dvd 1123674.isol.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>En Office Professional Plus 2013 X64 Dvd 1123674.isol</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://geags.com/2uCsQL">https://geags.com/2uCsQL</a></b></p><br /><br />
-
-Original Microsoft licenses 32-64 bit on Offer - Best Price - Buy Online Product Key. ... office 2013 professional plus microsoft license buy online product key ... Plus 32/64-bit - (Product Key); - Download link for Office 2013 32/64-bit (ISO File) ...<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Euro Truck Simulator 2 - Space Paint Jobs Pack _BEST_ Download Now.md b/spaces/quidiaMuxgu/Expedit-SAM/Euro Truck Simulator 2 - Space Paint Jobs Pack _BEST_ Download Now.md
deleted file mode 100644
index fd20e39fa5d41448cc58e51672ec8a00a1be59ee..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Euro Truck Simulator 2 - Space Paint Jobs Pack _BEST_ Download Now.md	
+++ /dev/null
@@ -1,42 +0,0 @@
-<h2>Euro Truck Simulator 2 - Space Paint Jobs Pack Download Now</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://geags.com/2uCq3S">https://geags.com/2uCq3S</a></b></p><br /><br />
-<br />
-Deep Space is a space-exploration-themed off-road game where you work as part of an intergalactic mapping team. You and your teammates are sent on deep space missions in an attempt to beat the leaderboard and win top prizes.
-
-Deep Space is a single-player and multiplayer racing game where you control a vehicle capable of traveling great distances to get from point A to point B. Drive vehicles like hovercrafts, asteroids and fuel ships to achieve the greatest distance and/or speed!
-
-Deep Space is a racing game where you compete against other players to see who can travel the greatest distance. Your vehicle can be enhanced with custom parts to improve your speed, traction and agility. You will also be challenged with collecting fuel to keep yourself alive and making it to the finish line.
-
-Q:
-
-How to view/test table inside assembly stored in server
-
-I have a .dll assembly that I am deploying to a remote server on the same network.
-
-I can see a table in the database and access the table directly. However, when I try to access the table from the .dll, it does not exist.
-
-If I copy the .dll to my local machine and open the table in SSMS directly, it works. But I don't know how to do it from the .dll stored on the remote server.
-
-How can I get the table into SSMS or see the table inside the .dll assembly from the remote server?
-
-A:
-
-If the tables are created as part of a database schema, then you can't access them from the .dll. You need to use Object Explorer to see the tables in the database.
-
-This link may be useful
-
-Visual Studio .NET Connection to another database
-
-If you are using SQL Server 2008 then this might be useful
-
-Connecting to a SQL Server Database from a Local Database
-
-If you are using SQL Server 2000 then you may need to use the OLE DB provider for SQL Server to get around this.
-
-Q:
-
-HTML5 video failed to play on IE 10
-
-I'm using IE10 on a Windows 8 (8.1, actually) machine and have been encountering the following<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Graphitech Cimagraphi V8 13 Multilingual Lz0.md b/spaces/quidiaMuxgu/Expedit-SAM/Graphitech Cimagraphi V8 13 Multilingual Lz0.md
deleted file mode 100644
index 1ff6e48951b5ae8b110193c798ecc8ed4cd62f92..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Graphitech Cimagraphi V8 13 Multilingual Lz0.md	
+++ /dev/null
@@ -1,14 +0,0 @@
-<h2>Graphitech Cimagraphi V8 13 Multilingual Lz0</h2><br /><p><b><b>Download File</b> &#128279; <a href="https://geags.com/2uCs2b">https://geags.com/2uCs2b</a></b></p><br /><br />
-<br />
-2259 entries - Pronest 13 serial numbers, cracks and keygens are available here. You need to enter codes to activate them. ...
-Graphitech Cimagraphi v8.13 MULTILINGUAL keygen by Lz0.
-Kaspersky Internet Security 2014 14.0.0.651.
-Nod32 ESET Smart Security Premium.
-NOD32 Internet Security.<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/demucs.py b/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/demucs.py
deleted file mode 100644
index d2c08e73d65de3031a1e1be545b68afd5554f7a5..0000000000000000000000000000000000000000
--- a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/demucs.py
+++ /dev/null
@@ -1,459 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import typing as tp
-
-import julius
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from .states import capture_init
-from .utils import center_trim, unfold
-
-
-class BLSTM(nn.Module):
-    """
-    BiLSTM with same hidden units as input dim.
-    If `max_steps` is not None, the input will be split into overlapping
-    chunks and the LSTM applied separately to each chunk.
-    """
-    def __init__(self, dim, layers=1, max_steps=None, skip=False):
-        super().__init__()
-        assert max_steps is None or max_steps % 4 == 0
-        self.max_steps = max_steps
-        self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
-        self.linear = nn.Linear(2 * dim, dim)
-        self.skip = skip
-
-    def forward(self, x):
-        B, C, T = x.shape
-        y = x
-        framed = False
-        if self.max_steps is not None and T > self.max_steps:
-            width = self.max_steps
-            stride = width // 2
-            frames = unfold(x, width, stride)
-            nframes = frames.shape[2]
-            framed = True
-            x = frames.permute(0, 2, 1, 3).reshape(-1, C, width)
-
-        x = x.permute(2, 0, 1)
-
-        x = self.lstm(x)[0]
-        x = self.linear(x)
-        x = x.permute(1, 2, 0)
-        if framed:
-            out = []
-            frames = x.reshape(B, -1, C, width)
-            limit = stride // 2
-            for k in range(nframes):
-                if k == 0:
-                    out.append(frames[:, k, :, :-limit])
-                elif k == nframes - 1:
-                    out.append(frames[:, k, :, limit:])
-                else:
-                    out.append(frames[:, k, :, limit:-limit])
-            out = torch.cat(out, -1)
-            out = out[..., :T]
-            x = out
-        if self.skip:
-            x = x + y
-        return x
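-
-# Editor's sketch of the chunking above: with max_steps=200 a (B, C, 1000)
-# input is unfolded into frames of width 200 and stride 100; after the LSTM,
-# stride // 2 = 50 overlapping steps are trimmed from each interior frame
-# edge before the frames are concatenated back to length T.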
-
-
-def rescale_conv(conv, reference):
-    """Rescale initial weight scale. It is unclear why it helps but it certainly does.
-    """
-    std = conv.weight.std().detach()
-    scale = (std / reference)**0.5
-    conv.weight.data /= scale
-    if conv.bias is not None:
-        conv.bias.data /= scale
-
-
-def rescale_module(module, reference):
-    for sub in module.modules():
-        if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):
-            rescale_conv(sub, reference)
-
-
-class LayerScale(nn.Module):
-    """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
-    This diagonally rescales residual outputs, initialized close to 0 and then learnt.
-    """
-    def __init__(self, channels: int, init: float = 0):
-        super().__init__()
-        self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True))
-        self.scale.data[:] = init
-
-    def forward(self, x):
-        return self.scale[:, None] * x
-
-
-class DConv(nn.Module):
-    """
-    New residual branches in each encoder layer.
-    This alternates dilated convolutions, potentially with LSTMs and attention.
-    Also, before entering each residual branch, the dimension is projected onto a
-    smaller subspace, e.g. of dim `channels // compress`.
-    """
-    def __init__(self, channels: int, compress: float = 4, depth: int = 2, init: float = 1e-4,
-                 norm=True, attn=False, heads=4, ndecay=4, lstm=False, gelu=True,
-                 kernel=3, dilate=True):
-        """
-        Args:
-            channels: input/output channels for residual branch.
-            compress: amount of channel compression inside the branch.
-            depth: number of layers in the residual branch. Each layer has its own
-                projection, and potentially LSTM and attention.
-            init: initial scale for LayerScale.
-            norm: use GroupNorm.
-            attn: use LocalAttention.
-            heads: number of heads for the LocalAttention.
-            ndecay: number of decay controls in the LocalAttention.
-            lstm: use LSTM.
-            gelu: Use GELU activation.
-            kernel: kernel size for the (dilated) convolutions.
-            dilate: if true, use dilation, increasing with the depth.
-        """
-
-        super().__init__()
-        assert kernel % 2 == 1
-        self.channels = channels
-        self.compress = compress
-        self.depth = abs(depth)
-        dilate = depth > 0
-
-        norm_fn: tp.Callable[[int], nn.Module]
-        norm_fn = lambda d: nn.Identity()  # noqa
-        if norm:
-            norm_fn = lambda d: nn.GroupNorm(1, d)  # noqa
-
-        hidden = int(channels / compress)
-
-        act: tp.Type[nn.Module]
-        if gelu:
-            act = nn.GELU
-        else:
-            act = nn.ReLU
-
-        self.layers = nn.ModuleList([])
-        for d in range(self.depth):
-            dilation = 2 ** d if dilate else 1
-            padding = dilation * (kernel // 2)
-            mods = [
-                nn.Conv1d(channels, hidden, kernel, dilation=dilation, padding=padding),
-                norm_fn(hidden), act(),
-                nn.Conv1d(hidden, 2 * channels, 1),
-                norm_fn(2 * channels), nn.GLU(1),
-                LayerScale(channels, init),
-            ]
-            if attn:
-                mods.insert(3, LocalState(hidden, heads=heads, ndecay=ndecay))
-            if lstm:
-                mods.insert(3, BLSTM(hidden, layers=2, max_steps=200, skip=True))
-            layer = nn.Sequential(*mods)
-            self.layers.append(layer)
-
-    def forward(self, x):
-        for layer in self.layers:
-            x = x + layer(x)
-        return x
-
-
-class LocalState(nn.Module):
-    """Local state allows to have attention based only on data (no positional embedding),
-    but while setting a constraint on the time window (e.g. decaying penalty term).
-
-    Also a failed experiments with trying to provide some frequency based attention.
-    """
-    def __init__(self, channels: int, heads: int = 4, nfreqs: int = 0, ndecay: int = 4):
-        super().__init__()
-        assert channels % heads == 0, (channels, heads)
-        self.heads = heads
-        self.nfreqs = nfreqs
-        self.ndecay = ndecay
-        self.content = nn.Conv1d(channels, channels, 1)
-        self.query = nn.Conv1d(channels, channels, 1)
-        self.key = nn.Conv1d(channels, channels, 1)
-        if nfreqs:
-            self.query_freqs = nn.Conv1d(channels, heads * nfreqs, 1)
-        if ndecay:
-            self.query_decay = nn.Conv1d(channels, heads * ndecay, 1)
-            # Initialize decay close to zero (there is a sigmoid), for maximum initial window.
-            self.query_decay.weight.data *= 0.01
-            assert self.query_decay.bias is not None  # stupid type checker
-            self.query_decay.bias.data[:] = -2
-        self.proj = nn.Conv1d(channels + heads * nfreqs, channels, 1)
-
-    def forward(self, x):
-        B, C, T = x.shape
-        heads = self.heads
-        indexes = torch.arange(T, device=x.device, dtype=x.dtype)
-        # left index are keys, right index are queries
-        delta = indexes[:, None] - indexes[None, :]
-
-        queries = self.query(x).view(B, heads, -1, T)
-        keys = self.key(x).view(B, heads, -1, T)
-        # t are keys, s are queries
-        dots = torch.einsum("bhct,bhcs->bhts", keys, queries)
-        dots /= keys.shape[2]**0.5
-        if self.nfreqs:
-            periods = torch.arange(1, self.nfreqs + 1, device=x.device, dtype=x.dtype)
-            freq_kernel = torch.cos(2 * math.pi * delta / periods.view(-1, 1, 1))
-            freq_q = self.query_freqs(x).view(B, heads, -1, T) / self.nfreqs ** 0.5
-            dots += torch.einsum("fts,bhfs->bhts", freq_kernel, freq_q)
-        if self.ndecay:
-            decays = torch.arange(1, self.ndecay + 1, device=x.device, dtype=x.dtype)
-            decay_q = self.query_decay(x).view(B, heads, -1, T)
-            decay_q = torch.sigmoid(decay_q) / 2
-            decay_kernel = - decays.view(-1, 1, 1) * delta.abs() / self.ndecay**0.5
-            dots += torch.einsum("fts,bhfs->bhts", decay_kernel, decay_q)
-
-        # Kill self reference.
-        dots.masked_fill_(torch.eye(T, device=dots.device, dtype=torch.bool), -100)
-        weights = torch.softmax(dots, dim=2)
-
-        content = self.content(x).view(B, heads, -1, T)
-        result = torch.einsum("bhts,bhct->bhcs", weights, content)
-        if self.nfreqs:
-            time_sig = torch.einsum("bhts,fts->bhfs", weights, freq_kernel)
-            result = torch.cat([result, time_sig], 2)
-        result = result.reshape(B, -1, T)
-        return x + self.proj(result)
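-
-# Editor's sketch of the decay penalty above: each head predicts, per query,
-# sigmoid-squashed weights over ndecay fixed decay rates (1..ndecay); the
-# resulting penalty grows with |t - s|, biasing attention toward nearby
-# frames, while the self-reference position is masked out entirely.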
-
-
-class Demucs(nn.Module):
-    @capture_init
-    def __init__(self,
-                 sources,
-                 # Channels
-                 audio_channels=2,
-                 channels=64,
-                 growth=2.,
-                 # Main structure
-                 depth=6,
-                 rewrite=True,
-                 lstm_layers=0,
-                 # Convolutions
-                 kernel_size=8,
-                 stride=4,
-                 context=1,
-                 # Activations
-                 gelu=True,
-                 glu=True,
-                 # Normalization
-                 norm_starts=4,
-                 norm_groups=4,
-                 # DConv residual branch
-                 dconv_mode=1,
-                 dconv_depth=2,
-                 dconv_comp=4,
-                 dconv_attn=4,
-                 dconv_lstm=4,
-                 dconv_init=1e-4,
-                 # Pre/post processing
-                 normalize=True,
-                 resample=True,
-                 # Weight init
-                 rescale=0.1,
-                 # Metadata
-                 samplerate=44100,
-                 segment=4 * 10):
-        """
-        Args:
-            sources (list[str]): list of source names
-            audio_channels (int): stereo or mono
-            channels (int): first convolution channels
-            depth (int): number of encoder/decoder layers
-            growth (float): multiply (resp divide) number of channels by that
-                for each layer of the encoder (resp decoder)
-            rewrite (bool): add 1x1 convolution to each layer.
-            lstm_layers (int): number of lstm layers, 0 = no lstm. Deactivated
-                by default, as this is now replaced by the smaller and faster small LSTMs
-                in the DConv branches.
-            kernel_size (int): kernel size for convolutions
-            stride (int): stride for convolutions
-            context (int): kernel size of the convolution in the
-                decoder before the transposed convolution. If > 1,
-                will provide some context from neighboring time steps.
-            gelu: use GELU activation function.
-            glu (bool): use glu instead of ReLU for the 1x1 rewrite conv.
-            norm_starts: layer at which group norm starts being used.
-                decoder layers are numbered in reverse order.
-            norm_groups: number of groups for group norm.
-            dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both.
-            dconv_depth: depth of residual DConv branch.
-            dconv_comp: compression of DConv branch.
-            dconv_attn: adds attention layers in DConv branch starting at this layer.
-            dconv_lstm: adds a LSTM layer in DConv branch starting at this layer.
-            dconv_init: initial scale for the DConv branch LayerScale.
-            normalize (bool): normalizes the input audio on the fly, and scales back
-                the output by the same amount.
-            resample (bool): upsample x2 the input and downsample /2 the output.
-            rescale (int): rescale initial weights of convolutions
-                to get their standard deviation closer to `rescale`.
-            samplerate (int): stored as meta information for easing
-                future evaluations of the model.
-            segment (float): duration of the chunks of audio to ideally evaluate the model on.
-                This is used by `demucs.apply.apply_model`.
-        """
-
-        super().__init__()
-        self.audio_channels = audio_channels
-        self.sources = sources
-        self.kernel_size = kernel_size
-        self.context = context
-        self.stride = stride
-        self.depth = depth
-        self.resample = resample
-        self.channels = channels
-        self.normalize = normalize
-        self.samplerate = samplerate
-        self.segment = segment
-        self.encoder = nn.ModuleList()
-        self.decoder = nn.ModuleList()
-        self.skip_scales = nn.ModuleList()
-
-        if glu:
-            activation = nn.GLU(dim=1)
-            ch_scale = 2
-        else:
-            activation = nn.ReLU()
-            ch_scale = 1
-        if gelu:
-            act2 = nn.GELU
-        else:
-            act2 = nn.ReLU
-
-        in_channels = audio_channels
-        padding = 0
-        for index in range(depth):
-            norm_fn = lambda d: nn.Identity()  # noqa
-            if index >= norm_starts:
-                norm_fn = lambda d: nn.GroupNorm(norm_groups, d)  # noqa
-
-            encode = []
-            encode += [
-                nn.Conv1d(in_channels, channels, kernel_size, stride),
-                norm_fn(channels),
-                act2(),
-            ]
-            attn = index >= dconv_attn
-            lstm = index >= dconv_lstm
-            if dconv_mode & 1:
-                encode += [DConv(channels, depth=dconv_depth, init=dconv_init,
-                                 compress=dconv_comp, attn=attn, lstm=lstm)]
-            if rewrite:
-                encode += [
-                    nn.Conv1d(channels, ch_scale * channels, 1),
-                    norm_fn(ch_scale * channels), activation]
-            self.encoder.append(nn.Sequential(*encode))
-
-            decode = []
-            if index > 0:
-                out_channels = in_channels
-            else:
-                out_channels = len(self.sources) * audio_channels
-            if rewrite:
-                decode += [
-                    nn.Conv1d(channels, ch_scale * channels, 2 * context + 1, padding=context),
-                    norm_fn(ch_scale * channels), activation]
-            if dconv_mode & 2:
-                decode += [DConv(channels, depth=dconv_depth, init=dconv_init,
-                                 compress=dconv_comp, attn=attn, lstm=lstm)]
-            decode += [nn.ConvTranspose1d(channels, out_channels,
-                       kernel_size, stride, padding=padding)]
-            if index > 0:
-                decode += [norm_fn(out_channels), act2()]
-            self.decoder.insert(0, nn.Sequential(*decode))
-            in_channels = channels
-            channels = int(growth * channels)
-
-        channels = in_channels
-        if lstm_layers:
-            self.lstm = BLSTM(channels, lstm_layers)
-        else:
-            self.lstm = None
-
-        if rescale:
-            rescale_module(self, reference=rescale)
-
-    def valid_length(self, length):
-        """
-        Return the nearest valid length to use with the model so that
-        there are no time steps left over in a convolution, i.e. for all
-        layers, (input_size - kernel_size) % stride == 0.
-
-        Note that the input is automatically padded if necessary to ensure that the output
-        has the same length as the input.
-        """
-        if self.resample:
-            length *= 2
-
-        for _ in range(self.depth):
-            length = math.ceil((length - self.kernel_size) / self.stride) + 1
-            length = max(1, length)
-
-        for _ in range(self.depth):
-            length = (length - 1) * self.stride + self.kernel_size
-
-        if self.resample:
-            length = math.ceil(length / 2)
-        return int(length)
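-
-# Editor's sketch: valid_length() walks the length forward through every
-# encoder conv (ceil division by the stride) and projects it back out, so it
-# returns the nearest length at or above the input length that every layer
-# consumes exactly (doubling and halving around it when resample=True).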
-
-    def forward(self, mix):
-        x = mix
-        length = x.shape[-1]
-
-        if self.normalize:
-            mono = mix.mean(dim=1, keepdim=True)
-            mean = mono.mean(dim=-1, keepdim=True)
-            std = mono.std(dim=-1, keepdim=True)
-            x = (x - mean) / (1e-5 + std)
-        else:
-            mean = 0
-            std = 1
-
-        delta = self.valid_length(length) - length
-        x = F.pad(x, (delta // 2, delta - delta // 2))
-
-        if self.resample:
-            x = julius.resample_frac(x, 1, 2)
-
-        saved = []
-        for encode in self.encoder:
-            x = encode(x)
-            saved.append(x)
-
-        if self.lstm:
-            x = self.lstm(x)
-
-        for decode in self.decoder:
-            skip = saved.pop(-1)
-            skip = center_trim(skip, x)
-            x = decode(x + skip)
-
-        if self.resample:
-            x = julius.resample_frac(x, 2, 1)
-        x = x * std + mean
-        x = center_trim(x, length)
-        x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1))
-        return x
-
-    def load_state_dict(self, state, strict=True):
-        # fix a mismatch with previous generation Demucs models.
-        for idx in range(self.depth):
-            for a in ['encoder', 'decoder']:
-                for b in ['bias', 'weight']:
-                    new = f'{a}.{idx}.3.{b}'
-                    old = f'{a}.{idx}.2.{b}'
-                    if old in state and new not in state:
-                        state[new] = state.pop(old)
-        super().load_state_dict(state, strict=strict)
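-
-# Editor's note (sketch): the remapping above renames checkpoint keys such as
-# 'encoder.0.2.weight' to 'encoder.0.3.weight' so state dicts saved by a
-# previous Demucs generation still load against the current layer ordering.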
diff --git a/spaces/radames/transformers-js-sveltekit-server-example-app/tailwind.config.js b/spaces/radames/transformers-js-sveltekit-server-example-app/tailwind.config.js
deleted file mode 100644
index 51fdce502be9a8a39d6d639af0453c58be22ab27..0000000000000000000000000000000000000000
--- a/spaces/radames/transformers-js-sveltekit-server-example-app/tailwind.config.js
+++ /dev/null
@@ -1,8 +0,0 @@
-/** @type {import('tailwindcss').Config} */
-export default {
-  content: ['./src/**/*.{html,js,svelte,ts}'],
-  theme: {
-    extend: {}
-  },
-  plugins: []
-};
\ No newline at end of file
diff --git a/spaces/radames/transformers-js-sveltekit-server-example-app/vite.config.js b/spaces/radames/transformers-js-sveltekit-server-example-app/vite.config.js
deleted file mode 100644
index bbf8c7da43f0080dc6b9fb275f9583b7c17f1506..0000000000000000000000000000000000000000
--- a/spaces/radames/transformers-js-sveltekit-server-example-app/vite.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-import { sveltekit } from '@sveltejs/kit/vite';
-import { defineConfig } from 'vite';
-
-export default defineConfig({
-	plugins: [sveltekit()]
-});
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] download pc Enjoy home cinema editing burning and more with this software bundle.md b/spaces/raedeXanto/academic-chatgpt-beta/CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] download pc Enjoy home cinema editing burning and more with this software bundle.md
deleted file mode 100644
index 7d5a8e5a38cc89180a80a36edff8778b3b3fcb5b..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] download pc Enjoy home cinema editing burning and more with this software bundle.md	
+++ /dev/null
@@ -1,161 +0,0 @@
-
-<h1>CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] Download PC</h1>
-<p>If you are looking for a comprehensive multimedia suite that can handle all your media needs, you might want to check out CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ]. This software bundle includes 15 of CyberLink's top-notch products that can help you create, play and organize your media files with ease and efficiency.</p>
-<h2>CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] download pc</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://tinourl.com/2uL0RF">https://tinourl.com/2uL0RF</a></b></p><br /><br />
-<p>In this article, we will show you what CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] is, what are its features and benefits, how to download and install it on your PC, how to use it for various media tasks, and how it compares with other multimedia suites in the market.</p>
- <h2>What is CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ]?</h2>
-<p>CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] is a collection of software programs that can help you with various aspects of multimedia creation, playback, editing, burning, ripping, converting and online publishing.</p>
-<p>The software programs included in CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] are:</p>
-<ul>
-<li>PowerDVD 17 - Basic video playback software with connected home theater.</li>
-<li>PowerDirector 15 - Unique combination of advanced video editing features.</li>
-<li>PhotoDirector 8 - Complete graphics editing tools.</li>
-<li>Power2GO 10 - Burn data, ripping and converting music to CDs, DVDs and Blu-ray discs.</li>
-<li>MediaEspresso 7 .5 - Convert media files, with support for 4K Ultra HD.</li>
-<li>YouCam 7 - A set of effects to overlay video conversations with your friends.</li>
-<li>Screen Recorder - Capture an image from your desktop.</li>
-<li>MediaShow 6 - Easily organize your collection of multimedia content.</li>
-<li>PowerProducer 6 - This is a tool for authoring CDs, DVDs and Blu-Ray.</li>
-<li>Wave Editor 2 - Editing soundtracks.</li>
-<li>LabelPrint 2 .5 - Design and print stylish CD / DVD labels in 4 easy steps.</li>
-<li>PowerBackup 2 .6 - Effortlessly back up up to 50GB of data on Blu-ray media.</li>
-<li>PowerDVD Copy 1 .5 - Copy DVDs in high quality.</li>
-<li>Instant Burn 5 - Burning Blu-ray Discs with packet writing technology.</li>
-</ul>
- <h2>What are the features and benefits of CyberLink Media Suite Ultimate 15 .0 (continued) </h2>
- <p>CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] offers a wide range of features and benefits that can enhance your multimedia experience . Some of them are :</p>
- <ul>
-<li>It delivers the latest in digital multimedia , including Blu-ray , 3D video , 4K Ultra HD and new H .265 ( HEVC ) format playback , and ultra-fast file conversions .</li>
-<li>It allows you to create professional-looking videos , photos , slideshows , music , discs , labels , menus and more with easy-to-use tools and templates .</li>
-<li>It lets you organize , manage and share your media files across devices , cloud services and social networks with one-click upload options .</li>
-<li>It provides you with a user-friendly interface that lets you quickly access the desired category of media tasks , such as Video , Photo , Data , Music and Backup etc.</li>
-<li>It gives you the option to playback 2D photos in 3D and import your photos from various sources , such as cameras , scanners , mobile devices or online albums .</li>
-<li>It lets you edit your audio files , rip MP3 and WAV files from CDs , DVDs or Blu-ray discs , and create mobile ringtones from your favorite songs or sound clips .</li>
-<li>It offers you a set of effects to overlay video conversations with your friends , such as avatars , filters , frames , emoticons , gadgets and more .</li>
-<li>It enables you to capture an image from your desktop , record your screen activity or webcam video , or stream live gameplay or webinars with high-quality audio and video output .</li>
-<li>It helps you design and print stylish CD / DVD labels in four easy steps , using predefined templates or your own images and text .</li>
-<li>It allows you to effortlessly back up up to 50GB of data on Blu-ray media , or copy DVDs in high quality with smart fit technology that automatically adjusts the video quality according to the disc space available .</li>
-<li>It supports a wide range of formats for media conversion , including MP4 , MKV , AVI , WMV , MOV , FLV , MTS , M2TS , MPEG-2 HD Video (*.mpg;*.mpeg) etc., with support for Intel Quick Sync Video technology that accelerates conversion speed by up to six times faster than before .</li>
-</ul>
- <h2>How to download CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] for PC?</h2>
- <p>If you want to download CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] for PC, you can follow these simple steps :</p>
-<p>CyberLink Media Suite 15 Ultimate free download offline installer<br />
-CyberLink Media Suite Ultimate 15.0.0512.0 pre-activated full version<br />
-CyberLink Media Suite 15 Ultimate features and comparison<br />
-CyberLink Media Suite Essentials free for Dell computers<br />
-CyberLink Media Suite 15 Ultimate home cinema technology<br />
-CyberLink Media Suite 15 Ultimate video editing with PowerDirector<br />
-CyberLink Media Suite 15 Ultimate graphics editing with PhotoDirector<br />
-CyberLink Media Suite 15 Ultimate data burning with Power2GO<br />
-CyberLink Media Suite 15 Ultimate media conversion with MediaEspresso<br />
-CyberLink Media Suite 15 Ultimate video playback with PowerDVD<br />
-CyberLink Media Suite 15 Ultimate video effects with YouCam<br />
-CyberLink Media Suite 15 Ultimate screen capture with Screen Recorder<br />
-CyberLink Media Suite 15 Ultimate media organization with MediaShow<br />
-CyberLink Media Suite 15 Ultimate CD/DVD authoring with PowerProducer<br />
-CyberLink Media Suite 15 Ultimate audio editing with Wave Editor<br />
-CyberLink Media Suite 15 Ultimate CD/DVD label design with LabelPrint<br />
-CyberLink Media Suite 15 Ultimate data backup with PowerBackup<br />
-CyberLink Media Suite 15 Ultimate DVD copying with PowerDVD Copy<br />
-CyberLink Media Suite 15 Ultimate Blu-ray burning with Instant Burn<br />
-CyberLink Media Suite 15 Ultimate system requirements and compatibility<br />
-How to download and install CyberLink Media Suite 15 Ultimate on Windows PC<br />
-How to update CyberLink Media Suite 15 Ultimate to the latest version<br />
-How to activate CyberLink Media Suite 15 Ultimate with serial key or crack<br />
-How to uninstall or remove CyberLink Media Suite 15 Ultimate from PC<br />
-How to use CyberLink Media Suite 15 Ultimate for various multimedia tasks<br />
-CyberLink Media Suite 15 Ultimate review and rating by users and experts<br />
-CyberLink Media Suite 15 Ultimate tutorial and guide for beginners and advanced users<br />
-CyberLink Media Suite 15 Ultimate troubleshooting and technical support<br />
-CyberLink Media Suite 15 Ultimate discount and coupon code for online purchase<br />
-CyberLink Media Suite 15 Ultimate alternative and competitor software comparison<br />
-How to get CyberLink Media Suite Essentials for free on Dell PC with DVD or Blu-ray player<br />
-How to upgrade from CyberLink Media Suite Essentials to CyberLink Media Suite 15 Ultra or Ultimate<br />
-How to download and install CyberLink Media Suite Essentials on Dell PC with DVD or Blu-ray player<br />
-How to update CyberLink Media Suite Essentials to the latest version on Dell PC with DVD or Blu-ray player<br />
-How to activate CyberLink Media Suite Essentials on Dell PC with DVD or Blu-ray player<br />
-How to uninstall or remove CyberLink Media Suite Essentials from Dell PC with DVD or Blu-ray player<br />
-How to use CyberLink Media Suite Essentials for basic multimedia tasks on Dell PC with DVD or Blu-ray player<br />
-CyberLink Media Suite Essentials review and rating by Dell users and experts<br />
-CyberLink Media Suite Essentials tutorial and guide for Dell users and beginners<br />
-CyberLink Media Suite Essentials troubleshooting and technical support for Dell users</p>
- <ol>
-<li>Go to this link : <a href="http://www.mediafire.com/file/i3rbpd9k6bdsok9/">http://www.mediafire.com/file/i3rbpd9k6bdsok9/</a></li>
-<li>Select the app you want to install from the list (you can choose one or more apps) and click on Download button.</li>
-<li>Wait for the download to complete (the file size is about 2 .77 GB).</li>
-<li>Extract the downloaded file using WinRAR or any other file compression tool.</li>
-<li>Run the setup file (CyberLink.Media.Suite.Ultimate.v15.exe) as administrator and follow the instructions on screen.</li>
-</ol>
- <p>Note : CyberLink Media Suite Ultimate 15 .0 .0512 .0 [ Soft4Win ] is pre-activated, so you don't need a license key or activation code to use it.</p>
- <h2>How to use CyberLink Media Suite Ultimate 15 .0 (continued) </h2>
- <p>CyberLink Media Suite Ultimate 15 .0 .0512 .0 (continued) <h2>How to use CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] for PC?</h2>
- <p>After you have installed CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] on your PC, you can use it for various media tasks by following these steps:</p>
- <ol>
-<li>Launch CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] from your desktop shortcut or Start menu.</li>
-<li>You will see a user-friendly interface that lets you quickly access the desired category of media tasks, such as Video, Photo, Data, Music and Backup etc.</li>
-<li>Click on the category that you want to work with and you will see a list of software programs that are included in CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] for that category.</li>
-<li>Select the software program that you want to use and it will open in a separate window.</li>
-<li>You can then use the software program to create, play and organize your media files with ease and efficiency.</li>
-</ol>
- <p>For example, if you want to edit a video with PowerDirector 15, you can do the following:</p>
- <ol>
-<li>Launch CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] and click on Video.</li>
-<li>Select PowerDirector 15 from the list of software programs.</li>
-<li>PowerDirector 15 will open in a separate window and you will see a timeline-based interface that lets you edit your video with advanced features.</li>
-<li>You can import your video clips from various sources, such as your PC, camera, mobile device or online album.</li>
-<li>You can then trim, crop, rotate, split, merge, enhance, stabilize, add transitions, effects, titles, voiceovers, music and more to your video clips.</li>
-<li>You can also use the Magic Movie Wizard to automatically create a video with predefined themes and styles.</li>
-<li>When you are done editing your video, you can export it to various formats, such as MP4, MKV, AVI, WMV, MOV etc., or upload it directly to YouTube, Facebook or Vimeo.</li>
-</ol>
- <p>You can use the same steps to use other software programs in CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] for different media tasks.</p>
- <h2>Comparison of CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] with other multimedia suites</h2>
- <p>CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] is one of the best multimedia suites in the market that offers a complete solution for all your media needs. However, there are also other multimedia suites that you might want to consider before making your final decision.</p>
- <p>Here is a table that compares CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] with some of its competitors:</p>
- <table>
-<tr><th>Multimedia suite</th><th>Features</th><th>Benefits</th><th>Drawbacks</th></tr>
-<tr><td>CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]</td><td>- 15 software programs for various media tasks<br>- Latest in digital multimedia, including Blu-ray, 3D video, 4K Ultra HD and new H.265 (HEVC) format playback, and ultra-fast file conversions<br>- User-friendly interface that lets you quickly access the desired category of media tasks<br>- Pre-activated, no license key or activation code required</td><td>- Comprehensive multimedia suite that can handle all your media needs<br>- Professional-looking videos, photos, slideshows, music, discs, labels, menus and more with easy-to-use tools and templates<br>- Organize, manage and share your media files across devices, cloud services and social networks with one-click upload options<br>- High-quality playback and editing of 4K Ultra HD and 3D video<br>- Fast and efficient media conversion with Intel Quick Sync Video technology</td><td>- Large file size (about 2.77 GB)<br>- Only English version available<br>- No official retail links provided</td></tr>
-<tr><td>Nero Platinum Suite 2021</td><td>- 7 software programs for various media tasks<br>- Burn, copy, rip and create CDs, DVDs and Blu-ray discs<br>- Edit videos with express or advanced mode<br>- Enhance photos with AI tools<br>- Back up data with OneDrive integration<br>- Stream videos, photos and music to TV or mobile devices<br>- Convert media files with support for HEVC (H.265) format</td><td>- Reliable multimedia suite that can handle most of your media needs<br>- Secure burning and copying of discs with SecurDisc technology<br>- Easy video editing with drag-and-drop functionality<br>- Smart photo editing with automatic face recognition and cropping<br>- Flexible backup options with cloud storage support<br>- Seamless streaming of media files across devices<br>- Efficient media conversion with hardware acceleration</td><td>- Expensive ($49.95 per year)<br>- Requires license key or activation code<br>- No support for 4K Ultra HD or 3D video playback or editing<br>- Limited number of software programs compared to CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]</td></tr>
-<tr><td>Roxio Creator NXT Pro 8</td><td>- 6 software programs for various media tasks<br>- Burn, copy, rip and create CDs, DVDs and Blu-ray discs<br>- Edit videos with multi-camera editing, motion tracking, color grading and more<br>- Enhance photos with AI tools, HDR effects and more<br>- Capture screen activity or webcam video<br>- Convert media files with support for HEVC (H.265) format</td><td>- Powerful multimedia suite that can handle most of your media needs<br>- Secure burning and copying of discs with encryption and password protection<br>- Advanced video editing with professional features<br>- Creative photo editing with artistic effects and filters<br>- Easy screen recording and live streaming options<br>- Efficient media conversion with hardware acceleration</td><td>- Very expensive ($129.99)<br>- Requires license key or activation code<br>- No support for 4K Ultra HD or 3D video playback or editing<br>- Limited number of software programs compared to CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]<br></td></tr>
-</table>
- <h2>Conclusion</h2>
- <p>CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] is a comprehensive multimedia suite that can help you create, play and organize your media files with ease and efficiency. It offers a wide range of features and benefits that can enhance your multimedia experience, such as high-quality playback and editing of 4K Ultra HD and 3D video, fast and efficient media conversion with Intel Quick Sync Video technology, a user-friendly interface that lets you quickly access the desired category of media tasks, pre-activated software programs that don't require a license key or activation code, and more.</p>
- <p>If you are looking for a complete solution for all your media needs, you might want to download CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] for PC today and enjoy its capabilities.</p>
- <p>To download CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] for PC, click on this link: <a href="http://www.mediafire.com/file/i3rbpd9k6bdsok9/">http://www.mediafire.com/file/i3rbpd9k6bdsok9/</a></p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]:</p>
- <ul>
-<li>What are the system requirements for CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]?</li>
-<p>The system requirements for CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] are:</p>
-<ul>
-<li>Operating System: Windows 10, 8.1, 8, 7 (32/64-bit)</li>
-<li>Memory (RAM): 2 GB of RAM required</li>
-<li>Hard Disk Space: 6 GB of free space required</li>
-<li>Processor: Intel Core i3 or later</li>
-</ul>
- <li>Is CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] pre-activated or do I need a license key?</li>
-<p>CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] is pre-activated, so you don't need a license key or activation code to use it.</p>
- <li>Can I update the programs in CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win]?</li>
-<p>You can update the programs in CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] by following these steps:</p>
-<ol>
-<li>Launch CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] and click on the Update icon at the top right corner of the interface.</li>
-<li>You will see a list of software programs that have available updates. You can select the ones that you want to update and click on Download Now button.</li>
-<li>Wait for the download and installation to complete.</li>
-<li>Restart your PC if prompted.</li>
-</ol>
- <p>Note: You can also check for updates manually by opening each software program's About dialog and clicking on the Upgrade button. In the Upgrade dialog, click on the Update tab to see if there are any updates available.</p>
- <li>Does CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] support 4K Ultra HD and 3D video playback and editing?</li>
-<p>Yes, CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win] supports 4K Ultra HD and 3D video playback and editing with some of its software programs, such as PowerDVD 17, PowerDirector 15, MediaEspresso 7.5 and PhotoDirector 8.</p>
- <li>How can I contact CyberLink for technical support or feedback?</li>
-<p>If you need technical support or feedback for CyberLink Media Suite Ultimate 15.0.0512.0 [Soft4Win], you can do the following:</p>
-<ul>
-<li>Visit the CyberLink Support Center at <a href="https://www.cyberlink.com/support/index.html">https://www.cyberlink.com/support/index.html</a> and browse through the FAQs, downloads, user guides and other resources.</li>
-<li>Contact the CyberLink Customer Service Representatives at <a href="https://www.cyberlink.com/support/contact-support.jsp">https://www.cyberlink.com/support/contact-support.jsp</a> and fill out the online form with your details and inquiry.</li>
-<li>Join the CyberLink Community Forum at <a href="https://forum.cyberlink.com/forum/">https://forum.cyberlink.com/forum/</a> and interact with other users and experts.</li>
-</ul>
-</ul>
- </p> 0a6ba089eb<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/raghavtwenty/cyber-attack-prediction/README.md b/spaces/raghavtwenty/cyber-attack-prediction/README.md
deleted file mode 100644
index 5de3af6ec50728cbaa521b98b364b22c85f11630..0000000000000000000000000000000000000000
--- a/spaces/raghavtwenty/cyber-attack-prediction/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Cyber Attack Prediction
-emoji: 🏢
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.34.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/README.md b/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/README.md
deleted file mode 100644
index cba2f600d2e6efad9cb14279a04d7e3ac6cd6cce..0000000000000000000000000000000000000000
--- a/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/README.md
+++ /dev/null
@@ -1,593 +0,0 @@
-# braces [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=W8YFZ425KND68) [![NPM version](https://img.shields.io/npm/v/braces.svg?style=flat)](https://www.npmjs.com/package/braces) [![NPM monthly downloads](https://img.shields.io/npm/dm/braces.svg?style=flat)](https://npmjs.org/package/braces) [![NPM total downloads](https://img.shields.io/npm/dt/braces.svg?style=flat)](https://npmjs.org/package/braces) [![Linux Build Status](https://img.shields.io/travis/micromatch/braces.svg?style=flat&label=Travis)](https://travis-ci.org/micromatch/braces)
-
-> Bash-like brace expansion, implemented in JavaScript. Safer than other brace expansion libs, with complete support for the Bash 4.3 braces specification, without sacrificing speed.
-
-Please consider following this project's author, [Jon Schlinkert](https://github.com/jonschlinkert), and consider starring the project to show your :heart: and support.
-
-## Install
-
-Install with [npm](https://www.npmjs.com/):
-
-```sh
-$ npm install --save braces
-```
-
-## v3.0.0 Released!!
-
-See the [changelog](CHANGELOG.md) for details.
-
-## Why use braces?
-
-Brace patterns make globs more powerful by adding the ability to match specific ranges and sequences of characters.
-
-* **Accurate** - complete support for the [Bash 4.3 Brace Expansion](https://www.gnu.org/software/bash/) specification (passes all of the Bash braces tests)
-* **[fast and performant](#benchmarks)** - Starts fast, runs fast and [scales well](#performance) as patterns increase in complexity.
-* **Organized code base** - The parser and compiler are easy to maintain and update when edge cases crop up.
-* **Well-tested** - Thousands of test assertions, and passes all of the Bash, minimatch, and [brace-expansion](https://github.com/juliangruber/brace-expansion) unit tests (as of the date this was written).
-* **Safer** - You shouldn't have to worry about users defining aggressive or malicious brace patterns that can break your application. Braces takes measures to prevent malicious regex that can be used for DDoS attacks (see [catastrophic backtracking](https://www.regular-expressions.info/catastrophic.html)).
-* [Supports lists](#lists) - (aka "sets") `a/{b,c}/d` => `['a/b/d', 'a/c/d']`
-* [Supports sequences](#sequences) - (aka "ranges") `{01..03}` => `['01', '02', '03']`
-* [Supports steps](#steps) - (aka "increments") `{2..10..2}` => `['2', '4', '6', '8', '10']`
-* [Supports escaping](#escaping) - To prevent evaluation of special characters.
-
-## Usage
-
-The main export is a function that takes one or more brace `patterns` and `options`.
-
-```js
-const braces = require('braces');
-// braces(patterns[, options]);
-
-console.log(braces(['{01..05}', '{a..e}']));
-//=> ['(0[1-5])', '([a-e])']
-
-console.log(braces(['{01..05}', '{a..e}'], { expand: true }));
-//=> ['01', '02', '03', '04', '05', 'a', 'b', 'c', 'd', 'e']
-```
-
-### Brace Expansion vs. Compilation
-
-By default, brace patterns are compiled into strings that are optimized for creating regular expressions and matching.
-
-**Compiled**
-
-```js
-console.log(braces('a/{x,y,z}/b')); 
-//=> ['a/(x|y|z)/b']
-console.log(braces(['a/{01..20}/b', 'a/{1..5}/b'])); 
-//=> [ 'a/(0[1-9]|1[0-9]|20)/b', 'a/([1-5])/b' ]
-```
-
-**Expanded**
-
-Enable brace expansion by setting the `expand` option to true, or by using [braces.expand()](#expand) (returns an array similar to what you'd expect from Bash, or `echo {1..5}`, or [minimatch](https://github.com/isaacs/minimatch)):
-
-```js
-console.log(braces('a/{x,y,z}/b', { expand: true }));
-//=> ['a/x/b', 'a/y/b', 'a/z/b']
-
-console.log(braces.expand('{01..10}'));
-//=> ['01','02','03','04','05','06','07','08','09','10']
-```
-
-### Lists
-
-Expand lists (like Bash "sets"):
-
-```js
-console.log(braces('a/{foo,bar,baz}/*.js'));
-//=> ['a/(foo|bar|baz)/*.js']
-
-console.log(braces.expand('a/{foo,bar,baz}/*.js'));
-//=> ['a/foo/*.js', 'a/bar/*.js', 'a/baz/*.js']
-```
-
-### Sequences
-
-Expand ranges of characters (like Bash "sequences"):
-
-```js
-console.log(braces.expand('{1..3}'));                // ['1', '2', '3']
-console.log(braces.expand('a/{1..3}/b'));            // ['a/1/b', 'a/2/b', 'a/3/b']
-console.log(braces('{a..c}', { expand: true }));     // ['a', 'b', 'c']
-console.log(braces('foo/{a..c}', { expand: true })); // ['foo/a', 'foo/b', 'foo/c']
-
-// supports zero-padded ranges
-console.log(braces('a/{01..03}/b'));   //=> ['a/(0[1-3])/b']
-console.log(braces('a/{001..300}/b')); //=> ['a/(0{2}[1-9]|0[1-9][0-9]|[12][0-9]{2}|300)/b']
-```
-
-See [fill-range](https://github.com/jonschlinkert/fill-range) for all available range-expansion options.
-
-### Stepped ranges
-
-Steps, or increments, may be used with ranges:
-
-```js
-console.log(braces.expand('{2..10..2}'));
-//=> ['2', '4', '6', '8', '10']
-
-console.log(braces('{2..10..2}'));
-//=> ['(2|4|6|8|10)']
-```
-
-When the [.optimize](#optimize) method is used, or [options.optimize](#optionsoptimize) is set to true, sequences are passed to [to-regex-range](https://github.com/jonschlinkert/to-regex-range) for expansion.
-
-### Nesting
-
-Brace patterns may be nested. The results of each expanded string are not sorted, and left to right order is preserved.
-
-**"Expanded" braces**
-
-```js
-console.log(braces.expand('a{b,c,/{x,y}}/e'));
-//=> ['ab/e', 'ac/e', 'a/x/e', 'a/y/e']
-
-console.log(braces.expand('a/{x,{1..5},y}/c'));
-//=> ['a/x/c', 'a/1/c', 'a/2/c', 'a/3/c', 'a/4/c', 'a/5/c', 'a/y/c']
-```
-
-**"Optimized" braces**
-
-```js
-console.log(braces('a{b,c,/{x,y}}/e'));
-//=> ['a(b|c|/(x|y))/e']
-
-console.log(braces('a/{x,{1..5},y}/c'));
-//=> ['a/(x|([1-5])|y)/c']
-```
-
-### Escaping
-
-**Escaping braces**
-
-A brace pattern will not be expanded or evaluated if _either the opening or closing brace is escaped_:
-
-```js
-console.log(braces.expand('a\\{d,c,b}e'));
-//=> ['a{d,c,b}e']
-
-console.log(braces.expand('a{d,c,b\\}e'));
-//=> ['a{d,c,b}e']
-```
-
-**Escaping commas**
-
-Commas inside braces may also be escaped:
-
-```js
-console.log(braces.expand('a{b\\,c}d'));
-//=> ['a{b,c}d']
-
-console.log(braces.expand('a{d\\,c,b}e'));
-//=> ['ad,ce', 'abe']
-```
-
-**Single items**
-
-Following bash conventions, a brace pattern is also not expanded when it contains a single character:
-
-```js
-console.log(braces.expand('a{b}c'));
-//=> ['a{b}c']
-```
-
-## Options
-
-### options.maxLength
-
-**Type**: `Number`
-
-**Default**: `65,536`
-
-**Description**: Limit the length of the input string. Useful when the input string is generated or your application allows users to pass a string, et cetera.
-
-```js
-console.log(braces('a/{b,c}/d', { maxLength: 3 }));  //=> throws an error
-```
-
-### options.expand
-
-**Type**: `Boolean`
-
-**Default**: `undefined`
-
-**Description**: Generate an "expanded" brace pattern (alternatively you can use the `braces.expand()` method, which does the same thing).
-
-```js
-console.log(braces('a/{b,c}/d', { expand: true }));
-//=> [ 'a/b/d', 'a/c/d' ]
-```
-
-### options.nodupes
-
-**Type**: `Boolean`
-
-**Default**: `undefined`
-
-**Description**: Remove duplicates from the returned array.
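-
-**Example**
-
-A minimal sketch, assuming a pattern whose branches expand to the same string (the pattern here is illustrative):
-
-```js
-// without nodupes, duplicate expansions are kept
-console.log(braces.expand('a/{b,b,c}'));
-//=> ['a/b', 'a/b', 'a/c']
-
-// with nodupes, duplicates are removed from the returned array
-console.log(braces('a/{b,b,c}', { expand: true, nodupes: true }));
-//=> ['a/b', 'a/c']
-```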
-
-### options.rangeLimit
-
-**Type**: `Number`
-
-**Default**: `1000`
-
-**Description**: To prevent malicious patterns from being passed by users, an error is thrown when `braces.expand()` is used or `options.expand` is true and the generated range would exceed the `rangeLimit`.
-
-You can customize `options.rangeLimit` or set it to `Infinity` to disable this altogether.
-
-**Examples**
-
-```js
-// pattern exceeds the "rangeLimit", so it's optimized automatically
-console.log(braces.expand('{1..1000}'));
-//=> ['([1-9]|[1-9][0-9]{1,2}|1000)']
-
-// pattern does not exceed "rangeLimit", so it's NOT optimized
-console.log(braces.expand('{1..100}'));
-//=> ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100']
-```
-
-### options.transform
-
-**Type**: `Function`
-
-**Default**: `undefined`
-
-**Description**: Customize range expansion.
-
-**Example: Transforming non-numeric values**
-
-```js
-const alpha = braces.expand('x/{a..e}/y', {
-  transform(value, index) {
-    // When non-numeric values are passed, "value" is a character code.
-    return 'foo/' + String.fromCharCode(value) + '-' + index;
-  }
-});
-console.log(alpha);
-//=> [ 'x/foo/a-0/y', 'x/foo/b-1/y', 'x/foo/c-2/y', 'x/foo/d-3/y', 'x/foo/e-4/y' ]
-```
-
-**Example: Transforming numeric values**
-
-```js
-const numeric = braces.expand('{1..5}', {
-  transform(value) {
-    // when numeric values are passed, "value" is a number
-    return 'foo/' + value * 2;
-  }
-});
-console.log(numeric); 
-//=> [ 'foo/2', 'foo/4', 'foo/6', 'foo/8', 'foo/10' ]
-```
-
-### options.quantifiers
-
-**Type**: `Boolean`
-
-**Default**: `undefined`
-
-**Description**: In regular expressions, quantifiers can be used to specify how many times a token can be repeated. For example, `a{1,3}` will match the letter `a` one to three times.
-
-Unfortunately, regex quantifiers happen to share the same syntax as [Bash lists](#lists).
-
-The `quantifiers` option tells braces to detect when [regex quantifiers](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp#quantifiers) are defined in the given pattern, and not to try to expand them as lists.
-
-**Examples**
-
-```js
-const braces = require('braces');
-console.log(braces('a/b{1,3}/{x,y,z}'));
-//=> [ 'a/b(1|3)/(x|y|z)' ]
-console.log(braces('a/b{1,3}/{x,y,z}', {quantifiers: true}));
-//=> [ 'a/b{1,3}/(x|y|z)' ]
-console.log(braces('a/b{1,3}/{x,y,z}', {quantifiers: true, expand: true}));
-//=> [ 'a/b{1,3}/x', 'a/b{1,3}/y', 'a/b{1,3}/z' ]
-```
-
-### options.unescape
-
-**Type**: `Boolean`
-
-**Default**: `undefined`
-
-**Description**: Strip backslashes that were used for escaping from the result.
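-
-**Example**
-
-A rough sketch based on the description above; the exact default output may vary between versions:
-
-```js
-// escaped braces stay literal; with unescape, the escaping
-// backslashes are stripped from the returned strings
-console.log(braces.expand('a/\\{b,c\\}/d', { unescape: true }));
-//=> e.g. ['a/{b,c}/d']
-```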
-
-## What is "brace expansion"?
-
-Brace expansion is a type of parameter expansion that was made popular by unix shells for generating lists of strings, as well as regex-like matching when used alongside wildcards (globs).
-
-In addition to "expansion", braces are also used for matching. In other words:
-
-* [brace expansion](#brace-expansion) is for generating new lists
-* [brace matching](#brace-matching) is for filtering existing lists
-
-<details>
-<summary><strong>More about brace expansion</strong> (click to expand)</summary>
-
-There are two main types of brace expansion:
-
-1. **lists**: which are defined using comma-separated values inside curly braces: `{a,b,c}`
-2. **sequences**: which are defined using a starting value and an ending value, separated by two dots: `a{1..3}b`. Optionally, a third argument may be passed to define a "step" or increment to use: `a{1..100..10}b`. These are also sometimes referred to as "ranges".
-
-Here are some example brace patterns to illustrate how they work:
-
-**Sets**
-
-```
-{a,b,c}       => a b c
-{a,b,c}{1,2}  => a1 a2 b1 b2 c1 c2
-```
-
-**Sequences**
-
-```
-{1..9}        => 1 2 3 4 5 6 7 8 9
-{4..-4}       => 4 3 2 1 0 -1 -2 -3 -4
-{1..20..3}    => 1 4 7 10 13 16 19
-{a..j}        => a b c d e f g h i j
-{j..a}        => j i h g f e d c b a
-{a..z..3}     => a d g j m p s v y
-```
-
-**Combination**
-
-Sets and sequences can be mixed together or used along with any other strings.
-
-```
-{a,b,c}{1..3}   => a1 a2 a3 b1 b2 b3 c1 c2 c3
-foo/{a,b,c}/bar => foo/a/bar foo/b/bar foo/c/bar
-```
-
-The fact that braces can be "expanded" from relatively simple patterns makes them ideal for quickly generating test fixtures, file paths, and similar use cases.
-
-## Brace matching
-
-In addition to _expansion_, brace patterns are also useful for performing regular-expression-like matching.
-
-For example, the pattern `foo/{1..3}/bar` would match any of following strings:
-
-```
-foo/1/bar
-foo/2/bar
-foo/3/bar
-```
-
-But not:
-
-```
-baz/1/qux
-baz/2/qux
-baz/3/qux
-```
-
-Braces can also be combined with [glob patterns](https://github.com/jonschlinkert/micromatch) to perform more advanced wildcard matching. For example, the pattern `*/{1..3}/*` would match any of following strings:
-
-```
-foo/1/bar
-foo/2/bar
-foo/3/bar
-baz/1/qux
-baz/2/qux
-baz/3/qux
-```
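-
-A minimal sketch of such a check using [micromatch](https://github.com/jonschlinkert/micromatch) (assuming it is installed alongside braces):
-
-```js
-const micromatch = require('micromatch');
-
-console.log(micromatch.isMatch('foo/1/bar', 'foo/{1..3}/bar')); //=> true
-console.log(micromatch.isMatch('baz/1/qux', 'foo/{1..3}/bar')); //=> false
-console.log(micromatch.isMatch('baz/1/qux', '*/{1..3}/*'));     //=> true
-```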
-
-## Brace matching pitfalls
-
-Although brace patterns offer a user-friendly way of matching ranges or sets of strings, there are also some major disadvantages and potential risks you should be aware of.
-
-### tldr
-
-**"brace bombs"**
-
-* brace expansion can eat up a huge amount of processing resources
-* as brace patterns increase _linearly in size_, the system resources required to expand the pattern increase exponentially
-* users can accidentally (or intentionally) exhaust your system's resources resulting in the equivalent of a DoS attack (bonus: no programming knowledge is required!)
-
-For a more detailed explanation with examples, see the [geometric complexity](#geometric-complexity) section.
-
-### The solution
-
-Jump to the [performance section](#performance) to see how Braces solves this problem in comparison to other libraries.
-
-### Geometric complexity
-
-At minimum, brace patterns with sets limited to two elements have quadratic or `O(n^2)` complexity. But the complexity of the algorithm increases exponentially as the number of sets, _and elements per set_, increases, which is `O(n^c)`.
-
-For example, the following sets demonstrate quadratic (`O(n^2)`) complexity:
-
-```
-{1,2}{3,4}      => (2X2)    => 13 14 23 24
-{1,2}{3,4}{5,6} => (2X2X2)  => 135 136 145 146 235 236 245 246
-```
-
-But add an element to a set, and we get an n-fold Cartesian product with `O(n^c)` complexity:
-
-```
-{1,2,3}{4,5,6}{7,8,9} => (3X3X3) => 147 148 149 157 158 159 167 168 169 247 248 
-                                    249 257 258 259 267 268 269 347 348 349 357 
-                                    358 359 367 368 369
-```
-
-Now, imagine how this complexity grows given that each element is an n-tuple:
-
-```
-{1..100}{1..100}         => (100X100)     => 10,000 elements (38.4 kB)
-{1..100}{1..100}{1..100} => (100X100X100) => 1,000,000 elements (5.76 MB)
-```
-
-Although these examples are clearly contrived, they demonstrate how brace patterns can quickly grow out of control.
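-
-The arithmetic behind these numbers is just a product of set sizes, as this sketch shows:
-
-```js
-// the number of expansions is the product of the individual set sizes
-const expansions = (sizes) => sizes.reduce((total, size) => total * size, 1);
-
-console.log(expansions([2, 2, 2]));       //=> 8, for {1,2}{3,4}{5,6}
-console.log(expansions([3, 3, 3]));       //=> 27, for {1,2,3}{4,5,6}{7,8,9}
-console.log(expansions([100, 100, 100])); //=> 1000000, for {1..100}{1..100}{1..100}
-```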
-
-**More information**
-
-Interested in learning more about brace expansion?
-
-* [linuxjournal/bash-brace-expansion](http://www.linuxjournal.com/content/bash-brace-expansion)
-* [rosettacode/Brace_expansion](https://rosettacode.org/wiki/Brace_expansion)
-* [cartesian product](https://en.wikipedia.org/wiki/Cartesian_product)
-
-</details>
-
-## Performance
-
-Braces is not only screaming fast, it's also more accurate than other brace expansion libraries.
-
-### Better algorithms
-
-Fortunately there is a solution to the ["brace bomb" problem](#brace-matching-pitfalls): _don't expand brace patterns into an array when they're used for matching_.
-
-Instead, convert the pattern into an optimized regular expression. This is easier said than done, and braces is the only library that does this currently.
-
-**The proof is in the numbers**
-
-Minimatch gets exponentially slower as patterns increase in complexity; braces does not. The following results were generated using `braces()` and `minimatch.braceExpand()`, respectively.
-
-| **Pattern**                 | **braces**         | **[minimatch][]**            |
-| ---                         | ---                | ---                          |
-| `{1..9007199254740991}`[^1] | `298 B` (5ms 459μs)|  N/A (freezes)               |
-| `{1..1000000000000000}`     | `41 B` (1ms 15μs)  |  N/A (freezes)               |
-| `{1..100000000000000}`      | `40 B` (890μs)     |  N/A (freezes)               |
-| `{1..10000000000000}`       | `39 B` (2ms 49μs)  |  N/A (freezes)               |
-| `{1..1000000000000}`        | `38 B` (608μs)     |  N/A (freezes)               |
-| `{1..100000000000}`         | `37 B` (397μs)     |  N/A (freezes)               |
-| `{1..10000000000}`          | `35 B` (983μs)     |  N/A (freezes)               |
-| `{1..1000000000}`           | `34 B` (798μs)     |  N/A (freezes)               |
-| `{1..100000000}`            | `33 B` (733μs)     |  N/A (freezes)               |
-| `{1..10000000}`             | `32 B` (5ms 632μs) | `78.89 MB` (16s 388ms 569μs) |
-| `{1..1000000}`              | `31 B` (1ms 381μs) | `6.89 MB` (1s 496ms 887μs)   |
-| `{1..100000}`               | `30 B` (950μs)     | `588.89 kB` (146ms 921μs)    |
-| `{1..10000}`                | `29 B` (1ms 114μs) | `48.89 kB` (14ms 187μs)      |
-| `{1..1000}`                 | `28 B` (760μs)     | `3.89 kB` (1ms 453μs)        |
-| `{1..100}`                  | `22 B` (345μs)     | `291 B` (196μs)              |
-| `{1..10}`                   | `10 B` (533μs)     | `20 B` (37μs)                |
-| `{1..3}`                    | `7 B` (190μs)      | `5 B` (27μs)                 |
-
-### Faster algorithms
-
-When you need expansion, braces is still much faster.
-
-_(the following results were generated using `braces.expand()` and `minimatch.braceExpand()`, respectively)_
-
-| **Pattern**     | **braces**                  | **[minimatch][]**            |
-| ---             | ---                         | ---                          |
-| `{1..10000000}` | `78.89 MB` (2s 698ms 642μs) | `78.89 MB` (18s 601ms 974μs) |
-| `{1..1000000}`  | `6.89 MB` (458ms 576μs)     | `6.89 MB` (1s 491ms 621μs)   |
-| `{1..100000}`   | `588.89 kB` (20ms 728μs)    | `588.89 kB` (156ms 919μs)    |
-| `{1..10000}`    | `48.89 kB` (2ms 202μs)      | `48.89 kB` (13ms 641μs)      |
-| `{1..1000}`     | `3.89 kB` (1ms 796μs)       | `3.89 kB` (1ms 958μs)        |
-| `{1..100}`      | `291 B` (424μs)             | `291 B` (211μs)              |
-| `{1..10}`       | `20 B` (487μs)              | `20 B` (72μs)                |
-| `{1..3}`        | `5 B` (166μs)               | `5 B` (27μs)                 |
-
-If you'd like to run these comparisons yourself, see [test/support/generate.js](test/support/generate.js).
-
-## Benchmarks
-
-### Running benchmarks
-
-Install dev dependencies:
-
-```bash
-npm i -d && npm run benchmark
-```
-
-### Latest results
-
-Braces is more accurate, without sacrificing performance.
-
-```bash
-# range (expanded)
-  braces x 29,040 ops/sec ±3.69% (91 runs sampled)
-  minimatch x 4,735 ops/sec ±1.28% (90 runs sampled)
-
-# range (optimized for regex)
-  braces x 382,878 ops/sec ±0.56% (94 runs sampled)
-  minimatch x 1,040 ops/sec ±0.44% (93 runs sampled)
-
-# nested ranges (expanded)
-  braces x 19,744 ops/sec ±2.27% (92 runs sampled)
-  minimatch x 4,579 ops/sec ±0.50% (93 runs sampled)
-
-# nested ranges (optimized for regex)
-  braces x 246,019 ops/sec ±2.02% (93 runs sampled)
-  minimatch x 1,028 ops/sec ±0.39% (94 runs sampled)
-
-# set (expanded) 
-  braces x 138,641 ops/sec ±0.53% (95 runs sampled)
-  minimatch x 219,582 ops/sec ±0.98% (94 runs sampled)
-
-# set (optimized for regex)
-  braces x 388,408 ops/sec ±0.41% (95 runs sampled)
-  minimatch x 44,724 ops/sec ±0.91% (89 runs sampled)
-
-# nested sets (expanded)
-  braces x 84,966 ops/sec ±0.48% (94 runs sampled)
-  minimatch x 140,720 ops/sec ±0.37% (95 runs sampled)
-
-# nested sets (optimized for regex)
-  braces x 263,340 ops/sec ±2.06% (92 runs sampled)
-  minimatch x 28,714 ops/sec ±0.40% (90 runs sampled)
-```
-
-## About
-
-<details>
-<summary><strong>Contributing</strong></summary>
-
-Pull requests and stars are always welcome. For bugs and feature requests, [please create an issue](../../issues/new).
-
-</details>
-
-<details>
-<summary><strong>Running Tests</strong></summary>
-
-Running and reviewing unit tests is a great way to get familiarized with a library and its API. You can install dependencies and run tests with the following command:
-
-```sh
-$ npm install && npm test
-```
-
-</details>
-
-<details>
-<summary><strong>Building docs</strong></summary>
-
-_(This project's readme.md is generated by [verb](https://github.com/verbose/verb-generate-readme), please don't edit the readme directly. Any changes to the readme must be made in the [.verb.md](.verb.md) readme template.)_
-
-To generate the readme, run the following command:
-
-```sh
-$ npm install -g verbose/verb#dev verb-generate-readme && verb
-```
-
-</details>
-
-### Contributors
-
-| **Commits** | **Contributor** |  
-| --- | --- |  
-| 197 | [jonschlinkert](https://github.com/jonschlinkert) |  
-| 4   | [doowb](https://github.com/doowb) |  
-| 1   | [es128](https://github.com/es128) |  
-| 1   | [eush77](https://github.com/eush77) |  
-| 1   | [hemanth](https://github.com/hemanth) |  
-| 1   | [wtgtybhertgeghgtwtg](https://github.com/wtgtybhertgeghgtwtg) |  
-
-### Author
-
-**Jon Schlinkert**
-
-* [GitHub Profile](https://github.com/jonschlinkert)
-* [Twitter Profile](https://twitter.com/jonschlinkert)
-* [LinkedIn Profile](https://linkedin.com/in/jonschlinkert)
-
-### License
-
-Copyright © 2019, [Jon Schlinkert](https://github.com/jonschlinkert).
-Released under the [MIT License](LICENSE).
-
-***
-
-_This file was generated by [verb-generate-readme](https://github.com/verbose/verb-generate-readme), v0.8.0, on April 08, 2019._
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autos Bauen Mit Willy Werkel.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autos Bauen Mit Willy Werkel.rar.md
deleted file mode 100644
index 9fb1cfb53796497ad09f869a997dfa900852f2a5..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autos Bauen Mit Willy Werkel.rar.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Autos Bauen Mit Willy Werkel.rar</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://urlgoal.com/2uCK0h">https://urlgoal.com/2uCK0h</a></b></p><br /><br />
-<br />
-Autos Bauen Mit Willy Werkel.rar, Homer Energy Software Crack. 3db19cccfd. softube tsar 1 mac crack torrentinstmank su podium browser v2 serial.rar 1fdad05405<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Civilization 5 World Builder Skidrow __EXCLUSIVE__.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Civilization 5 World Builder Skidrow __EXCLUSIVE__.md
deleted file mode 100644
index 021d6b93eab9a5797fdad40717d7df71c7802c3d..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Civilization 5 World Builder Skidrow __EXCLUSIVE__.md	
+++ /dev/null
@@ -1,10 +0,0 @@
-<h2>Civilization 5 World Builder Skidrow</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://urlgoal.com/2uCKxq">https://urlgoal.com/2uCKxq</a></b></p><br /><br />
-<br />
-Tag: Download Sid Meiers Civilization V Complete Edition for PC · No messages found. · EXCLUSIVE GAMES. Information about the game.
-Title: Sid Meiers Civilization V Year of release: 2010 Genre: Strategy (Turn-based) Version: v.1.3.0+1.0 Build 8 Developer: Firaxis Games Publisher: 2K Games Publication type: Repack Interface language: Russian Voice language: Russian Crack: included Size: 7.86 GB
-System Requirements: * OS: Windows XP / Vista / Windows 7 * Processor: Intel Core 2 Duo or AMD equivalent.
-* RAM: 1 GB (Windows XP) / 2 GB (Windows Vista).
-* Hard disk: 10 GB. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Gccpuma3driverdownload Extra Quality.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Gccpuma3driverdownload Extra Quality.md
deleted file mode 100644
index 4a190acf1eda6a4cad7cd2287f643d5f4151c35a..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Gccpuma3driverdownload Extra Quality.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>gccpuma3driverdownload</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://urlgoal.com/2uCKpu">https://urlgoal.com/2uCKpu</a></b></p><br /><br />
-<br />
-gcc puma driver. --------------------------------------------------. Gccpuma3driverdownload ››››› http://bit.ly/. --------------------------------------------------. 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/robinhad/ukrainian-tts/ukrainian_tts/tts.py b/spaces/robinhad/ukrainian-tts/ukrainian_tts/tts.py
deleted file mode 100644
index 88dee623d55dc3cf1c632ebc30d3eca48d6a638e..0000000000000000000000000000000000000000
--- a/spaces/robinhad/ukrainian-tts/ukrainian_tts/tts.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from io import BytesIO
-import requests
-from os.path import exists, join, dirname
-from espnet2.bin.tts_inference import Text2Speech
-from enum import Enum
-from .formatter import preprocess_text
-from .stress import sentence_to_stress, stress_dict, stress_with_model
-from torch import no_grad
-import numpy as np
-import time
-import soundfile as sf
-from kaldiio import load_ark
-
-
-class Voices(Enum):
-    """List of available voices for the model."""
-
-    Tetiana = "tetiana"
-    Mykyta = "mykyta"
-    Lada = "lada"
-    Dmytro = "dmytro"
-    Oleksa = "oleksa"
-
-
-class Stress(Enum):
-    """Options how to stress sentence.
-    - `dictionary` - performs lookup in dictionary, taking into account grammatical case of a word and its' neighbors
-    - `model` - stress using transformer model"""
-
-    Dictionary = "dictionary"
-    Model = "model"
-
-
-class TTS:
-    """ """
-
-    def __init__(self, cache_folder=None, device="cpu") -> None:
-        """
-        Set up the text-to-speech engine, from file download to model creation.  \n
-        Downloads or reuses files from the `cache_folder` directory.  \n
-        By default, files are stored in the current directory."""
-        self.device = device
-        self.__setup_cache(cache_folder)
-
-    def tts(self, text: str, voice: str, stress: str, output_fp=BytesIO()):
-        """
-        Run a Text-to-Speech engine and output to `output_fp` BytesIO-like object.
-        - `text` - your model input text.
-        - `voice` - one of predefined voices from `Voices` enum.
-        - `stress` - stress method options, predefined in `Stress` enum.
-        - `output_fp` - file-like object output. Stores in RAM by default.
-        """
-
-        if stress not in [option.value for option in Stress]:
-            raise ValueError(
-                f"Invalid value for stress option selected! Please use one of the following values: {', '.join([option.value for option in Stress])}."
-            )
-
-        if stress == Stress.Model.value:
-            stress = True
-        else:
-            stress = False
-        if voice not in [option.value for option in Voices]:
-            if voice not in self.xvectors.keys():
-                raise ValueError(
-                    f"Invalid value for voice selected! Please use one of the following values: {', '.join([option.value for option in Voices])}."
-                )
-
-        text = preprocess_text(text)
-        text = sentence_to_stress(text, stress_with_model if stress else stress_dict)
-
-        # synthesis
-        with no_grad():
-            start = time.time()
-            wav = self.synthesizer(text, spembs=self.xvectors[voice][0])["wav"]
-
-        rtf = (time.time() - start) / (len(wav) / self.synthesizer.fs)
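-        # Real-time factor: synthesis time divided by the duration of the generated audio.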
-        print(f"RTF = {rtf:5f}")
-
-        sf.write(
-            output_fp,
-            wav.view(-1).cpu().numpy(),
-            self.synthesizer.fs,
-            "PCM_16",
-            format="wav",
-        )
-
-        output_fp.seek(0)
-
-        return output_fp, text
-
-    def __setup_cache(self, cache_folder=None):
-        """Downloads models and stores them into `cache_folder`. By default stores in current directory."""
-        release_number = "v6.0.0"
-        print(
-            f"downloading https://github.com/robinhad/ukrainian-tts/releases/download/{release_number}"
-        )
-        model_link = f"https://github.com/robinhad/ukrainian-tts/releases/download/{release_number}/model.pth"
-        config_link = f"https://github.com/robinhad/ukrainian-tts/releases/download/{release_number}/config.yaml"
-        speakers_link = f"https://github.com/robinhad/ukrainian-tts/releases/download/{release_number}/spk_xvector.ark"
-        feat_stats_link = f"https://github.com/robinhad/ukrainian-tts/releases/download/{release_number}/feats_stats.npz"
-
-        if cache_folder is None:
-            cache_folder = "."
-
-        model_path = join(cache_folder, "model.pth")
-        config_path = join(cache_folder, "config.yaml")
-        speakers_path = join(cache_folder, "spk_xvector.ark")
-        feat_stats_path = join(cache_folder, "feats_stats.npz")
-
-        self.__download(model_link, model_path)
-        self.__download(config_link, config_path)
-        self.__download(speakers_link, speakers_path)
-        self.__download(feat_stats_link, feat_stats_path)
-        print("downloaded.")
-
-        self.synthesizer = Text2Speech(
-            train_config=config_path, model_file=model_path, device=self.device
-        )
-        self.xvectors = {k: v for k, v in load_ark(speakers_path)}
-
-    def __download(self, url, file_name):
-        """Downloads file from `url` into local `file_name` file."""
-        if not exists(file_name):
-            if not exists(dirname(file_name)):
-                raise ValueError(f'Directory "{dirname(file_name)}" doesn\'t exist!')
-            print(f"Downloading {file_name}")
-            r = requests.get(url, allow_redirects=True)
-            with open(file_name, "wb") as file:
-                file.write(r.content)
-        else:
-            print(f"Found {file_name}. Skipping download...")
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Filmdirtydiariesstreaming.md b/spaces/rorallitri/biomedical-language-models/logs/Filmdirtydiariesstreaming.md
deleted file mode 100644
index f4a50cd27219b1d7846aa3c77724e9e62d07acf9..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Filmdirtydiariesstreaming.md
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Filmdirtydiariesstreaming</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;>>> <a href="https://tinurll.com/2uznfq">https://tinurll.com/2uznfq</a></b></p><br /><br />
-<br />
- aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/runa91/bite_gradio/src/stacked_hourglass/utils/__init__.py b/spaces/runa91/bite_gradio/src/stacked_hourglass/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/sabman/map-diffuser/README.md b/spaces/sabman/map-diffuser/README.md
deleted file mode 100644
index 8cc31d5bcff3d42ea0f894124c49abe87076e6dc..0000000000000000000000000000000000000000
--- a/spaces/sabman/map-diffuser/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Map Diffusers
-emoji: 🗺️
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.28.2
-app_file: app.py
-pinned: false
-tags:
-- jax-diffusers-event
----
-
-# Map Diffusers
-
-This is a demo of the [JAX Diffusers]() library. It was trained on data with map tiles from
-- [OpenStreetMap](https://www.openstreetmap.org/)
-- [Mapbox Satellite](https://www.mapbox.com/maps/satellite/)
-- [Stamen Watercolor](http://maps.stamen.com/watercolor/#12/37.7706/-122.3782)
-- [Stamen Toner](http://maps.stamen.com/toner/#12/37.7706/-122.3782)
-
-We did this as part of the [JAX Diffusers Event](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint) making use of the TPU's provided by Google Cloud and Hugging Face. 🤗
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/Every Little Thing Every Best Single 3 Rar REPACK.md b/spaces/scedlatioru/img-to-music/example/Every Little Thing Every Best Single 3 Rar REPACK.md
deleted file mode 100644
index 1140782e4cc4d23c12c1f637d557ae3ec6ec19da..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Every Little Thing Every Best Single 3 Rar REPACK.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<br />
-<p>The applications for mobile phones are on a massive scale today and have expanded to every aspect of our life. However, the market is not only about the applications but the apps need to be optimized to deliver the expected results. The initial stages of app development are extremely crucial to its success. The optimization of the app design, evaluation of the app performance, and the analysis of the environmental parameters are the basic concerns before the deployment of the app. Hence, the initial stages should be an integral part of the app development life-cycle.</p>
-<p>Mobile application development is ever on an upward trend. Increasing development activity and expanding customer base have propelled growth of the apps market. In the last few years, the number of apps in the app store has soared exponentially, and these apps are being downloaded continuously by the users. According to the Gartner survey 2013, there were 15.5 billion app users in the world and the mobile apps market is expected to continue to grow in the coming years due to the growth of smartphone penetration and usage, device fragmentation, user acquisition cost, and development cost. In the recent past, the scope and number of mobile apps have grown exponentially. The first app was created in 1972, but the first app store was launched in 2008; however, the first smartphone with an app store was released in 2007. Today, every smartphone has its own app store. However, the scope is not only about the number of apps but the quality and usability of the apps are also being enhanced every day. Data, frameworks, resources, tools, and development tools are the major components of the app development.</p>
-<h2>every little thing every best single 3 rar</h2><br /><p><b><b>Download</b> &#10042;&#10042;&#10042; <a href="https://gohhs.com/2uEAls">https://gohhs.com/2uEAls</a></b></p><br /><br /> 899543212b<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/Microsoft Office 2013 Activation Crack (lifetime Activator).md b/spaces/scedlatioru/img-to-music/example/Microsoft Office 2013 Activation Crack (lifetime Activator).md
deleted file mode 100644
index 888df3221111528e0233d11297c681d527236608..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Microsoft Office 2013 Activation Crack (lifetime Activator).md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>Microsoft Office 2013 Activation Crack (lifetime Activator)</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://gohhs.com/2uEyY8">https://gohhs.com/2uEyY8</a></b></p><br /><br />
-<br />
-It is used to activate Microsoft office and windows. ... Lifetime upgrade. If Windows and Office are activated through KMSnano Office 2013, that ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/scikit-learn/blog-example/app.py b/spaces/scikit-learn/blog-example/app.py
deleted file mode 100644
index d5b285ad0d60b0c2e7c1930a89f4aeb3167ce066..0000000000000000000000000000000000000000
--- a/spaces/scikit-learn/blog-example/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import gradio as gr
-
-title = "Breast Cancer Prediction"
-description = "This app predicts breast cancer based on digitized images of a fine needle aspirate (FNA) of a breast mass."
-
-gr.Interface.load("huggingface/scikit-learn/skops-blog-example", title=title, description=description).launch()
\ No newline at end of file
diff --git a/spaces/sciling/Face_and_Plate_License_Blur/models/experimental.py b/spaces/sciling/Face_and_Plate_License_Blur/models/experimental.py
deleted file mode 100644
index 72dc877c83cf34f4bbab84eaedc2a09a97dd8c6e..0000000000000000000000000000000000000000
--- a/spaces/sciling/Face_and_Plate_License_Blur/models/experimental.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# This file contains experimental modules
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-from models.common import Conv, DWConv
-from utils.google_utils import attempt_download
-
-
-class CrossConv(nn.Module):
-    # Cross Convolution Downsample
-    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
-        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
-        super(CrossConv, self).__init__()
-        c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1, c_, (1, k), (1, s))
-        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
-        self.add = shortcut and c1 == c2
-
-    def forward(self, x):
-        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class Sum(nn.Module):
-    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
-    def __init__(self, n, weight=False):  # n: number of inputs
-        super(Sum, self).__init__()
-        self.weight = weight  # apply weights boolean
-        self.iter = range(n - 1)  # iter object
-        if weight:
-            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights
-
-    def forward(self, x):
-        y = x[0]  # no weight
-        if self.weight:
-            w = torch.sigmoid(self.w) * 2
-            for i in self.iter:
-                y = y + x[i + 1] * w[i]
-        else:
-            for i in self.iter:
-                y = y + x[i + 1]
-        return y
-
-
-class GhostConv(nn.Module):
-    # Ghost Convolution https://github.com/huawei-noah/ghostnet
-    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
-        super(GhostConv, self).__init__()
-        c_ = c2 // 2  # hidden channels
-        self.cv1 = Conv(c1, c_, k, s, None, g, act)
-        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
-
-    def forward(self, x):
-        y = self.cv1(x)
-        return torch.cat([y, self.cv2(y)], 1)
-
-
-class GhostBottleneck(nn.Module):
-    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
-    def __init__(self, c1, c2, k, s):
-        super(GhostBottleneck, self).__init__()
-        c_ = c2 // 2
-        self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
-                                  DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
-                                  GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
-        self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
-                                      Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
-
-    def forward(self, x):
-        return self.conv(x) + self.shortcut(x)
-
-
-class MixConv2d(nn.Module):
-    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
-    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
-        super(MixConv2d, self).__init__()
-        groups = len(k)
-        if equal_ch:  # equal c_ per group
-            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
-            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
-        else:  # equal weight.numel() per group
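-            # Least-squares solve of ax = b: choose per-group channel counts c_ so that
-            # k[g]**2 * c_[g] is roughly equal across groups and the counts sum to c2.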
-            b = [c2] + [0] * groups
-            a = np.eye(groups + 1, groups, k=-1)
-            a -= np.roll(a, 1, axis=1)
-            a *= np.array(k) ** 2
-            a[0] = 1
-            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
-
-        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = nn.LeakyReLU(0.1, inplace=True)
-
-    def forward(self, x):
-        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
-class Ensemble(nn.ModuleList):
-    # Ensemble of models
-    def __init__(self):
-        super(Ensemble, self).__init__()
-
-    def forward(self, x, augment=False):
-        y = []
-        for module in self:
-            y.append(module(x, augment)[0])
-        # y = torch.stack(y).max(0)[0]  # max ensemble
-        # y = torch.stack(y).mean(0)  # mean ensemble
-        y = torch.cat(y, 1)  # nms ensemble
-        return y, None  # inference, train output
-
-
-def attempt_load(weights, map_location=None):
-    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
-    model = Ensemble()
-    for w in weights if isinstance(weights, list) else [weights]:
-        attempt_download(w)
-        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model
-
-    # Compatibility updates
-    for m in model.modules():
-        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
-            m.inplace = True  # pytorch 1.7.0 compatibility
-        elif type(m) is Conv:
-            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
-
-    if len(model) == 1:
-        return model[-1]  # return model
-    else:
-        print('Ensemble created with %s\n' % weights)
-        for k in ['names', 'stride']:
-            setattr(model, k, getattr(model[-1], k))
-        return model  # return ensemble
diff --git a/spaces/severo/voronoi-cloth/index.html b/spaces/severo/voronoi-cloth/index.html
deleted file mode 100644
index 9200a834cb158a7671de524316394a258d8ee65a..0000000000000000000000000000000000000000
--- a/spaces/severo/voronoi-cloth/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html>
-<html>
-  <head>
-    <title>Voronoi Cloth</title>
-  </head>
-  <body>
-    <iframe
-      width="100%"
-      height="676"
-      frameborder="0"
-      src="https://observablehq.com/embed/@severo/voronoi-cloth?cells=animation"
-    ></iframe>
-  </body>
-</html>
diff --git "a/spaces/shencc/gpt/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/spaces/shencc/gpt/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py"
deleted file mode 100644
index ffbb05599ef09c9de25334ebeca2eef8022b9aaf..0000000000000000000000000000000000000000
--- "a/spaces/shencc/gpt/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py"
+++ /dev/null
@@ -1,160 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-
-fast_debug = False
-
-def readPdf(pdfPath):
-    """
-    Read a PDF file and return its text content.
-    """
-    import pdfminer
-    from pdfminer.pdfparser import PDFParser
-    from pdfminer.pdfdocument import PDFDocument
-    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
-    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
-    from pdfminer.pdfdevice import PDFDevice
-    from pdfminer.layout import LAParams
-    from pdfminer.converter import PDFPageAggregator
-
-    fp = open(pdfPath, 'rb')
-
-    # Create a PDF parser object associated with the file object
-    parser = PDFParser(fp)
-
-    # Create a PDF document object that stores the document structure.
-    # Password for initialization as 2nd parameter
-    document = PDFDocument(parser)
-    # Check if the document allows text extraction. If not, abort.
-    if not document.is_extractable:
-        raise PDFTextExtractionNotAllowed
-
-    # Create a PDF resource manager object that stores shared resources.
-    rsrcmgr = PDFResourceManager()
-
-    # Create a PDF device object.
-    # device = PDFDevice(rsrcmgr)
-
-    # BEGIN LAYOUT ANALYSIS.
-    # Set parameters for analysis.
-    laparams = LAParams(
-        char_margin=10.0,
-        line_margin=0.2,
-        boxes_flow=0.2,
-        all_texts=False,
-    )
-    # Create a PDF page aggregator object.
-    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
-    # Create a PDF interpreter object.
-    interpreter = PDFPageInterpreter(rsrcmgr, device)
-
-    # loop over all pages in the document
-    outTextList = []
-    for page in PDFPage.create_pages(document):
-        # read the page into a layout object
-        interpreter.process_page(page)
-        layout = device.get_result()
-        for obj in layout._objs:
-            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
-                # print(obj.get_text())
-                outTextList.append(obj.get_text())
-
-    return outTextList
-
-
-def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    import time, glob, os
-    from bs4 import BeautifulSoup
-    print('begin analysis on:', file_manifest)
-    for index, fp in enumerate(file_manifest):
-        if ".tex" in fp:
-            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-                file_content = f.read()
-        if ".pdf" in fp.lower():
-            file_content = readPdf(fp)
-            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')
-
-        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
-        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
-        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                inputs=i_say, 
-                inputs_show_user=i_say_show_user, 
-                llm_kwargs=llm_kwargs,
-                chatbot=chatbot, 
-                history=[],
-                sys_prompt="总结文章。"
-            )  # request with a timeout countdown
-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
-            if not fast_debug: time.sleep(2)
-
-    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
-    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
-    chatbot.append((i_say, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    if not fast_debug:
-        msg = '正常'  # status message ("normal")
-        # ** gpt request **
-        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-            inputs=i_say,
-            inputs_show_user=i_say,
-            llm_kwargs=llm_kwargs,
-            chatbot=chatbot,
-            history=history,
-            sys_prompt="总结文章。"
-        )  # with a timeout countdown
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say); history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
-        res = write_results_to_file(history)
-        chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
-
-
-
-@CatchException
-def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    history = []    # clear history to keep the model input from overflowing
-    import glob, os
-
-    # basic info: plugin description & contributor
-    chatbot.append([
-        "函数插件功能?",
-        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # try to import the dependencies; if any are missing, suggest how to install them
-    try:
-        import pdfminer, bs4
-    except:
-        report_execption(chatbot, history, 
-            a = f"解析项目: {txt}", 
-            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    if os.path.exists(txt):
-        project_folder = txt
-    else:
-        if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
-                    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
-    if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py b/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py
deleted file mode 100644
index af2d06587b2d07b2eab199a8484380fde1de5c3c..0000000000000000000000000000000000000000
--- a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import torch
-from torch import nn
-
-
-def fuse_conv_and_bn(conv, bn):
-    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
-    fusedconv = (
-        nn.Conv2d(
-            conv.in_channels,
-            conv.out_channels,
-            kernel_size=conv.kernel_size,
-            stride=conv.stride,
-            padding=conv.padding,
-            groups=conv.groups,
-            bias=True,
-        )
-        .requires_grad_(False)
-        .to(conv.weight.device)
-    )
-
-    # prepare filters
-    w_conv = conv.weight.clone().view(conv.out_channels, -1)
-    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
-    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
-
-    # prepare spatial bias
-    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
-    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
-    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
-
-    return fusedconv
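-
-# Hypothetical verification sketch (names illustrative; bn must be in eval mode
-# so it uses its running statistics, which is what the fusion bakes in):
-# conv = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
-# bn = nn.BatchNorm2d(16).eval()
-# x = torch.randn(1, 3, 32, 32)
-# assert torch.allclose(fuse_conv_and_bn(conv, bn)(x), bn(conv(x)), atol=1e-5)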
-
-
-def copy_attr(a, b, include=(), exclude=()):
-    # Copy attributes from b to a, options to only include [...] and to exclude [...]
-    for k, v in b.__dict__.items():
-        if (include and k not in include) or k.startswith("_") or k in exclude:
-            continue
-
-        setattr(a, k, v)
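-
-# Hypothetical usage sketch: copy selected public attributes, e.g. onto an EMA model:
-# copy_attr(ema_model, model, include=("stride", "names"), exclude=())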
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Battle of Warships Tips and Tricks to Improve Your Skills and Win More Battles.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Battle of Warships Tips and Tricks to Improve Your Skills and Win More Battles.md
deleted file mode 100644
index fef605e10cf3ca562dc41eeb10a4b389567fb750..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Battle of Warships Tips and Tricks to Improve Your Skills and Win More Battles.md	
+++ /dev/null
@@ -1,102 +0,0 @@
-
-<h1>Battle of Warships: A Guide to the Ultimate Naval Warfare Game</h1>
- <h2>Introduction</h2>
- <p>If you are a fan of naval warfare, history, or simulation games, you might want to check out <strong>Battle of Warships</strong>, one of the most popular and realistic naval warfare games on mobile devices. In this game, you can command some of the most legendary warships from World War I and World War II, as well as modern ones, and engage in epic sea battles with other players online or offline. You can also customize your ships with different weapons, flags, camouflages, and more, and upgrade them to improve their performance and appearance. Whether you prefer fast and agile destroyers, powerful and armored battleships, versatile and stealthy cruisers, or massive and deadly aircraft carriers, you will find a ship that suits your style and strategy in Battle of Warships.</p>
-<h2>battle of warships</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://ssurll.com/2uNXBC">https://ssurll.com/2uNXBC</a></b></p><br /><br />
- <h2>Why play Battle of Warships?</h2>
- <p>There are many reasons why you should play Battle of Warships if you love naval warfare games. Here are some of them:</p>
- <ul>
-<li>You can experience the thrill and challenge of naval combat in various scenarios, such as day and night, rain and snow, calm and stormy seas, etc.</li>
-<li>You can explore the history and details of more than 20 unique warships from different countries and eras, such as the Yamato, the Missouri, the Bismarck, the South Dakota, and even aircraft carriers.</li>
-<li>You can enjoy the stunning 3D graphics and sound effects that make you feel like you are in the middle of a real warzone.</li>
-<li>You can choose from different game modes and maps that offer different objectives and strategies, such as capture the flag, team deathmatch, domination, etc.</li>
-<li>You can play with or against other players from around the world in real-time multiplayer battles, or team up with your friends in co-op mode.</li>
-<li>You can earn rewards, such as gold, silver, platinum, containers, etc., by completing daily missions and achievements, or by winning battles.</li>
-</ul>
- <h2>Features of Battle of Warships</h2>
- <h3>Historical and modern warships</h3>
- <p>One of the main attractions of Battle of Warships is the variety and authenticity of the warships that you can command. The game features more than 20 unique warships from different countries and eras, such as Japan, USA, Germany, UK, Russia, etc. Each warship has its own characteristics, such as speed, armor, firepower, range, maneuverability, etc., that affect its performance in battle. You can also learn more about the history and specifications of each warship by tapping on its icon in the main menu.</p>
- <h3>Customization and upgrade system</h3>
- <p>Another feature that makes Battle of Warships fun and engaging is the customization and upgrade system. You can customize your warship with different weapons, such as torpedoes, missiles, interceptors, etc., that have different effects and advantages in combat. You can also change the appearance of your warship by choosing different flags, camouflages, skins, etc., that give it a unique look. Moreover, you can upgrade your warship by increasing its health points, engine speed, turning speed, etc., that make it more powerful and durable. You can use gold, silver, platinum, or containers to buy or unlock new weapons, camouflages, skins, etc., or to upgrade your warship.</p>
- <h3>Realistic 3D graphics and physics</h3>
- <p>Another feature that makes Battle of Warships fun and engaging is its realistic 3D graphics and physics. The game boasts high-quality graphics that show the details and textures of the warships, the water, the sky, the explosions, etc. It also uses realistic physics to simulate the movement and behavior of the warships, the waves, the wind, the projectiles, etc. You can feel the impact of each hit, the recoil of each shot, the drag of each turn. The game also has dynamic weather and lighting effects that change with the time of day and the weather conditions.</p>
- <h3>Various game modes and maps</h3>
- <p>Another feature that makes Battle of Warships diverse and exciting is the variety of game modes and maps that you can choose from. The game offers different game modes that have different objectives and rules, such as capture the flag, team deathmatch, domination, etc. You can also choose from different maps that have different layouts and environments, such as islands, icebergs, volcanoes, etc. Each game mode and map requires a different strategy and tactic to win. You can also adjust the difficulty level and the number of players in each match according to your preference.</p>
- <h3>Multiplayer and co-op options</h3>
- <p>Another feature that makes Battle of Warships social and competitive is the multiplayer and co-op options. You can play with or against other players from around the world in real-time multiplayer battles. You can join a random match or create your own room with your friends. You can also chat with other players in the lobby or in the game. You can also team up with your friends in co-op mode and fight against AI enemies or other players. You can also join a clan or create your own clan and compete with other clans for glory and rewards.</p>
- <h2>Tips and tricks for Battle of Warships</h2>
- <h3>Choose your ship wisely</h3>
- <p>One of the most important tips for playing Battle of Warships is to choose your ship wisely. Each ship has its own strengths and weaknesses, and you should pick one that matches your style and strategy. For example, if you like to be fast and agile, you should choose a destroyer. If you like to be powerful and armored, you should choose a battleship. If you like to be versatile and stealthy, you should choose a cruiser. If you like to be massive and deadly, you should choose an aircraft carrier. You should also consider the role and function of each ship in your team composition.</p>
- <h3>Learn the strengths and weaknesses of each weapon type</h3>
- <p>Another tip for playing Battle of Warships is to learn the strengths and weaknesses of each weapon type. Each weapon type has its own advantages and disadvantages in combat, and you should use them accordingly. For example, torpedoes are good for dealing massive damage to slow-moving targets at close range, but they are easy to dodge by fast-moving targets at long range. Missiles are good for dealing moderate damage to fast-moving targets at long range, but they are vulnerable to interceptors or countermeasures. Interceptors are good for defending against missiles or aircrafts, but they are useless against torpedoes or shells. Shells are good for dealing consistent damage to any target at any range, but they have a limited angle and trajectory.</p>
- <h3>Use the environment to your advantage</h3>
- <p>Another tip for playing Battle of Warships is to use the environment to your advantage. The environment can affect your visibility, mobility, and survivability in battle, and you should use it wisely. For example, you can use islands or icebergs as cover or ambush points against enemy fire or detection. You can use rain or snow as camouflage or distraction against enemy sight or radar. You can use waves or wind as boosters or obstacles against enemy movement or aim. You can also use volcanoes or geysers as traps or hazards against enemy ships.</p>
- <h3>Communicate and cooperate with your teammates</h3>
- <p>Another tip for playing Battle of Warships is to communicate and cooperate with your teammates. Communication and cooperation are essential for winning any team-based game mode, such as capture the flag, team deathmatch, domination, etc. You should use the chat function or voice chat function to communicate with your teammates about your plans, strategies, positions, targets, etc. You should also cooperate with your teammates by supporting them with fire, cover, healing, spotting, etc., depending on your role and function in your team.</p>
- <h3>Complete daily missions and achievements for rewards</h3>
- <p>Another tip for playing Battle of Warships is to complete daily missions and achievements for rewards. Daily missions are tasks that you can complete every day by playing the game normally, such as winning a certain number of battles, destroying a certain number of enemy ships, etc. Achievements are goals that you can achieve by playing the game over time, such as reaching a certain level, unlocking a certain ship, earning a certain amount of gold, etc. By completing daily missions and achievements, you can earn rewards, such as gold, silver, platinum, containers, etc., that you can use to buy or upgrade new weapons, camouflages, skins, etc., or to unlock new ships.</p>
- <h2>Conclusion</h2>
- <p>Battle of Warships is a game that will appeal to anyone who loves naval warfare, history, or simulation games. It offers a realistic and immersive experience of commanding and fighting with some of the most legendary warships from different countries and eras. It also offers a variety of features and options that make the game fun and engaging, such as customization and upgrade system, realistic 3D graphics and physics, various game modes and maps, multiplayer and co-op options, etc. It also offers tips and tricks that can help you improve your skills and strategies in the game. If you are looking for a game that will challenge your mind and test your reflexes in the sea of fire and steel, you should definitely try Battle of Warships.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Battle of Warships:</p>
- <ul>
-<li><strong>How do I download and install Battle of Warships?</strong></li>
-<p>You can download and install Battle of Warships from the Google Play Store or the App Store for free. You need to have a device that meets the minimum requirements for the game, such as Android 4.1 or iOS 9.0 or later, and at least 300 MB of free storage space.</p>
-<li><strong>How do I control my ship in Battle of Warships?</strong></li>
-<p>You can control your ship in Battle of Warships by using the virtual joystick on the left side of the screen to move forward, backward, left, or right. You can also use the buttons on the right side of the screen to fire your weapons, switch your weapons, zoom in or out, activate your special abilities, etc. You can also tilt your device to change the angle of your view.</p>
-<li><strong>How do I earn more gold, silver, platinum, or containers in Battle of Warships?</strong></li>
-<p>You can earn more gold, silver, platinum, or containers in Battle of Warships by completing daily missions and achievements, winning battles, ranking up in the leaderboard, joining a clan or creating your own clan, watching ads, or buying them with real money.</p>
-<li><strong>How do I unlock new ships in Battle of Warships?</strong></li>
-<p>You can unlock new ships in Battle of Warships by reaching a certain level or by buying them with gold or platinum. You can also get new ships from containers that you can earn or buy.</p>
-<li><strong>How do I join a clan or create my own clan in Battle of Warships?</strong></li>
-<p>You can join a clan or create your own clan in Battle of Warships by tapping on the clan icon on the main menu. You can search for an existing clan by name or tag, or browse through the list of recommended clans. You can also create your own clan by choosing a name, a tag, a flag, and a description for your clan. You can invite other players to join your clan by sending them a request or a link.</p>
-</ul>
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Game Turbo 4.0 Apk Dart 2021 Cng c chi game hiu qu trn Xiaomi.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Game Turbo 4.0 Apk Dart 2021 Cng c chi game hiu qu trn Xiaomi.md
deleted file mode 100644
index 756343f1645c6c08e797e01c46d7727e171f772d..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Game Turbo 4.0 Apk Dart 2021 Cng c chi game hiu qu trn Xiaomi.md	
+++ /dev/null
@@ -1,133 +0,0 @@
-
-<h1>Download Game Turbo 4.0 apk apkdart 2021 - How to boost gaming performance on Xiaomi phones</h1>
- <p>Are you a gamer who loves mobile titles? Do you want your games to run smoothly, without stutter or lag? Would you like in-game conveniences such as voice changing, screenshots, Wi-Fi toggling and more? If you own a Xiaomi phone, don't overlook <strong>Game Turbo 4.0 apk</strong>, a remarkably effective and convenient game-boosting tool.</p>
- <p>In this article, we will introduce <strong>Game Turbo 4.0 apk</strong>: how to download and install it, how to use it to speed up your games, the benefits and limitations of the tool, and some alternatives to it. Let's get started!</p>
-<h2>Download Game Turbo 4.0 apk apkdart 2021</h2><br /><p><b><b>Download File</b> &#11088; <a href="https://ssurll.com/2uNYB2">https://ssurll.com/2uNYB2</a></b></p><br /><br />
- <h2>Introduction to Game Turbo 4.0 apk</h2>
- <p>Game Turbo 4.0 apk is a tool developed by Xiaomi and pre-installed on Xiaomi devices running MIUI 12 or later. It is the upgraded version of Game Turbo 3.0 apk, with many new features and improvements to both the interface and performance.</p>
- <h3>What is Game Turbo 4.0 apk?</h3>
- <p>Game Turbo 4.0 apk boosts gaming performance on Xiaomi phones by optimizing parameters such as the CPU, GPU, RAM, screen brightness and audio. It lets you customize the configuration for each game individually, so you get the gaming experience that suits you best.</p>
- <h3>What features does Game Turbo 4.0 apk offer?</h3>
- <p>Besides boosting performance, Game Turbo 4.0 apk offers several in-game support features, such as:</p>
- <ul>
-<li>RAM cleanup: frees up RAM so games run more smoothly.</li>
-<li>Wi-Fi toggle: quickly turns Wi-Fi on or off while you play.</li>
-<li>Voice changer: changes your voice when streaming or voice-chatting with friends, for fun and laughs.</li>
-<li>Screenshots: captures the screen while you play and saves it to your phone's photo library.</li>
-<li>Do Not Disturb mode: silences calls, messages and other app notifications while you play, so you are not interrupted.</li>
-<li>Time limits: lets you schedule how long you play each day, so gaming doesn't become an addiction or eat up precious time.</li>
-</ul>
- <p>You can access these features by swiping from the left edge to the right while in a game, which brings up the Game Turbo 4.0 apk toolbar.</p>
- <h2>How to download and install Game Turbo 4.0 apk</h2>
- <p>If your Xiaomi device runs MIUI 12 or later, Game Turbo 4.0 apk is already on your phone. Just go to Settings > Special features > Game Turbo to activate and use it.</p>
- <p>If your Xiaomi device runs MIUI 11 or lower, or you want to install Game Turbo 4.0 apk on another Android device, you can download and install it as follows:</p>
- <h3>Download link for Game Turbo 4.0 apk</h3>
- <p>You can download Game Turbo 4.0 apk from apkdart.com, a website that provides free, safe apk files. Visit the site and search for Game Turbo 4.0 apk, or go directly to this link: <a href="">https://www.apkdart.com/en/game-turbo-40-apk</a></p>
- <p>You will see a green button labeled Download APK; tap it to start downloading the Game Turbo 4.0 apk file to your phone.</p>
- <h3>How to install Game Turbo 4.0 apk</h3>
- <p>Once the Game Turbo 4.0 apk file has finished downloading, you need to install it before you can use it. Follow these steps:</p>
- <ol>
-<li>Open your phone's file manager and navigate to the folder containing the downloaded Game Turbo 4.0 apk file.</li>
-<li>Tap the Game Turbo 4.0 apk file to open it. If your phone asks for permission to install from unknown sources, tap Settings > Allow from this source.</li>
-<li>Wait for the installation to finish, then tap Open to launch Game Turbo 4.0 apk.</li>
-</ol>
- <p>You have now installed Game Turbo 4.0 apk and can use it to boost gaming performance on your phone.</p>
- <h2>How to use Game Turbo 4.0 apk to boost gaming performance</h2>
- <p>To boost gaming performance with Game Turbo 4.0 apk, you need to enable its boost mode and use its support features while you play.</p>
- <h3>How to enable boost mode in Game Turbo 4.0 apk</h3>
- <p>Follow these steps to enable boost mode:</p>
- <ol>
-<li>Open Game Turbo 4.0 apk from your phone's home screen, or from the toolbar shown when you swipe from the left edge to the right while in a game.</li>
-<li>Tap the + icon to add the games you want to boost to Game Turbo 4.0 apk's list.</li>
-<li>Tap the gear icon to open Game Turbo 4.0 apk's settings.</li>
-<li>Tap Performance mode to enable high-performance mode for the games you added.</li>
-<li>Tap Custom settings to tune the configuration for each game, including the CPU, GPU, RAM, screen brightness, audio and more.</li>
-</ol>
- <p>Boost mode is now enabled and you can use Game Turbo 4.0 apk to speed up your games.</p>
- <h3>How to use the support features of Game Turbo 4.0 apk</h3>
- <p>Besides boosting performance, Game Turbo 4.0 apk offers in-game support features such as RAM cleanup, Wi-Fi toggling, voice changing, screenshots, Do Not Disturb mode and time limits. To use them, swipe from the left edge to the right while in a game to bring up the Game Turbo 4.0 apk toolbar, then tap the icon of the feature you want.</p>
- <h4>RAM cleanup</h4>
- <p>Tap the RAM icon to clean up your phone's RAM, freeing memory so games run more smoothly.</p>
- <h4>Wi-Fi toggle</h4>
- <p>Tap the Wi-Fi icon to quickly turn Wi-Fi on or off while you play. Turn it on when you need an internet connection for online games, or off when you don't, to save battery and avoid interruptions.</p>
- <h4>Voice changer</h4>
- <p>Tap the microphone icon to change your voice when streaming or voice-chatting with friends, for fun and laughs. You can choose among different voices, such as male, female, child, elderly, robot and more.</p>
- <h4>Screenshots</h4>
- <p>Tap the camera icon to capture the screen while you play and save it to your phone's photo library. You can share screenshots with friends or keep memorable gaming moments.</p>
- <h4>Do Not Disturb mode</h4>
- <p>Tap the bell icon to turn Do Not Disturb mode on or off while you play, so you are not interrupted by calls, messages or other app notifications. You can focus on the game without distraction.</p>
- <h4>Time limits</h4>
- <p>Tap the clock icon to schedule your daily gaming time, so gaming doesn't become an addiction or eat up precious time. You can set minutes or hours per game, and the tool notifies you when you reach the limit.</p>
- <h2>Benefits and limitations of Game Turbo 4.0 apk</h2>
- <p>Game Turbo 4.0 apk is a very useful and convenient tool for gamers with Xiaomi phones, but it has some limitations you should be aware of. Here are the benefits and limitations worth knowing.</p>
- <h3>Benefits of Game Turbo 4.0 apk</h3>
- <p>Game Turbo 4.0 apk gives you the following benefits:</p>
- <h4>Smoother, more stable gaming performance</h4>
- <p>Game Turbo 4.0 apk boosts gaming performance on Xiaomi phones by optimizing parameters such as the CPU, GPU, RAM, screen brightness and audio. You get a smooth, stable experience without stutter or freezes.</p>
- <h4>Per-game configuration</h4>
- <p>Game Turbo 4.0 apk lets you customize the configuration for each game individually, so you get the experience that suits you best. You can change settings such as the resolution, frame rate, graphics quality and audio for every game.</p>
- <h4>Voice changing for more entertaining streams</h4>
- <p>Game Turbo 4.0 apk changes your voice when streaming or voice-chatting with friends, for fun and laughs. You can choose among different voices, such as male, female, child, elderly or robot, and create amusing moments that impress your viewers or teammates.</p>
- <h4>Using other apps while gaming</h4>
- <p>Game Turbo 4.0 apk lets you use other apps while gaming, such as Facebook, Messenger, Zalo or YouTube. You can access them by swiping from the right edge to the left while in a game to bring up the Game Turbo 4.0 apk toolbar. You won't miss any message or notification while playing.</p>
- <h3>Limitations of Game Turbo 4.0 apk</h3>
- <p>Game Turbo 4.0 apk also has a few limitations to keep in mind:</p>
- <h4>Only compatible with Xiaomi devices</h4>
- <p>Game Turbo 4.0 apk is developed and optimized only for Xiaomi devices running MIUI 12 or later. If you use a Xiaomi device on MIUI 11 or lower, or another Android device, you may run into compatibility or performance problems when installing and using it.</p>
- <h4>May heat up the phone and drain the battery faster</h4>
- <p>Game Turbo 4.0 apk is a performance booster, so it makes heavy use of the phone's resources, such as the CPU, GPU and RAM. This can heat up the phone and drain the battery faster while you play. Check the phone's temperature and battery level regularly, and turn off boost mode when you don't need it.</p>
- <h4>May conflict with some games or apps</h4>
- <p>Game Turbo 4.0 apk may conflict with certain games or apps, for example by lowering graphics or audio quality, or by causing errors such as freezes, crashes or lost connections. Check whether Game Turbo 4.0 apk is compatible with the game you want to play, and try turning off some of its support features if you run into these problems.</p>
- <h2>Alternatives to Game Turbo 4.0 apk</h2>
- <p>If you cannot use Game Turbo 4.0 apk, or you want other tools with similar functionality, consider these alternatives:</p>
- <h3>Game Tuner for Samsung</h3>
- <p>Game Tuner is a tool developed by Samsung and built into Samsung devices running Android 6.0 or later. It lets you customize the configuration for each game, including the resolution, frame rate and graphics quality, and it offers in-game support features such as Wi-Fi toggling, Do Not Disturb mode and screenshots.</p>
- <h3>Game Booster for Android</h3>
- <p>Game Booster is a tool developed by BGNmobi, available for free on the Google Play Store for Android devices. It boosts gaming performance on Android phones by cleaning up RAM, closing background apps and optimizing the CPU and GPU, and it offers in-game support features such as toggling Wi-Fi, Bluetooth and audio.</p>
- <h2>Conclusion and FAQs</h2>
- <p>In this article, we introduced Game Turbo 4.0 apk: how to download and install it, how to use it to boost gaming performance, its benefits and limitations, and some alternatives. We hope it helps you enjoy great gaming sessions on your Xiaomi phone.</p>
- <p>Here are some frequently asked questions about Game Turbo 4.0 apk that you may find useful:</p>
- <ul>
-<li><strong>Is Game Turbo 4.0 apk safe?</strong></li>
-<li>Game Turbo 4.0 apk is developed by Xiaomi and comes built into Xiaomi devices running MIUI 12 or later, so it is safe and contains no viruses or malware. However, if you download Game Turbo 4.0 apk from unofficial sources, check the apk file carefully before installing to avoid security risks.</li>
-<li><strong>Is Game Turbo 4.0 apk free?</strong></li>
-<li>Game Turbo 4.0 apk is free, with no ads or in-app purchases. You can use it without paying anything.</li>
-<li><strong>Can Game Turbo 4.0 apk boost every game?</strong></li>
-<li>Game Turbo 4.0 apk can boost most games on Xiaomi phones, but some games are incompatible or simply don't need boosting, such as light, simple titles or games that are already well optimized. Check whether Game Turbo 4.0 apk actually helps the game you want to play by comparing performance with boost mode on and off.</li>
-<li><strong>Does Game Turbo 4.0 apk affect the phone's battery life?</strong></li>
-<li>Game Turbo 4.0 apk is a performance booster, so it makes heavy use of the phone's resources, such as the CPU, GPU and RAM, which can drain the battery faster while you play. Charge the battery fully before playing, and turn off boost mode when you don't need it, to save battery.</li>
-</ul>
\ No newline at end of file
diff --git a/spaces/siya02/Konakni-TTS/ttsv/utils/__init__.py b/spaces/siya02/Konakni-TTS/ttsv/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/siya02/Konakni-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py b/spaces/siya02/Konakni-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py
deleted file mode 100644
index 827bdc98f2d84090cc445d786ff8fc1e5ff3d829..0000000000000000000000000000000000000000
--- a/spaces/siya02/Konakni-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import os
-from glob import glob
-import re
-import string
-import argparse
-import json
-import random
-random.seed(42)
-
-def replace_extra_chars(line):
-    line = line.replace("(", "").replace(
-        ")", ""
-    )  # .replace('\u200d', ' ').replace('\ufeff', ' ').replace('\u200c', ' ').replace('\u200e', ' ')
-    # line = line.replace('“', ' ').replace('”', ' ').replace(':', ' ')
-
-    return line.strip()
-
-
-def write_txt(content, filename):
-    with open(filename, "w+", encoding="utf-8") as f:
-        f.write(content)
-
-
-def save_train_test_valid_split(annotations_txt, num_samples_valid, num_samples_test):
-    with open(annotations_txt, encoding="utf-8") as f:
-        all_lines = [line.strip() for line in f.readlines()]
-    test_val_indices = random.sample(
-        range(len(all_lines)), num_samples_valid + num_samples_test
-    )
-    valid_ix = test_val_indices[:num_samples_valid]
-    test_ix = test_val_indices[num_samples_valid:]
-    train = [line for i, line in enumerate(all_lines) if i not in test_val_indices]
-    valid = [line for i, line in enumerate(all_lines) if i in valid_ix]
-    test = [line for i, line in enumerate(all_lines) if i in test_ix]
-
-    print(f"Num samples in train: {len(train)}")
-    print(f"Num samples in valid: {len(valid)}")
-    print(f"Num samples in test: {len(test)}")
-
-    out_dir_path = "/".join(annotations_txt.split("/")[:-1])
-    with open(os.path.join(out_dir_path, "train.txt"), "w+", encoding="utf-8") as f:
-        for line in train:
-            print(line, file=f)
-    with open(os.path.join(out_dir_path, "valid.txt"), "w+", encoding="utf-8") as f:
-        for line in valid:
-            print(line, file=f)
-    with open(os.path.join(out_dir_path, "test.txt"), "w+", encoding="utf-8") as f:
-        for line in test:
-            print(line, file=f)
-    print(f"train, test and valid txts saved in {out_dir_path}")
-
-
-def save_txts_from_txt_done_data(
-    text_path,
-    wav_path_for_annotations_txt,
-    out_path_for_txts,
-    num_samples_valid,
-    num_samples_test,
-):
-    outfile = os.path.join(out_path_for_txts, "annotations.txt")
-    with open(text_path) as file:
-        file_lines = file.readlines()
-
-    # print(file_lines[0])
-
-    file_lines = [replace_extra_chars(line) for line in file_lines]
-    # print(file_lines[0])
-
-    fnames, ftexts = [], []
-    for line in file_lines:
-        elems = line.split('"')
-        fnames.append(elems[0].strip())
-        ftexts.append(elems[1].strip().lower().replace('‘','\'').replace('’','\''))
-
-    all_chars = list(set("".join(ftexts)))
-    punct_with_space = [i for i in all_chars if i in list(string.punctuation)] + [" "]
-    chars = [i for i in all_chars if i not in punct_with_space if i.strip()]
-    chars = "".join(chars)
-    punct_with_space = "".join(punct_with_space)#.replace("'",r"\'")
-
-    with open('../../config/glow/base_blank.json', 'r') as jfile:
-        json_config = json.load(jfile)
-
-    json_config["data"]["chars"] = chars
-    json_config["data"]["punc"] = punct_with_space
-    json_config["data"]["training_files"]=out_path_for_txts + '/train.txt'
-    json_config["data"]["validation_files"] = out_path_for_txts + '/valid.txt'
-    new_config_name = out_path_for_txts.split('/')[-1]
-    with open(f'../../config/glow/{new_config_name}.json','w+') as jfile:
-        json.dump(json_config, jfile)
-    
-    print(f"Characters: {chars}")
-    print(f"Len of vocab: {len(chars)}")
-    print(f"Punctuation: {punct_with_space}")
-    print(f"Config file is stored at ../../config/glow/{new_config_name}.json")
-
-    outfile_f = open(outfile, "w+", encoding="utf-8")
-    for f, t in zip(fnames, ftexts):
-        print(
-            os.path.join(wav_path_for_annotations_txt, f) + ".wav",
-            t,
-            sep="|",
-            file=outfile_f,
-        )
-    outfile_f.close()
-    write_txt(punct_with_space, os.path.join(out_path_for_txts, "punc.txt"))
-    write_txt(chars, os.path.join(out_path_for_txts, "chars.txt"))
-
-    save_train_test_valid_split(
-        annotations_txt=outfile,
-        num_samples_valid=num_samples_valid,
-        num_samples_test=num_samples_test,
-    )
-
-
-
-
-if __name__ == "__main__":
-
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-i", "--text-path", type=str, required=True)
-    parser.add_argument("-o", "--output-path", type=str, required=True)
-    parser.add_argument("-w", "--wav-path", type=str, required=True)
-    parser.add_argument("-v", "--valid-samples", type=int, default = 100)
-    parser.add_argument("-t", "--test-samples", type=int, default = 10)
-    args = parser.parse_args()
-
-    save_txts_from_txt_done_data(
-        args.text_path,
-        args.wav_path,
-        args.output_path,
-        args.valid_samples,
-        args.test_samples,
-    )
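-
-# Example invocation (paths are illustrative):
-#   python prepare_iitm_data_glow_en.py -i txt.done.data -o /path/to/out_dir -w /path/to/wavs -v 100 -t 10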
diff --git a/spaces/songdaooi/ketsueki/face_analyser.py b/spaces/songdaooi/ketsueki/face_analyser.py
deleted file mode 100644
index 29b8d5847f6c52de76b257d30bf32d61777043e3..0000000000000000000000000000000000000000
--- a/spaces/songdaooi/ketsueki/face_analyser.py
+++ /dev/null
@@ -1,32 +0,0 @@
-detect_conditions = [
-    "left most",
-    "right most",
-    "top most",
-    "bottom most",
-    "most width",
-    "most height",
-]
-
-
-def analyse_face(image, model, return_single_face=True, detect_condition="left most"):
-    faces = model.get(image)
-    if not return_single_face:
-        return faces
-
-    total_faces = len(faces)
-    if total_faces == 0:
-        raise ValueError("No face detected in the image.")
-    if total_faces == 1:
-        return faces[0]
-
-    print(f"{total_faces} face detected. Using {detect_condition} face.")
-    if detect_condition == "left most":
-        return sorted(faces, key=lambda face: face["bbox"][0])[0]
-    elif detect_condition == "right most":
-        return sorted(faces, key=lambda face: face["bbox"][0])[-1]
-    elif detect_condition == "top most":
-        return sorted(faces, key=lambda face: face["bbox"][1])[0]
-    elif detect_condition == "bottom most":
-        return sorted(faces, key=lambda face: face["bbox"][1])[-1]
-    elif detect_condition == "most width":
-        return sorted(faces, key=lambda face: face["bbox"][2])[-1]
-    elif detect_condition == "most height":
-        return sorted(faces, key=lambda face: face["bbox"][3])[-1]
diff --git a/spaces/sradc/visual-content-search-over-videos/tests/pipeline/test_download_videos.py b/spaces/sradc/visual-content-search-over-videos/tests/pipeline/test_download_videos.py
deleted file mode 100644
index 4e36f8b611fe9fdadc314a25ee4c2b9added5fb9..0000000000000000000000000000000000000000
--- a/spaces/sradc/visual-content-search-over-videos/tests/pipeline/test_download_videos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from pipeline.download_videos import get_id
-
-
-def test_get_id():
-    url1 = "https://www.youtube.com/watch?v=frYIj2FGmMA&foo=bar"
-    url2 = "https://www.youtube.com/watch?v=abcdefg"
-    url3 = "https://www.youtube.com/watch?foo=bar&v=xyz123"
-    assert get_id(url1) == "frYIj2FGmMA"
-    assert get_id(url2) == "abcdefg"
-    assert get_id(url3) == "xyz123"
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_amp_optimizer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_amp_optimizer.py
deleted file mode 100644
index 3a785e1830e91b7e090e841d428fe4ea61f3a65c..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/tests/test_amp_optimizer.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import copy
-import unittest
-
-import torch
-from torch.cuda.amp import autocast, GradScaler
-from fairseq.optim import build_optimizer
-
-
-@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
-class TestGradientScalingAMP(unittest.TestCase):
-    def setUp(self):
-        self.x = torch.tensor([2.0]).cuda().half()
-        weight = 3.0
-        bias = 5.0
-        self.error = 1.0
-        self.target = torch.tensor([self.x * weight + bias + self.error]).cuda()
-        self.loss_fn = torch.nn.L1Loss()
-
-        self.model = torch.nn.Linear(1, 1)
-        self.model.weight.data = torch.tensor([[weight]])
-        self.model.bias.data = torch.tensor([bias])
-        self.model.cuda()
-        self.params = list(self.model.parameters())
-
-        self.namespace_dls = argparse.Namespace(
-            optimizer="adam",
-            lr=[0.1],
-            adam_betas="(0.9, 0.999)",
-            adam_eps=1e-8,
-            weight_decay=0.0,
-            threshold_loss_scale=1,
-            min_loss_scale=1e-4,
-        )
-        self.scaler = GradScaler(
-            init_scale=1,
-            growth_interval=1,
-        )
-
-    def run_iter(self, model, params, optimizer):
-        optimizer.zero_grad()
-        with autocast():
-            y = model(self.x)
-            loss = self.loss_fn(y, self.target)
-        self.scaler.scale(loss).backward()
-        self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
-
-        self.scaler.unscale_(optimizer)
-        grad_norm = optimizer.clip_grad_norm(0)
-        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
-
-        self.scaler.step(optimizer)
-        self.scaler.update()
-        self.assertEqual(
-            model.weight,
-            torch.tensor(
-                [[3.1]], device="cuda:0", requires_grad=True
-            ),
-        )
-        self.assertEqual(
-            model.bias,
-            torch.tensor(
-                [5.1], device="cuda:0", requires_grad=True
-            ),
-        )
-        self.assertEqual(self.scaler.get_scale(), 2.0)
-
-    def test_automatic_mixed_precision(self):
-        model = copy.deepcopy(self.model)
-        params = list(model.parameters())
-        optimizer = build_optimizer(self.namespace_dls, params)
-
-        self.run_iter(model, params, optimizer)
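-
-# Note: run_iter above follows the standard torch.cuda.amp recipe:
-# scaler.scale(loss).backward() -> scaler.unscale_(optimizer) -> clip gradients
-# -> scaler.step(optimizer) -> scaler.update()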
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Derive 61 Ita.md b/spaces/stomexserde/gpt4-ui/Examples/Derive 61 Ita.md
deleted file mode 100644
index 6b8664ca003cd21492587d3d49f89f7638218ec3..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Derive 61 Ita.md	
+++ /dev/null
@@ -1,46 +0,0 @@
-<br />
-
-<h1>What Is Derive 61 Ita and How to Use It?</h1>
-<p>Derive 61 Ita is a software program that can perform symbolic and numerical calculations, such as algebra, calculus, trigonometry, and differential equations. Derive 61 Ita can also create graphs, animations, and tables of data. Derive 61 Ita is a useful tool for students, teachers, engineers, scientists, and anyone who needs to solve mathematical problems quickly and accurately.</p>
-<p>In this article, we will show you how to download, install, and use Derive 61 Ita on your computer. We will also explain some of the features and benefits of Derive 61 Ita, as well as some tips and tricks to get the most out of it.</p>
-<h2>Derive 61 Ita</h2><br /><p><b><b>DOWNLOAD</b> &#9889; <a href="https://urlgoal.com/2uI9a7">https://urlgoal.com/2uI9a7</a></b></p><br /><br />
-<h2>How to Download and Install Derive 61 Ita?</h2>
-<p>To download Derive 61 Ita, you need to visit the official website of the software developer, Texas Instruments. There, you can find the latest version of Derive 61 Ita for Windows or Mac OS. You can also download a free trial version of Derive 61 Ita for 30 days.</p>
-<p>To install Derive 61 Ita, you need to follow the instructions on the screen after you run the downloaded file. You will need to agree to the terms and conditions of the software license agreement, choose a destination folder for the program files, and select a language for the user interface. The installation process should take only a few minutes.</p>
-<h2>How to Use Derive 61 Ita?</h2>
-<p>To use Derive 61 Ita, you need to launch the program from your desktop or start menu. You will see a main window with a menu bar, a toolbar, a status bar, and a workspace. The workspace is where you can enter expressions, commands, variables, functions, and other elements of your calculations. You can also view the results of your calculations in the workspace.</p>
-<p>To enter an expression or a command in Derive 61 Ita, you can use the keyboard or the mouse. You can also use the toolbar buttons or the menu options to access various features and functions of Derive 61 Ita. For example, you can use the File menu to open or save files, the Edit menu to copy or paste expressions, the Algebra menu to simplify or factor expressions, the Calculus menu to differentiate or integrate expressions, the Graph menu to plot or animate expressions, and so on.</p>
-<p>To evaluate an expression or a command in Derive 61 Ita, you need to press Enter or click on the Evaluate button on the toolbar. You will see the result of your calculation displayed in the workspace below your input. You can also use the History button on the toolbar to view your previous inputs and outputs.</p>
-<h2>What Are Some Features and Benefits of Derive 61 Ita?</h2>
-<p>Derive 61 Ita has many features and benefits that make it a powerful and versatile software program for mathematical calculations. Some of them are:</p>
-<ul>
-<li>Derive 61 Ita can handle complex expressions with fractions, radicals, exponents, logarithms, trigonometric functions, hyperbolic functions, special functions, matrices, vectors, tensors, sets, lists, equations, inequalities, and more.</li>
-<li>Derive 61 Ita can perform various operations on expressions such as simplification, factorization, expansion, substitution, solving, transformation, and more.</li>
-<li>Derive 61 Ita can perform various types of calculus such as differentiation, integration, limits, series, Taylor expansion, Laplace transform, Fourier transform, and more.</li>
-<li>Derive 61 Ita can create various types of graphs such as Cartesian graphs, polar graphs, parametric graphs, implicit graphs, 3D graphs, vector fields, contour plots, and more.</li>
-<li>Derive 61 Ita can create animations of graphs by varying parameters or time values.</li>
-</ul>
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fausto Por Este Rio Acima Rar [HOT].md b/spaces/stomexserde/gpt4-ui/Examples/Fausto Por Este Rio Acima Rar [HOT].md
deleted file mode 100644
index 37c1b1ce2e7f5333951b21d31d84ac69b97bc8cb..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Fausto Por Este Rio Acima Rar [HOT].md	
+++ /dev/null
@@ -1,18 +0,0 @@
-<br />
-<h1>Fausto Por Este Rio Acima Rar: A Musical Journey Through Portuguese History</h1>
-<p>Fausto Por Este Rio Acima Rar is a rare and acclaimed album by the Portuguese singer-songwriter Fausto Bordalo Dias, released in 1982. The album is a concept album that tells the story of Portugal from the 15th century to the 20th century, using the metaphor of a river that flows from the interior to the coast.</p>
-<h2>Fausto Por Este Rio Acima Rar</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt;&gt;&gt; <a href="https://urlgoal.com/2uI5LQ">https://urlgoal.com/2uI5LQ</a></b></p><br /><br />
-<p>The album is divided into two parts: the first part covers the period of the Portuguese discoveries and expansion, from the Age of Exploration to the colonial wars; the second part covers the period of the Portuguese decline and revolution, from the Napoleonic invasions to the Carnation Revolution. The album mixes folk, rock, classical and electronic music, creating a unique and original sound that reflects the diversity and richness of Portuguese culture.</p>
-<p>Fausto Por Este Rio Acima Rar is considered by many critics and fans as one of the best and most important albums in Portuguese music history. It is also a rare and hard-to-find album, as it was never reissued on CD or digital platforms. However, some copies of the original vinyl record can still be found online or in specialized stores.</p>
-<p>If you are interested in discovering more about Fausto Por Este Rio Acima Rar, you can listen to some of its songs on YouTube or read some of its lyrics and translations on Genius. You can also learn more about Fausto Bordalo Dias and his other works on his official website or on Wikipedia.</p>
-
-<p>The title of the album, Por Este Rio Acima, means "Up This River" in Portuguese. It refers to the river Tagus, which flows from Spain to Portugal and into the Atlantic Ocean. The river symbolizes the journey of the Portuguese people through history, as well as their connection to the sea and the world. The album cover shows a map of the river and its surroundings, with some historical landmarks and references.</p>
-<p></p>
-<p>The album is inspired by the writings of Fernão Mendes Pinto, a 16th-century Portuguese explorer and adventurer who traveled extensively in Asia and Africa. His memoirs, titled Peregrinação (Pilgrimage), are considered a classic of Portuguese literature and a valuable source of information about the cultures and civilizations he encountered. Fausto uses some of Mendes Pinto's stories and characters as a basis for his songs, but also adds his own perspective and interpretation.</p>
-<p>The album is also influenced by other Portuguese musical genres and traditions, such as fado, cante alentejano, modinhas and mornas. Fausto incorporates elements of these styles into his compositions, creating a fusion of sounds that reflects the diversity and richness of Portuguese culture. The album also features some guest musicians and singers, such as Teresa Salgueiro from Madredeus, José Mário Branco, Carlos do Carmo and Rui Veloso.</p>
-
-<p>The album received critical acclaim and commercial success when it was released in 1982. It was praised for its originality, creativity and quality, as well as for its historical and cultural relevance. It was also awarded the Prémio José Afonso, a prestigious prize for Portuguese music. The album is widely regarded as a masterpiece and a landmark in Portuguese music history.</p>
-<p>The album is also a rare and hard-to-find item, as it was never reissued on CD or digital platforms. The original vinyl record is considered a collector's item and can fetch high prices on online auctions or specialized stores. However, some fans have uploaded the album on YouTube or other websites, making it accessible to a wider audience. The album is also available on Qobuz, a streaming service that offers high-quality audio files.</p>
-<p>Fausto Por Este Rio Acima Rar is a musical journey through Portuguese history that showcases the talent and vision of Fausto Bordalo Dias, one of the most influential and respected artists in Portugal. It is an album that deserves to be listened to and appreciated by anyone who loves music and culture.</p>
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Grand Prix World Cd !FREE! Crack Codes.md b/spaces/stomexserde/gpt4-ui/Examples/Grand Prix World Cd !FREE! Crack Codes.md
deleted file mode 100644
index 0dd4530f328e81a48a75d52eea14e56e1e2a5aa2..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Grand Prix World Cd !FREE! Crack Codes.md	
+++ /dev/null
@@ -1,28 +0,0 @@
-<br />
-<h1>How to Play Grand Prix World on Windows 10 and 8</h1>
-<p>Grand Prix World is a classic Formula 1 management game released in 1999 by Microprose. It lets you take control of one of the 11 teams in the 1998 season and compete for the championship. However, the game is not compatible with modern operating systems and requires some tweaks to run properly. Here are the steps to install and play Grand Prix World on Windows 10 and 8:</p>
-<h2>Grand Prix World Cd Crack Codes</h2><br /><p><b><b>Download Zip</b> &middot;&middot;&middot;&middot;&middot; <a href="https://urlgoal.com/2uI9Yc">https://urlgoal.com/2uI9Yc</a></b></p><br /><br />
-<ol>
-<li>Download the game ISO from <a href="https://archive.org/details/grand-prix-world.-7z">Archive.org</a> [^1^] or <a href="https://www.myabandonware.com/game/grand-prix-world-frp">My Abandonware</a> [^2^]. You can also use your original CD if you have it.</li>
-<li>Mount the ISO file using a virtual drive software such as Daemon Tools or WinCDEmu.</li>
-<li>Run the setup.exe file from the mounted drive and install the game to your desired location.</li>
-<li>Download and install the official patches v1.01b and v1.03b from <a href="https://www.myabandonware.com/game/grand-prix-world-frp#download">My Abandonware</a>. Make sure to apply them in order.</li>
-<li>Download the unofficial launcher for Windows XP from <a href="https://www.myabandonware.com/game/grand-prix-world-frp#download">My Abandonware</a> and extract it to your game folder. This launcher will allow you to run the game in windowed mode and adjust some settings.</li>
-<li>Download DxWnd from <a href="https://sourceforge.net/projects/dxwnd/">SourceForge</a> and extract it to a folder of your choice. DxWnd is a tool that can force old games to run in windowed mode and fix some graphical issues.</li>
-<li>Run DxWnd.exe and click on File > Import. Browse to the folder where you extracted DxWnd and select the file "Grand Prix World (gpwxp3 primary buffer mode).dxw". This file contains the settings for running Grand Prix World with DxWnd.</li>
-<li>Right-click on the Grand Prix World entry in DxWnd and click on Modify. In the Path tab, change the Path field to point to your gpwxp3.exe file in your game folder. Click OK to save the changes.</li>
-<li>To play the game, double-click on the Grand Prix World entry in DxWnd. The game should launch in windowed mode. You can adjust the window size by dragging the edges or corners of the window.</li>
-</ol>
-<p>Enjoy playing Grand Prix World on your modern PC!</p>
-<h2>Tips and Tricks for Playing Grand Prix World</h2>
-<p>Grand Prix World is not only a game of strategy, but also a game of skill. You need to manage your team, your drivers, your sponsors, your finances, your research and development, and your race tactics. Here are some tips and tricks to help you succeed in Grand Prix World:</p>
-<ul>
-<li>Save often. The game can be unpredictable and sometimes crash or freeze. You don't want to lose your progress or have to restart a race. Save before and after each race, and use different slots for different seasons.</li>
-<li>Use the unofficial launcher for Windows XP. This launcher will allow you to run the game in windowed mode and adjust some settings. You can change the resolution, the color depth, the sound quality, the language, and the difficulty level. You can also enable or disable some features such as driver aids, driver errors, random failures, weather effects, and pit stop animations.</li>
-<li>Use DxWnd to fix graphical issues. DxWnd is a tool that can force old games to run in windowed mode and resolve common rendering problems. You can import the settings file "Grand Prix World (gpwxp3 primary buffer mode).dxw" from <a href="https://sourceforge.net/projects/dxwnd/">SourceForge</a> and modify the path to point to your gpwxp3.exe file. This will make the game run smoother and prevent flickering or black screens.</li>
-<li>Edit the game files to customize your experience. Grand Prix World has many files that can be edited with a text editor or a hex editor. You can change the names, stats, colors, logos, sponsors, tracks, and more of the teams, drivers, staff, and circuits. You can also create new seasons, new mods, new patches, and new tools. You can find many resources and tutorials on how to edit the game files on <a href="https://www.youtube.com/watch?v=d7cnvv2VXOM">YouTube</a> or the <a href="http://www.grandprixgames.org/list.php?1">Grand Prix Games Forum</a>.</li>
-<li>Learn from other players. Grand Prix World has a loyal fan base that still plays and enjoys the game. You can watch gameplay videos on <a href="https://www.youtube.com/watch?v=d7cnvv2VXOM">YouTube</a> or read guides and tips on the <a href="http://www.grandprixgames.org/list.php?1">Grand Prix Games Forum</a>. You can also join online leagues and compete with other players around the world.</li>
-</ul>
-<p>Grand Prix World is a challenging and rewarding game that will test your skills as a Formula 1 manager. With these tips and tricks, you can improve your performance and have more fun playing Grand Prix World.</p>
\ No newline at end of file
diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_prompt_generator.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_prompt_generator.py
deleted file mode 100644
index d2e870c6d710bce2096f470026b25a3510b2a5b6..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_prompt_generator.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time    : 2023/5/2 17:46
-@Author  : alexanderwu
-@File    : test_prompt_generator.py
-"""
-
-import pytest
-
-from metagpt.logs import logger
-from metagpt.tools.prompt_writer import (
-    BEAGECTemplate,
-    EnronTemplate,
-    GPTPromptGenerator,
-    WikiHowTemplate,
-)
-
-
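-# These tests exercise the prompt templates end-to-end. The `llm_api` fixture is assumed
-# to be defined elsewhere in the test suite (e.g. a conftest) and to expose a client for a
-# live model endpoint; the BEAGEC test below is the only one that runs fully offline.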
-@pytest.mark.usefixtures("llm_api")
-def test_gpt_prompt_generator(llm_api):
-    generator = GPTPromptGenerator()
-    example = "商品名称:WonderLab 新肌果味代餐奶昔 小胖瓶 胶原蛋白升级版 饱腹代餐粉6瓶 75g/瓶(6瓶/盒) 店铺名称:金力宁食品专营店 " \
-              "品牌:WonderLab 保质期:1年 产地:中国 净含量:450g"
-
-    results = llm_api.ask_batch(generator.gen(example))
-    logger.info(results)
-    assert len(results) > 0
-
-
-@pytest.mark.usefixtures("llm_api")
-def test_wikihow_template(llm_api):
-    template = WikiHowTemplate()
-    question = "learn Python"
-    step = 5
-
-    results = template.gen(question, step)
-    assert len(results) > 0
-    assert any("Give me 5 steps to learn Python." in r for r in results)
-
-
-@pytest.mark.usefixtures("llm_api")
-def test_enron_template(llm_api):
-    template = EnronTemplate()
-    subj = "Meeting Agenda"
-
-    results = template.gen(subj)
-    assert len(results) > 0
-    assert any("Write an email with the subject \"Meeting Agenda\"." in r for r in results)
-
-
-def test_beagec_template():
-    template = BEAGECTemplate()
-
-    results = template.gen()
-    assert len(results) > 0
-    assert any("Edit and revise this document to improve its grammar, vocabulary, spelling, and style."
-               in r for r in results)
diff --git a/spaces/sub314xxl/MusicGen-Continuation/audiocraft/__init__.py b/spaces/sub314xxl/MusicGen-Continuation/audiocraft/__init__.py
deleted file mode 100644
index 2befac60faf6f406f78ff7b7da05225dbfe7b111..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen-Continuation/audiocraft/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# flake8: noqa
-from . import data, modules, models
-
-__version__ = '0.0.2a1'
diff --git a/spaces/sunnyzhifei/ChatGPTOnline/modules/chat_func.py b/spaces/sunnyzhifei/ChatGPTOnline/modules/chat_func.py
deleted file mode 100644
index 4c635c51ada4a852d1495646cf81120de15af7b9..0000000000000000000000000000000000000000
--- a/spaces/sunnyzhifei/ChatGPTOnline/modules/chat_func.py
+++ /dev/null
@@ -1,497 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-from typing import TYPE_CHECKING, List
-
-import logging
-import json
-import os
-import requests
-import urllib3
-
-from tqdm import tqdm
-import colorama
-from duckduckgo_search import ddg
-import asyncio
-import aiohttp
-
-
-from modules.presets import *
-from modules.llama_func import *
-from modules.utils import *
-from . import shared
-from modules.config import retrieve_proxy
-
-# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
-
-if TYPE_CHECKING:
-    from typing import TypedDict
-
-    class DataframeData(TypedDict):
-        headers: List[str]
-        data: List[List[str | int | bool]]
-
-
-initial_prompt = "You are a helpful assistant."
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-@shared.state.switching_api_key  # this decorator has no effect unless multi-API-key mode is enabled
-def get_response(
-    openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model
-):
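-    # Build an OpenAI-compatible chat-completions request: the system prompt is prepended
-    # to the message history and the payload follows the /v1/chat/completions schema.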
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {openai_api_key}",
-    }
-
-    history = [construct_system(system_prompt), *history]
-
-    payload = {
-        "model": selected_model,
-        "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
-        "temperature": temperature,  # 1.0,
-        "top_p": top_p,  # 1.0,
-        "n": 1,
-        "stream": stream,
-        "presence_penalty": 0,
-        "frequency_penalty": 0,
-    }
-    if stream:
-        timeout = timeout_streaming
-    else:
-        timeout = timeout_all
-
-
-    # If a custom API host is configured, send the request there; otherwise use the default endpoint
-    if shared.state.completion_url != COMPLETION_URL:
-        logging.info(f"Using custom API URL: {shared.state.completion_url}")
-
-    with retrieve_proxy():
-        response = requests.post(
-            shared.state.completion_url,
-            headers=headers,
-            json=payload,
-            stream=True,
-            timeout=timeout,
-        )
-
-    return response
-
-
-def stream_predict(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    selected_model,
-    fake_input=None,
-    display_append=""
-):
-    def get_return_value():
-        return chatbot, history, status_text, all_token_counts
-
-    logging.info("实时回答模式")
-    partial_words = ""
-    counter = 0
-    status_text = "开始实时传输回答……"
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    if fake_input:
-        chatbot.append((fake_input, ""))
-    else:
-        chatbot.append((inputs, ""))
-    user_token_count = 0
-    if fake_input is not None:
-        input_token_count = count_token(construct_user(fake_input))
-    else:
-        input_token_count = count_token(construct_user(inputs))
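-    # On the first turn, the system prompt's token cost is charged together with the user message.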
-    if len(all_token_counts) == 0:
-        system_prompt_token_count = count_token(construct_system(system_prompt))
-        user_token_count = (
-            input_token_count + system_prompt_token_count
-        )
-    else:
-        user_token_count = input_token_count
-    all_token_counts.append(user_token_count)
-    logging.info(f"输入token计数: {user_token_count}")
-    yield get_return_value()
-    try:
-        response = get_response(
-            openai_api_key,
-            system_prompt,
-            history,
-            temperature,
-            top_p,
-            True,
-            selected_model,
-        )
-    except requests.exceptions.ConnectTimeout:
-        status_text = (
-            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        )
-        yield get_return_value()
-        return
-    except requests.exceptions.ReadTimeout:
-        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
-        yield get_return_value()
-        return
-
-    yield get_return_value()
-    error_json_str = ""
-
-    if fake_input is not None:
-        history[-2] = construct_user(fake_input)
-    for chunk in tqdm(response.iter_lines()):
-        if counter == 0:
-            counter += 1
-            continue
-        counter += 1
-        # check whether each line is non-empty
-        if chunk:
-            chunk = chunk.decode()
-            chunklength = len(chunk)
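-            # Each server-sent-events line is prefixed with "data: "; drop the 6-character
-            # prefix before JSON-decoding the streamed delta.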
-            try:
-                chunk = json.loads(chunk[6:])
-            except json.JSONDecodeError:
-                logging.info(chunk)
-                error_json_str += chunk
-                status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
-                yield get_return_value()
-                continue
-            # decode each line as response data is in bytes
-            if chunklength > 6 and "delta" in chunk["choices"][0]:
-                finish_reason = chunk["choices"][0]["finish_reason"]
-                status_text = construct_token_message(all_token_counts)
-                if finish_reason == "stop":
-                    yield get_return_value()
-                    break
-                try:
-                    partial_words = (
-                        partial_words + chunk["choices"][0]["delta"]["content"]
-                    )
-                except KeyError:
-                    status_text = (
-                        standard_error_msg
-                        + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: "
-                        + str(sum(all_token_counts))
-                    )
-                    yield get_return_value()
-                    break
-                history[-1] = construct_assistant(partial_words)
-                chatbot[-1] = (chatbot[-1][0], partial_words+display_append)
-                all_token_counts[-1] += 1
-                yield get_return_value()
-
-
-def predict_all(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    selected_model,
-    fake_input=None,
-    display_append=""
-):
-    logging.info("一次性回答模式")
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    if fake_input:
-        chatbot.append((fake_input, ""))
-    else:
-        chatbot.append((inputs, ""))
-    if fake_input is not None:
-        all_token_counts.append(count_token(construct_user(fake_input)))
-    else:
-        all_token_counts.append(count_token(construct_user(inputs)))
-    try:
-        response = get_response(
-            openai_api_key,
-            system_prompt,
-            history,
-            temperature,
-            top_p,
-            False,
-            selected_model,
-        )
-    except requests.exceptions.ConnectTimeout:
-        status_text = (
-            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        )
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.ProxyError:
-        status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.SSLError:
-        status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    response = json.loads(response.text)
-    if fake_input is not None:
-        history[-2] = construct_user(fake_input)
-    try:
-        content = response["choices"][0]["message"]["content"]
-        history[-1] = construct_assistant(content)
-        chatbot[-1] = (chatbot[-1][0], content+display_append)
-        total_token_count = response["usage"]["total_tokens"]
-        if fake_input is not None:
-            all_token_counts[-1] += count_token(construct_assistant(content))
-        else:
-            all_token_counts[-1] = total_token_count - sum(all_token_counts)
-        status_text = construct_token_message(total_token_count)
-        return chatbot, history, status_text, all_token_counts
-    except KeyError:
-        status_text = standard_error_msg + str(response)
-        return chatbot, history, status_text, all_token_counts
-
-
-def predict(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    stream=False,
-    selected_model=MODELS[0],
-    use_websearch=False,
-    files = None,
-    reply_language="中文",
-    should_check_token_count=True,
-):  # repetition_penalty, top_k
-    from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
-    from llama_index.indices.query.schema import QueryBundle
-    from langchain.llms import OpenAIChat
-
-
-    logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
-    if should_check_token_count:
-        yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts
-    if reply_language == "跟随问题语言(不稳定)":
-        reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
-    old_inputs = None
-    display_reference = []
-    limited_context = False
-    if files:
-        limited_context = True
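-        # Build a llama-index vector index over the uploaded files and retrieve the
-        # top-5 most similar chunks as context for the prompt template.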
-        old_inputs = inputs
-        msg = "加载索引中……(这可能需要几分钟)"
-        logging.info(msg)
-        yield chatbot+[(inputs, "")], history, msg, all_token_counts
-        index = construct_index(openai_api_key, file_src=files)
-        msg = "索引构建完成,获取回答中……"
-        logging.info(msg)
-        yield chatbot+[(inputs, "")], history, msg, all_token_counts
-        with retrieve_proxy():
-            llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name=selected_model))
-            prompt_helper = PromptHelper(max_input_size = 4096, num_output = 5, max_chunk_overlap = 20, chunk_size_limit=600)
-            from llama_index import ServiceContext
-            service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
-            query_object = GPTVectorStoreIndexQuery(index.index_struct, service_context=service_context, similarity_top_k=5, vector_store=index._vector_store, docstore=index._docstore)
-            query_bundle = QueryBundle(inputs)
-            nodes = query_object.retrieve(query_bundle)
-        reference_results = [n.node.text for n in nodes]
-        reference_results = add_source_numbers(reference_results, use_source=False)
-        display_reference = add_details(reference_results)
-        display_reference = "\n\n" + "".join(display_reference)
-        inputs = (
-            replace_today(PROMPT_TEMPLATE)
-            .replace("{query_str}", inputs)
-            .replace("{context_str}", "\n\n".join(reference_results))
-            .replace("{reply_language}", reply_language )
-        )
-    elif use_websearch:
-        limited_context = True
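-        # Query DuckDuckGo and inline the top results as numbered references in the prompt.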
-        search_results = ddg(inputs, max_results=5)
-        old_inputs = inputs
-        reference_results = []
-        for idx, result in enumerate(search_results):
-            logging.info(f"搜索结果{idx + 1}:{result}")
-            domain_name = urllib3.util.parse_url(result["href"]).host
-            reference_results.append([result["body"], result["href"]])
-            display_reference.append(f"{idx+1}. [{domain_name}]({result['href']})\n")
-        reference_results = add_source_numbers(reference_results)
-        display_reference = "\n\n" + "".join(display_reference)
-        inputs = (
-            replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
-            .replace("{query}", inputs)
-            .replace("{web_results}", "\n\n".join(reference_results))
-            .replace("{reply_language}", reply_language )
-        )
-    else:
-        display_reference = ""
-
-    if len(openai_api_key) == 0 and not shared.state.multi_api_key:
-        status_text = standard_error_msg + no_apikey_msg
-        logging.info(status_text)
-        chatbot.append((inputs, ""))
-        if len(history) == 0:
-            history.append(construct_user(inputs))
-            history.append("")
-            all_token_counts.append(0)
-        else:
-            history[-2] = construct_user(inputs)
-        yield chatbot+[(inputs, "")], history, status_text, all_token_counts
-        return
-    elif len(inputs.strip()) == 0:
-        status_text = standard_error_msg + no_input_msg
-        logging.info(status_text)
-        yield chatbot+[(inputs, "")], history, status_text, all_token_counts
-        return
-
-    if stream:
-        logging.info("使用流式传输")
-        iter = stream_predict(
-            openai_api_key,
-            system_prompt,
-            history,
-            inputs,
-            chatbot,
-            all_token_counts,
-            top_p,
-            temperature,
-            selected_model,
-            fake_input=old_inputs,
-            display_append=display_reference
-        )
-        for chatbot, history, status_text, all_token_counts in iter:
-            if shared.state.interrupted:
-                shared.state.recover()
-                return
-            yield chatbot, history, status_text, all_token_counts
-    else:
-        logging.info("不使用流式传输")
-        chatbot, history, status_text, all_token_counts = predict_all(
-            openai_api_key,
-            system_prompt,
-            history,
-            inputs,
-            chatbot,
-            all_token_counts,
-            top_p,
-            temperature,
-            selected_model,
-            fake_input=old_inputs,
-            display_append=display_reference
-        )
-        yield chatbot, history, status_text, all_token_counts
-
-    logging.info(f"传输完毕。当前token计数为{all_token_counts}")
-    if len(history) > 1 and history[-1]["content"] != inputs:
-        logging.info(
-            "回答为:"
-            + colorama.Fore.BLUE
-            + f"{history[-1]['content']}"
-            + colorama.Style.RESET_ALL
-        )
-
-    if limited_context:
-        history = history[-4:]
-        all_token_counts = all_token_counts[-2:]
-        yield chatbot, history, status_text, all_token_counts
-
-    if stream:
-        max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["streaming"]
-    else:
-        max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["all"]
-
-    if sum(all_token_counts) > max_token and should_check_token_count:
-        print(all_token_counts)
-        count = 0
-        while sum(all_token_counts) > max_token - 500 and sum(all_token_counts) > 0:
-            count += 1
-            del all_token_counts[0]
-            del history[:2]
-        status_text = f"To stay under the token limit, the model forgot the earliest {count} rounds of conversation"
-        logging.info(status_text)
-        yield chatbot, history, status_text, all_token_counts
-
-
-def retry(
-    openai_api_key,
-    system_prompt,
-    history,
-    chatbot,
-    token_count,
-    top_p,
-    temperature,
-    stream=False,
-    selected_model=MODELS[0],
-    reply_language="中文",
-):
-    logging.info("重试中……")
-    if len(history) == 0:
-        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
-        return
-    history.pop()
-    inputs = history.pop()["content"]
-    token_count.pop()
-    iter = predict(
-        openai_api_key,
-        system_prompt,
-        history,
-        inputs,
-        chatbot,
-        token_count,
-        top_p,
-        temperature,
-        stream=stream,
-        selected_model=selected_model,
-        reply_language=reply_language,
-    )
-    logging.info("重试中……")
-    for x in iter:
-        yield x
-    logging.info("重试完毕")
-
-
-def reduce_token_size(
-    openai_api_key,
-    system_prompt,
-    history,
-    chatbot,
-    token_count,
-    top_p,
-    temperature,
-    max_token_count,
-    selected_model=MODELS[0],
-    reply_language="中文",
-):
-    logging.info("开始减少token数量……")
-    iter = predict(
-        openai_api_key,
-        system_prompt,
-        history,
-        summarize_prompt,
-        chatbot,
-        token_count,
-        top_p,
-        temperature,
-        selected_model=selected_model,
-        should_check_token_count=False,
-        reply_language=reply_language,
-    )
-    logging.info(f"chatbot: {chatbot}")
-    flag = False
-    for chatbot, history, status_text, previous_token_count in iter:
-        num_chat = find_n(previous_token_count, max_token_count)
-        logging.info(f"previous_token_count: {previous_token_count}, keeping {num_chat} chats")
-        if flag:
-            chatbot = chatbot[:-1]
-        flag = True
-        history = history[-2*num_chat:] if num_chat > 0 else []
-        token_count = previous_token_count[-num_chat:] if num_chat > 0 else []
-        msg = f"保留了最近{num_chat}轮对话"
-        yield chatbot, history, msg + "," + construct_token_message(
-            token_count if len(token_count) > 0 else [0],
-        ), token_count
-    logging.info(msg)
-    logging.info("减少token数量完毕")
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/upscaler.py b/spaces/supertori/files/stable-diffusion-webui/modules/upscaler.py
deleted file mode 100644
index e2eaa7308af0091b6e8f407e889b2e446679e149..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/upscaler.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import os
-from abc import abstractmethod
-
-import PIL
-import numpy as np
-import torch
-from PIL import Image
-
-import modules.shared
-from modules import modelloader, shared
-
-LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
-
-
-class Upscaler:
-    name = None
-    model_path = None
-    model_name = None
-    model_url = None
-    enable = True
-    filter = None
-    model = None
-    user_path = None
-    scalers: list
-    tile = True
-
-    def __init__(self, create_dirs=False):
-        self.mod_pad_h = None
-        self.tile_size = modules.shared.opts.ESRGAN_tile
-        self.tile_pad = modules.shared.opts.ESRGAN_tile_overlap
-        self.device = modules.shared.device
-        self.img = None
-        self.output = None
-        self.scale = 1
-        self.half = not modules.shared.cmd_opts.no_half
-        self.pre_pad = 0
-        self.mod_scale = None
-
-        if self.model_path is None and self.name:
-            self.model_path = os.path.join(shared.models_path, self.name)
-        if self.model_path and create_dirs:
-            os.makedirs(self.model_path, exist_ok=True)
-
-        # Tiled processing needs OpenCV; fall back gracefully when cv2 is unavailable.
-        self.can_tile = False
-        try:
-            import cv2
-            self.can_tile = True
-        except ImportError:
-            pass
-
-    @abstractmethod
-    def do_upscale(self, img: PIL.Image, selected_model: str):
-        return img
-
-    def upscale(self, img: PIL.Image, scale, selected_model: str = None):
-        self.scale = scale
-        dest_w = int(img.width * scale)
-        dest_h = int(img.height * scale)
-
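-        # Apply the model repeatedly (up to 3 passes) until the target size is reached or the
-        # output stops growing, then fall back to Lanczos resampling for the exact dimensions.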
-        for i in range(3):
-            shape = (img.width, img.height)
-
-            img = self.do_upscale(img, selected_model)
-
-            if shape == (img.width, img.height):
-                break
-
-            if img.width >= dest_w and img.height >= dest_h:
-                break
-
-        if img.width != dest_w or img.height != dest_h:
-            img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)
-
-        return img
-
-    @abstractmethod
-    def load_model(self, path: str):
-        pass
-
-    def find_models(self, ext_filter=None) -> list:
-        return modelloader.load_models(model_path=self.model_path, model_url=self.model_url, command_path=self.user_path)
-
-    def update_status(self, prompt):
-        print(f"\nextras: {prompt}", file=shared.progress_print_out)
-
-
-class UpscalerData:
-    name = None
-    data_path = None
-    scale: int = 4
-    scaler: Upscaler = None
-    model = None
-
-    def __init__(self, name: str, path: str, upscaler: Upscaler = None, scale: int = 4, model=None):
-        self.name = name
-        self.data_path = path
-        self.local_data_path = path
-        self.scaler = upscaler
-        self.scale = scale
-        self.model = model
-
-
-class UpscalerNone(Upscaler):
-    name = "None"
-    scalers = []
-
-    def load_model(self, path):
-        pass
-
-    def do_upscale(self, img, selected_model=None):
-        return img
-
-    def __init__(self, dirname=None):
-        super().__init__(False)
-        self.scalers = [UpscalerData("None", None, self)]
-
-
-class UpscalerLanczos(Upscaler):
-    scalers = []
-
-    def do_upscale(self, img, selected_model=None):
-        return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=LANCZOS)
-
-    def load_model(self, _):
-        pass
-
-    def __init__(self, dirname=None):
-        super().__init__(False)
-        self.name = "Lanczos"
-        self.scalers = [UpscalerData("Lanczos", None, self)]
-
-
-class UpscalerNearest(Upscaler):
-    scalers = []
-
-    def do_upscale(self, img, selected_model=None):
-        return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=NEAREST)
-
-    def load_model(self, _):
-        pass
-
-    def __init__(self, dirname=None):
-        super().__init__(False)
-        self.name = "Nearest"
-        self.scalers = [UpscalerData("Nearest", None, self)]
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Audition CC 2018 11.0.2.2 (x64) UPD Crack Keygen.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Audition CC 2018 11.0.2.2 (x64) UPD Crack Keygen.md
deleted file mode 100644
index 94d3305e2e90e39d326f5e829af8b0dd65492be9..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Audition CC 2018 11.0.2.2 (x64) UPD Crack Keygen.md	
+++ /dev/null
@@ -1,33 +0,0 @@
-
-<h1>Adobe Audition CC 2018 11.0.2.2 (x64): A Powerful Audio Editing Software</h1>
-<p>If you are looking for professional audio editing software that can handle tasks such as recording, mixing, mastering, restoring, and creating podcasts, then you should consider Adobe Audition CC 2018 11.0.2.2 (x64). This is the latest version of Adobe's popular audio workstation, and it offers many new features and enhancements to help you produce high-quality audio content.</p>
-<h2>Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen</h2><br /><p><b><b>DOWNLOAD</b> &#10040; <a href="https://cinurl.com/2uEZ3b">https://cinurl.com/2uEZ3b</a></b></p><br /><br />
-<p>In this article, we will review some of the key features and benefits of Adobe Audition CC 2018 11.0.2.2 (x64) and show you how to download and install it on your Windows PC.</p>
-<h2>What's New in Adobe Audition CC 2018 11.0.2.2 (x64)?</h2>
-<p>Adobe Audition CC 2018 11.0.2.2 (x64) is an update that brings several improvements and bug fixes to the previous version of Adobe Audition CC 2018. Some of the notable changes include:</p>
-<ul>
-<li>New onboarding video that guides you through the basics of Adobe Audition CC 2018</li>
-<li>Improved sequence import from Adobe Premiere Pro that preserves clip effects and transitions</li>
-<li>UI and performance improvements such as faster zooming, scrolling, and waveform rendering</li>
-<li>Support for more audio formats such as FLAC, OPUS, and CAF</li>
-<li>Enhanced multitrack editing with clip grouping, locking, color coding, and more</li>
-<li>New effects such as DeReverb and DeNoise that reduce unwanted background noise and reverb</li>
-<li>New Auto-Ducking feature that automatically adjusts the volume of music tracks based on the voiceover or dialogue tracks</li>
-</ul>
-<h2>How to Download and Install Adobe Audition CC 2018 11.0.2.2 (x64)?</h2>
-<p>To download and install Adobe Audition CC 2018 11.0.2.2 (x64), you need to have a valid Adobe Creative Cloud subscription and a compatible Windows PC with at least 4 GB of RAM and 4 GB of free disk space.</p>
-<p>Follow these steps to get started:</p>
-<ol>
-<li>Go to <a href="https://creative.adobe.com/products/download/audition">https://creative.adobe.com/products/download/audition</a> and sign in with your Adobe ID.</li>
-<li>Select your preferred language and click on Download.</li>
-<li>Run the installer file and follow the instructions on the screen.</li>
-<li>Launch Adobe Audition CC 2018 11.0.2.2 (x64) from your desktop or start menu.</li>
-<li>Enjoy your audio editing experience with Adobe Audition CC 2018 11.0.2.2 (x64)!</li>
-</ol>
-<h2>Conclusion</h2>
-<p>Adobe Audition CC 2018 11.0.2.2 (x64) is powerful audio editing software that can help you create professional-quality audio content for purposes such as music production, podcasting, and video editing.</p>
-<p>It offers many new features and enhancements that make it easier and faster to work with audio files and multitrack projects.</p>
-<p>If you want to try out Adobe Audition CC 2018 11.0.2.2 (x64), you can download it from the official website and install it on your Windows PC with a Creative Cloud subscription.</p>
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Descargar Cyberplanet 6.1 Full 14l.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Descargar Cyberplanet 6.1 Full 14l.md
deleted file mode 100644
index dad33280cb75eba50a04647ce872c4f76d9d3cc7..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Descargar Cyberplanet 6.1 Full 14l.md	
+++ /dev/null
@@ -1,28 +0,0 @@
-
-<h1>How to Download Cyberplanet 6.1 Full 14l for Free</h1>
-<p>Cyberplanet is software that allows you to manage and control your cybercafe or gaming center. It has features such as client PC control, scanner control, printer control, inventory management, a billing system, and more. Cyberplanet 6.1 is the latest version of the software, and it is compatible with diskless systems such as iCafe8, CCU, CCboot, etc.</p>
-<h2>Descargar Cyberplanet 6.1 Full 14l</h2><br /><p><b><b>Download Zip</b> &#10027; <a href="https://cinurl.com/2uEXYl">https://cinurl.com/2uEXYl</a></b></p><br /><br />
-<p>If you want to download Cyberplanet 6.1 Full 14l for free, you can follow these steps:</p>
-<ol>
-<li>Go to SoundCloud and search for "Descargar Cyberplanet 6.1 Full 14l" by Anlibvermgraph1974 or Nick Butler. These are playlists that contain the download link and instructions for the software.</li>
-<li>Listen to the tracks and follow the instructions carefully. You will need to complete some surveys or offers to get the download link.</li>
-<li>Once you get the download link, click on it and save the file to your computer.</li>
-<li>Extract the file using WinRAR or any other software that can handle RAR files.</li>
-<li>Run the setup.exe file and follow the installation wizard.</li>
-<li>After the installation is complete, run the crack.exe file and copy the generated serial number.</li>
-<li>Open Cyberplanet and enter the serial number when prompted.</li>
-<li>Enjoy using Cyberplanet 6.1 Full 14l for free!</li>
-</ol>
-<p>Note: This method is not legal or safe. You may risk getting viruses, malware, or legal issues by downloading cracked software. We do not recommend or endorse this method. Use it at your own risk.</p>
-
-<h2>Benefits of Cyberplanet for your Cybercafe or Gaming Center</h2>
-<p>Cyberplanet is not only a software that helps you manage and control your cybercafe or gaming center, but also a software that brings you many benefits and advantages. Here are some of the benefits of using Cyberplanet for your business:</p>
-<ul>
-<li><b>Protect your business from cyberattacks.</b> Cyberplanet has a built-in antivirus and firewall system that protects your network and devices from malware, ransomware, phishing, and other cyber threats. Cyberplanet also updates itself regularly to keep up with the latest security patches and threat intelligence. By using Cyberplanet, you can reduce the risk of data breaches, downtime, and fines that can damage your reputation and revenue.</li>
-<li><b>Optimize your business operations.</b> Cyberplanet allows you to monitor and control all aspects of your cybercafe or gaming center from a single dashboard. You can set up different rates, plans, and promotions for your customers, manage your inventory and cash flow, print receipts and reports, and more. Cyberplanet also integrates with popular games and platforms such as Steam, Origin, Epic Games, etc. to provide a seamless gaming experience for your customers.</li>
-<li><b>Increase your customer satisfaction and loyalty.</b> Cyberplanet enables you to provide a fast, reliable, and secure service to your customers. You can customize the user interface and login screen of your client PCs, offer different payment methods and loyalty programs, and communicate with your customers via chat or email. Cyberplanet also supports remote access and control of your client PCs, so you can assist your customers anytime and anywhere.</li>
-</ul>
-<p>Cyberplanet is a software that can help you take your cybercafe or gaming center to the next level. By downloading Cyberplanet 6.1 Full 14l for free, you can enjoy all the features and benefits of this software without spending a dime. However, as we mentioned before, this method is not legal or safe, and we do not recommend it. If you want to use Cyberplanet legally and safely, you should purchase a license from the official website or an authorized reseller.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Shippuden Ultimate Ninja Storm 4 Pc Crash.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Shippuden Ultimate Ninja Storm 4 Pc Crash.md
deleted file mode 100644
index 49001e1b5e48e3d69d632157d50b3eef74ab6a8b..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Shippuden Ultimate Ninja Storm 4 Pc Crash.md	
+++ /dev/null
@@ -1,120 +0,0 @@
-<br />
-<h1>Naruto Shippuden Ultimate Ninja Storm 4 PC Crash: How to Fix It</h1>
-
-<p>If you are a fan of Naruto Shippuden Ultimate Ninja Storm 4, you might have encountered a frustrating problem: the game keeps crashing on your PC. This can ruin your gaming experience and make you lose your progress. Fortunately, there are some solutions that can help you fix this issue and enjoy the game without interruptions.</p>
-<h2>naruto shippuden ultimate ninja storm 4 pc crash</h2><br /><p><b><b>Download File</b> &#128504; <a href="https://cinurl.com/2uEYI2">https://cinurl.com/2uEYI2</a></b></p><br /><br />
-
-<p>In this article, we will show you some of the most common causes and fixes for Naruto Shippuden Ultimate Ninja Storm 4 PC crash. We will also give you some tips and tricks to optimize your PC performance and prevent future crashes. Let's get started!</p>
-
-<h2>What Causes Naruto Shippuden Ultimate Ninja Storm 4 PC Crash?</h2>
-
-<p>There are many possible reasons why Naruto Shippuden Ultimate Ninja Storm 4 might crash on your PC. Some of them are:</p>
-
-<ul>
-<li><b>Outdated or incompatible drivers.</b> Your graphics card, sound card, and other drivers need to be updated regularly to ensure compatibility and stability with the game. If you have outdated or corrupted drivers, they might cause conflicts and errors that lead to crashes.</li>
-<li><b>Low system requirements.</b> Naruto Shippuden Ultimate Ninja Storm 4 is a demanding game that requires a powerful PC to run smoothly. If your PC does not meet the minimum or recommended system requirements, you might experience lag, stuttering, freezing, or crashing.</li>
-<li><b>Insufficient memory or disk space.</b> Naruto Shippuden Ultimate Ninja Storm 4 requires a lot of memory and disk space to load and run properly. If your RAM or hard drive is full or fragmented, it might slow down your PC and cause the game to crash.</li>
-<li><b>Malware or virus infection.</b> Malware or viruses can infect your PC and damage your system files, registry entries, or game files. This can result in various errors and crashes when you try to launch or play the game.</li>
-<li><b>Software conflicts.</b> Some programs or services running in the background might interfere with the game and cause it to crash. For example, antivirus software, firewall software, overlay software, recording software, etc.</li>
-<li><b>Game bugs or glitches.</b> Naruto Shippuden Ultimate Ninja Storm 4 is not a perfect game and it might have some bugs or glitches that cause it to crash. For example, some users reported that the game crashes when they try to use certain characters or modes.</li>
-</ul>
-
-<h2>How to Fix Naruto Shippuden Ultimate Ninja Storm 4 PC Crash?</h2>
-
-<p>Depending on the cause of your problem, there are different solutions that you can try to fix Naruto Shippuden Ultimate Ninja Storm 4 PC crash. Here are some of them:</p>
-
-<ul>
-<li><b>Update your drivers.</b> One of the first things you should do is to check if your drivers are up to date and compatible with the game. You can use a driver updater tool like Driver Booster or Driver Easy to scan your PC and update your drivers automatically. Alternatively, you can go to the official websites of your hardware manufacturers and download the latest drivers manually.</li>
-<li><b>Adjust your system settings.</b> Another thing you should do is to make sure that your PC meets the system requirements for the game. You can check them on the Steam store page or on the official website of the game. If your PC is below the minimum requirements, you might need to upgrade your hardware or lower your graphics settings in the game. If your PC meets the recommended requirements, you can try to optimize your system performance by doing the following:</li>
-<ul>
-<li>Clean up your disk space by deleting unnecessary files and programs.</li>
-<li>Defragment your hard drive by using a tool like Disk Defragmenter or Smart Defrag.</li>
-<li>Increase your virtual memory by following this guide: https://www.windowscentral.com/how-change-virtual-memory-size-windows-10</li>
-<li>Disable unnecessary startup programs by using a tool like Task Manager or CCleaner.</li>
-<li>Close any background programs that might be using up your CPU, RAM, or disk resources.</li>
-</ul>
-<li><b>Scan your PC for malware or viruses.</b> Another possible cause of Naruto Shippuden Ultimate Ninja Storm 4 PC crash is malware or virus infection. You should scan your PC regularly with a reliable antivirus software like Avast, AVG, or Malwarebytes. You should also update your antivirus software and database regularly to protect your PC from new threats.</li>
-<li><b>Disable any software conflicts.</b> Another possible cause of Naruto Shippuden Ultimate Ninja Storm 4 PC crash is software conflicts. You should disable any programs or services that might interfere with the game and cause it to crash. Some of them are:</li>
-<ul>
-<li>Antivirus software: You should temporarily disable your antivirus software while playing the game. You can also add the game folder and executable file to the exclusion list of your antivirus software.</li>
-<li>Firewall software: You should temporarily disable your firewall software while playing the game. You can also add the game folder and executable file to the exception list of your firewall software.</li>
-<li>Overlay software: You should disable any overlay software that draws on top of the game screen and might cause it to crash. For example, Steam overlay, Discord overlay, Nvidia GeForce Experience overlay, etc.</li>
-<li>Recording software: You should disable any recording software that might record your gameplay and cause it to crash. For example, OBS Studio, Fraps, Bandicam, etc.</li>
-</ul>
-<li><b>Patch any game bugs or glitches.</b> Another possible cause of Naruto Shippuden Ultimate Ninja Storm 4 PC crash is game bugs or glitches. You should check if there are any updates or patches available for the game on Steam or on the official website of the game. You should also check if there are any community fixes or mods that might solve some of the issues with the game. For example, some users reported that using this mod: https://www.nexusmods.com/narutoshippudenultimateninjastorm4/mods/8 fixed their crashing issues with certain characters or modes.</li>
-</ul>
-
-<p>If none of these solutions worked for you, you might need to contact the support team of the game developer or publisher for further assistance. You can also post your problem on the Steam community forums or on other online platforms where other players might help you out.</p>
-
-<h2>Conclusion</h2>
-
-<p>Naruto Shippuden Ultimate Ninja Storm 4 is an amazing game that lets you experience the epic story and battles of Naruto and his friends. However, it can also be frustrating when it keeps crashing on your PC and prevents you from enjoying it fully. Hopefully, this article helped you fix Naruto Shippuden Ultimate Ninja Storm 4 PC crash and optimize your PC performance for a smooth gaming experience. If you have any questions or suggestions, feel free to leave a comment below!</p>
-<h2>How to Optimize Naruto Shippuden Ultimate Ninja Storm 4 for 60 FPS?</h2>
-
-<p>One of the most desired features of Naruto Shippuden Ultimate Ninja Storm 4 is the ability to play the game at 60 frames per second (FPS). This can enhance your gaming experience and make the game look smoother and more realistic. However, not everyone can achieve this performance level on their PC. Some might experience slowdowns, lags, or crashes when they try to play the game at 60 FPS.</p>
-
-<p>So how can you optimize Naruto Shippuden Ultimate Ninja Storm 4 for 60 FPS? Here are some tips that can help you achieve this goal:</p>
-
-<ul>
-<li><b>Enable V-sync.</b> V-sync is a feature that synchronizes your monitor's refresh rate with your game's frame rate. This can prevent screen tearing and stuttering that might occur when your game's frame rate is higher or lower than your monitor's refresh rate. To enable V-sync, go to the game's settings menu and turn it on. You can also enable it from your graphics card's control panel.</li>
-<li><b>Lower your graphics settings.</b> If your PC is not powerful enough to run the game at 60 FPS with high graphics settings, you might need to lower them to improve your performance. You can adjust your graphics settings from the game's settings menu or from your graphics card's control panel. Some of the settings that you can lower are resolution, anti-aliasing, shadows, textures, effects, etc.</li>
-<li><b>Use a FPS limiter.</b> A FPS limiter is a tool that limits your game's frame rate to a certain value. This can prevent your PC from overworking and overheating when running the game at 60 FPS. It can also reduce the fluctuations and drops in your frame rate that might cause slowdowns or crashes. You can use a FPS limiter tool like RivaTuner Statistics Server or MSI Afterburner to limit your game's frame rate to 60 FPS.</li>
-</ul>
-
-<h2>How to Enjoy Naruto Shippuden Ultimate Ninja Storm 4 on PC?</h2>
-
-<p>Naruto Shippuden Ultimate Ninja Storm 4 is a great game that offers a lot of fun and excitement for Naruto fans and gamers alike. It features a rich story mode that covers the events of the Fourth Shinobi World War, a large roster of playable characters with unique abilities and transformations, a variety of modes and challenges to test your skills and creativity, and stunning graphics and animations that bring the Naruto world to life.</p>
-
-<p>To enjoy Naruto Shippuden Ultimate Ninja Storm 4 on PC, you need to make sure that you have a compatible and stable PC that can run the game smoothly and without crashes. You also need to optimize your PC performance and settings to achieve the best possible gaming experience. You can follow the tips and solutions that we have provided in this article to fix Naruto Shippuden Ultimate Ninja Storm 4 PC crash and optimize it for 60 FPS.</p>
-
-<p>We hope that this article helped you solve your problems with Naruto Shippuden Ultimate Ninja Storm 4 PC crash and enjoy the game without interruptions. If you have any questions or suggestions, feel free to leave a comment below!</p>
-<h2>How to Troubleshoot Naruto Shippuden Ultimate Ninja Storm 4 PC Crash?</h2>
-
-<p>If you have tried all the solutions above and you still encounter Naruto Shippuden Ultimate Ninja Storm 4 PC crash, you might need to troubleshoot your problem and find out the exact cause of it. Here are some steps that you can follow to troubleshoot Naruto Shippuden Ultimate Ninja Storm 4 PC crash:</p>
-
-<ul>
-<li><b>Check the error message.</b> If you get an error message when the game crashes, you should read it carefully and see if it gives you any clue about the cause of the crash. For example, if the error message says something like \"NARUTO SHIPPUDEN: Ultimate Ninja STORM 4 has stopped working\" or \"NSUNS4.exe has encountered a problem and needs to close\", you might have a problem with your game files or your system files.</li>
-<li><b>Verify your game files.</b> One of the possible reasons why your game files might be corrupted or missing is because of a faulty installation or an incomplete update. To fix this, you can verify your game files on Steam and let it repair or download any missing or damaged files. To do this, follow these steps:</li>
-<ul>
-<li>Right-click on NARUTO SHIPPUDEN: Ultimate Ninja STORM 4 in your Steam library and select Properties.</li>
-<li>Go to the Local Files tab and click on Verify Integrity of Game Files.</li>
-<li>Wait for the process to finish and restart your game.</li>
-</ul>
-<li><b>Repair your system files.</b> Another possible reason why your system files might be corrupted or missing is because of a malware or virus infection, a power outage, a hardware failure, or a system crash. To fix this, you can use a system file checker tool like SFC or DISM to scan and repair your system files. To do this, follow these steps:</li>
-<ul>
-<li>Open the Start menu and type cmd in the search box.</li>
-<li>Right-click on Command Prompt and select Run as administrator.</li>
-<li>Type sfc /scannow and press Enter.</li>
-<li>Wait for the process to finish and restart your PC.</li>
-<li>If sfc /scannow did not work, type DISM /Online /Cleanup-Image /RestoreHealth and press Enter.</li>
-<li>Wait for the process to finish and restart your PC.</li>
-</ul>
-</ul>
-
-<h2>How to Contact Naruto Shippuden Ultimate Ninja Storm 4 Support Team?</h2>
-
-<p>If none of the solutions or troubleshooting steps above worked for you, you might need to contact the support team of Naruto Shippuden Ultimate Ninja Storm 4 for further assistance. They might be able to help you with your specific problem and provide you with more information or solutions. Here are some ways that you can contact them:</p>
-
-<ul>
-<li><b>Email them.</b> You can send an email to support@bandainamcoent.com and explain your problem in detail. You should also include your PC specs, your error message, your game version, and any screenshots or videos that might help them understand your issue better.</li>
-<li><b>Call them.</b> You can call their customer service hotline at +1-888-776-2626 and speak to a representative who can assist you with your problem. You should have your PC specs, your error message, your game version, and any screenshots or videos ready before you call them.</li>
-<li><b>Visit their website.</b> You can visit their official website at https://www.bandainamcoent.com/ and look for their support section. You can find their FAQ page, their contact form, their live chat option, and their social media accounts there. You can use any of these methods to reach out to them and ask for help with your problem.</li>
-</ul>
-
-<h2>Conclusion</h2>
-
-<p>Naruto Shippuden Ultimate Ninja Storm 4 is a great game that offers a lot of fun and excitement for Naruto fans and gamers alike. It features a rich story mode that covers the events of the Fourth Shinobi World War, a large roster of playable characters with unique abilities and transformations, a variety of modes and challenges to test your skills and creativity, and stunning graphics and animations that bring the Naruto world to life.</p>
-
-<p>To enjoy Naruto Shippuden Ultimate Ninja Storm 4 on PC, you need to make sure that you have a compatible and stable PC that can run the game smoothly and without crashes. You also need to optimize your PC performance and settings to achieve the best possible gaming experience. You can follow the tips and solutions that we have provided in this article to fix Naruto Shippuden Ultimate Ninja Storm 4 PC crash and optimize it for 60 FPS.</p>
-
-<p>We hope that this article helped you solve your problems with Naruto Shippuden Ultimate Ninja Storm 4 PC crash and enjoy the game without interruptions. If you have any questions or suggestions, feel free to leave a comment below!</p>
\ No newline at end of file
diff --git a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/demo/visualize/plotter.py b/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/demo/visualize/plotter.py
deleted file mode 100644
index e960ba8f4f1804495added79fd5cb3a2131d2cbf..0000000000000000000000000000000000000000
--- a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/demo/visualize/plotter.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Demo libs
-import spiga.demo.visualize.layouts.plot_basics as pl_basic
-import spiga.demo.visualize.layouts.plot_bbox as pl_bbox
-import spiga.demo.visualize.layouts.plot_landmarks as pl_lnd
-import spiga.demo.visualize.layouts.plot_headpose as pl_hpose
-
-
-class Plotter:
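-    """Bundles the demo's drawing layouts (basic shapes, bounding boxes, landmarks, headpose)."""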
-
-    def __init__(self):
-        self.basic = pl_basic.BasicLayout()
-        self.bbox = pl_bbox.BboxLayout()
-        self.landmarks = pl_lnd.LandmarkLayout()
-        self.hpose = pl_hpose.HeadposeLayout()
diff --git a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/inference/pretreatment.py b/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/inference/pretreatment.py
deleted file mode 100644
index c2099056356301d2f9300b344f6273f4c75b5b31..0000000000000000000000000000000000000000
--- a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/inference/pretreatment.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from torchvision import transforms
-import numpy as np
-from PIL import Image
-import cv2
-
-from spiga.data.loaders.transforms import TargetCrop, ToOpencv, AddModel3D
-
-
-def get_transformers(data_config):
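-    # Pipeline: OpenCV BGR -> PIL RGB, crop around the target, convert back to OpenCV,
-    # then normalize to [0, 1] and permute HWC -> CHW.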
-    transformer_seq = [
-        Opencv2Pil(),
-        TargetCrop(data_config.image_size, data_config.target_dist),
-        ToOpencv(),
-        NormalizeAndPermute()]
-    return transforms.Compose(transformer_seq)
-
-
-class NormalizeAndPermute:
-    def __call__(self, sample):
-        image = np.array(sample['image'], dtype=float)
-        image = np.transpose(image, (2, 0, 1))
-        sample['image'] = image / 255
-        return sample
-
-
-class Opencv2Pil:
-    def __call__(self, sample):
-        image = cv2.cvtColor(sample['image'], cv2.COLOR_BGR2RGB)
-        sample['image'] = Image.fromarray(image)
-        return sample
-
diff --git a/spaces/swufewyd/xyz-nlp-XuanYuan2.0/index.html b/spaces/swufewyd/xyz-nlp-XuanYuan2.0/index.html
deleted file mode 100644
index 58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000
--- a/spaces/swufewyd/xyz-nlp-XuanYuan2.0/index.html
+++ /dev/null
@@ -1,19 +0,0 @@
-<!DOCTYPE html>
-<html>
-	<head>
-		<meta charset="utf-8" />
-		<meta name="viewport" content="width=device-width" />
-		<title>My static Space</title>
-		<link rel="stylesheet" href="style.css" />
-	</head>
-	<body>
-		<div class="card">
-			<h1>Welcome to your static Space!</h1>
-			<p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
-			<p>
-				Also don't forget to check the
-				<a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
-			</p>
-		</div>
-	</body>
-</html>
diff --git a/spaces/terfces0erbo/CollegeProjectV2/AnyDesk Software 5.3.3 Crack !FULL! Free Download.md b/spaces/terfces0erbo/CollegeProjectV2/AnyDesk Software 5.3.3 Crack !FULL! Free Download.md
deleted file mode 100644
index 82ecf3c25d653a5add2814e0384a96889a1d8620..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/AnyDesk Software 5.3.3 Crack !FULL! Free Download.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>AnyDesk Software 5.3.3 Crack Free Download</h2><br /><p><b><b>DOWNLOAD</b> &#8250; <a href="https://bytlly.com/2uGl9a">https://bytlly.com/2uGl9a</a></b></p><br /><br />
-<br />
-Download AnyDesk for Windows for free and access, control, and manage all your devices while working remotely. ▼ Whether you are at home or at your workplace, you can remotely access your laptop, desktop, or smartphone. 8a78ff9644<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Assassins Creed Brotherhood Updated DLCs Repack Mr DJ DRM Free [VERIFIED].md b/spaces/terfces0erbo/CollegeProjectV2/Assassins Creed Brotherhood Updated DLCs Repack Mr DJ DRM Free [VERIFIED].md
deleted file mode 100644
index 99f67fd2f2741fdd0c9327b1acc0369ee454da0e..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Assassins Creed Brotherhood Updated DLCs Repack Mr DJ DRM Free [VERIFIED].md	
+++ /dev/null
@@ -1,10 +0,0 @@
-<br />
-<p>Assassin's Creed II Repack BlackBox PC Free Download. If you love the Assassin's Creed games, then you are in the right place. Assassin's Creed III Repack BlackBox PC Free Download. All in one and one update. Step 4 -Complete the survey. Assassin's Creed III Ultimate. </p>
-<h2>Assassins Creed Brotherhood updated DLC's repack Mr DJ DRM Free</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://bytlly.com/2uGiCY">https://bytlly.com/2uGiCY</a></b></p><br /><br />
-<p>Welcome to the Best Free Download Books, eBooks and Audiobooks.. /post/32bit-assassins-creed-brotherhood-updated-dlc-s-repack-mr-dj-drm-torrent-build-zip. Assassin's Creed III Brotherhood - Indigo Games. - Assassin's Creed: Unity - Paradox Interactive. Assassin's Creed III Game Download new game pc iso, Repack pc game, Crack game pc gog. </p>
-<p>Assassins Creed Legacy of the First Blade is a new DLC pack for Assassin's Creed Odyssey. Assassin's Creed. No Experience Needed Full Assassins Creed guide explained look at all aspects of how to play the game with the help of Assassin's Creed Game Walkthrough PC Game. 9 HD Wallpapers from Assassin's Creed Black Flag for PC - WallpapersNinja. Assassin's Creed: Brotherhood has moved from Xbox to the PlayStation. DLC) to get the Black Flag edition of. </p>
-<p>The Brotherhood was originally released on Xbox, PlayStation 3, and Microsoft Windows, with free DLC. Assassin's Creed IV: Black Flag is an action-adventure stealth video game developed by Ubisoft Milan and published by Ubisoft. </p>
-<p></p>
-<p>Assassin's Creed III Free Download-All About Pirates Repack Full PC Game. Peace Broken Legacy of the First Blade DLC - Xbox and PlayStation 3, Master of Orion 2 DLC is available now on the PC via Steam. Assassin's Creed IV: Black Flag PC Game Setup Disc 4. </p> 899543212b<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Dave Koz-Saxophonic Full [PORTABLE] Album Zip 13.md b/spaces/terfces0erbo/CollegeProjectV2/Dave Koz-Saxophonic Full [PORTABLE] Album Zip 13.md
deleted file mode 100644
index d4f68b2583fc5be67293c6bbeb70225b093cebc8..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Dave Koz-Saxophonic Full [PORTABLE] Album Zip 13.md	
+++ /dev/null
@@ -1,90 +0,0 @@
-
-<h1>Dave Koz-Saxophonic Full Album Zip 13: A Smooth Jazz Masterpiece</h1>
-<p>If you are a fan of smooth jazz, you might have heard of Dave Koz, one of the most popular and talented saxophonists in the genre. He has released several albums over the years, but one of his most acclaimed works is Saxophonic, which came out in 2003. This album showcases his skills and versatility as a saxophone player, as well as his collaborations with other artists and musicians. In this article, we will review the main aspects of Dave Koz-Saxophonic Full Album Zip 13, such as its tracklist, features, benefits, and drawbacks.</p>
-<h2>What is Dave Koz-Saxophonic Full Album Zip 13?</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 is a compressed file that contains all the songs from Dave Koz's Saxophonic album in mp3 format. You can download this file from various sources online, such as LexCliq, FileCR, or Xiaomi Community. However, you should be careful when downloading from unknown or unverified sources, as they might contain malware, viruses, or other harmful files that can damage your computer or compromise your data. You should also use a VPN service to protect your privacy and security when downloading from torrent sites.</p>
-<h2>Dave Koz-Saxophonic full album zip 13</h2><br /><p><b><b>Download Zip</b> &#9675; <a href="https://bytlly.com/2uGkDm">https://bytlly.com/2uGkDm</a></b></p><br /><br />
-<p>Once you have downloaded Dave Koz-Saxophonic Full Album Zip 13, you can unzip it and enjoy listening to the songs on your computer or mobile device (a short extraction sketch follows the tracklist below). The album has 12 tracks, each with a different style and mood. The tracklist is as follows:</p>
-<ul>
-<li>Saxophonic - Saxophonic (2:19)</li>
-<li>The Other Side Of Saxophonic (2:27)</li>
-<li>I'm Be There (4:10)</li>
-<li>Dreams of Gold (5:01)</li>
-<li>Looking Up (4:29)</li>
-<li>Love Changes Everything (4:50)</li>
-<li>Cheng Fu (4:42)</li>
-<li>You've Got Me (4:19)</li>
-<li>Tenderness (5:29)</li>
-<li>Forgotten Times (4:36)</li>
-<li>Together (5:03)</li>
-<li>Love Changes Everything (lyrics) (4:50)</li>
-</ul>
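-<p>As promised above, here is a minimal extraction sketch in Python. It is an editor's illustration, not part of the original download, and the archive name is a hypothetical placeholder:</p>
-<pre><code>
-# Hedged sketch: extract a downloaded .zip of .mp3 files and list the tracks.
-import zipfile
-from pathlib import Path
-
-archive = Path("saxophonic_album.zip")  # hypothetical file name
-target = Path("saxophonic")
-with zipfile.ZipFile(archive) as zf:
-    zf.extractall(target)
-print(sorted(p.name for p in target.glob("*.mp3")))
-</code></pre>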
-<h2>What are the Features of Dave Koz-Saxophonic Full Album Zip 13?</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 has some features that make it a great choice for smooth jazz lovers. Some of these features are:</p>
-<ul>
-<li>It showcases Dave Koz's talent and versatility as a saxophone player. He plays different types of saxophones, such as soprano, alto, tenor, and baritone, and creates different sounds and effects with them.</li>
-<li>It features collaborations with other artists and musicians, such as Brian McKnight, Antwaun Stanley, Alvin Ailey American Theater, Evan English, and The Branston Choir. These collaborations add diversity and richness to the album.</li>
-<li>It offers a variety of styles and moods, from upbeat and funky to romantic and soothing. The album has something for everyone, whether you want to dance, relax, or feel inspired.</li>
-<li>It has high-quality sound and production. The album was recorded and mixed by some of the best engineers and producers in the industry, such as Phil Ramone, Jeff Lorber, Paul Brown, Darren Rahn, etc.</li>
-</ul>
-<h2>What are the Benefits of Dave Koz-Saxophonic Full Album Zip 13?</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 offers many benefits for users who want to enjoy smooth jazz music. Some of these benefits are:</p>
-<ul>
-<li>It packs the whole album into a single compressed file, so one download gets you every track.</li>
-<li>Its mp3 tracks play on virtually any computer, phone, tablet, or portable music player.</li>
-<li>It does not require a product key or activation to download and use.</li>
-<li>It can be transferred to portable devices or burned to a CD or DVD for offline listening.</li>
-</ul>
-<h2>What are the Drawbacks of Dave Koz-Saxophonic Full Album Zip 13?</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 is not a perfect album, and it has some drawbacks that you should be aware of before downloading it. Some of these drawbacks are:</p>
-<ul>
-<li>It is no longer supported by Dave Koz or Capitol Records, so you will not receive any updates or corrections to the files.</li>
-<li>It might not work well with some media players or devices; older players or unusual hardware might play the mp3 files with errors, stutters, or reduced quality.</li>
-<li>It might not have all the songs that you like or want. Dave Koz-Saxophonic Full Album Zip 13 has 12 tracks, but Dave Koz has released many more songs and albums since then. You might miss out on some of his latest works and collaborations that might suit your taste better.</li>
-</ul>
-<h2>How to Listen to Dave Koz-Saxophonic Full Album Zip 13</h2>
-<p>After you have downloaded and unzipped Dave Koz-Saxophonic Full Album Zip 13, you can listen to it on your preferred media player or device. You can also transfer it to your mobile phone, tablet, iPod, or other portable devices. You can also burn it to a CD or DVD if you want to have a physical copy of the album. However, you should respect the copyright and intellectual property rights of Dave Koz and Capitol Records, and not share, distribute, or sell the album without their permission.</p>
-<p>When you listen to Dave Koz-Saxophonic Full Album Zip 13, you will be immersed in the smooth jazz sounds and melodies that Dave Koz creates with his saxophones. You will also appreciate the contributions of his collaborators and guests, who add their own flavors and styles to the album. You will enjoy the variety and diversity of the tracks, which range from upbeat and funky to romantic and soothing. You will also notice the high-quality sound and production of the album, which enhances its appeal and value.</p>
-<h2>What are the Reviews of Dave Koz-Saxophonic Full Album Zip 13?</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 has received positive reviews from critics and fans alike. It has been praised for its smooth jazz quality, its saxophone excellence, its collaborations, its variety, and its sound and production. Some of the reviews are:</p>
-<ul>
-<li>Allmusic: \"Saxophonic is a solid effort that showcases Koz's skills as a musician and as a savvy pop-jazz producer.\" [1]</li>
-<li>Smooth Jazz Therapy: \"Saxophonic is an album that has something for everyone. It is a collection of songs that are both accessible and sophisticated.\" [4]</li>
-<li>Jazz Review: \"Saxophonic is a testament to Dave Koz's ability to create music that is both entertaining and meaningful.\" [5]</li>
-<li>Smooth Jazz News: \"Saxophonic is a smooth jazz masterpiece that will delight fans of Dave Koz and newcomers alike.\" [6]</li>
-<li>Amazon.com: \"Saxophonic is one of the best albums I have ever heard. Dave Koz is a genius on the saxophone, and his guests are amazing too.\" [7]</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Dave Koz-Saxophonic Full Album Zip 13 is a smooth jazz masterpiece that showcases Dave Koz's talent and versatility as a saxophone player. It features collaborations with other artists and musicians, and offers a variety of styles and moods. It also has high-quality sound and production, and does not require a product key or activation for downloading. However, it also has some drawbacks that you should consider before downloading it, such as its lack of support, compatibility, and updates. If you are looking for a smooth jazz album that can satisfy your musical needs, you might want to download Dave Koz-Saxophonic Full Album Zip 13 today.</p> 3cee63e6c2<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/theaster/imoitari/Dockerfile b/spaces/theaster/imoitari/Dockerfile
deleted file mode 100644
index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000
--- a/spaces/theaster/imoitari/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-# Slim Node.js 18 base image
-FROM node:18-bullseye-slim
-# git is needed to clone the proxy source below
-RUN apt-get update && \
-    apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-# Copying the Dockerfile alongside the globs keeps COPY from failing
-# when the optional greeting.md/.env files are absent
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-# Hugging Face Spaces routes traffic to port 7860 by default
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HTRI Xchanger Suite 6.00 [FULL Version] Download User-Friendly Interface and Parallel-Processing Feature for Faster Calculations.md b/spaces/tialenAdioni/chat-gpt-api/logs/HTRI Xchanger Suite 6.00 [FULL Version] Download User-Friendly Interface and Parallel-Processing Feature for Faster Calculations.md
deleted file mode 100644
index cc8a66efe9adb90a927cfcd43b8d4fff04972f07..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/HTRI Xchanger Suite 6.00 [FULL Version] Download User-Friendly Interface and Parallel-Processing Feature for Faster Calculations.md	
+++ /dev/null
@@ -1,129 +0,0 @@
-<br />
-<h1>HTRI Xchanger Suite 6.00: A Complete Solution for Heat Transfer Equipment Design and Simulation</h1>
-<p>If you are looking for reliable and powerful software to design, rate, and simulate heat transfer equipment, you should consider HTRI Xchanger Suite 6.00. It is one of the most advanced thermal process design and simulation tools on the market, and it can handle a wide variety of heat transfer equipment, such as air coolers, shell-and-tube exchangers, plate-and-frame exchangers, and more.</p>
-<h2>HTRI Xchanger Suite 6.00 [FULL Version] Download</h2><br /><p><b><b>Download</b> &#9733; <a href="https://urlcod.com/2uK8W5">https://urlcod.com/2uK8W5</a></b></p><br /><br />
-<h2>Why Choose HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 has many advantages over other software in the field of heat transfer engineering. Some of the main benefits are:</p>
-<ul>
-<li>It is based on more than 60 years of applied research and data collected on industrial-scale heat transfer equipment.</li>
-<li>It has flexible and rigorous components that allow you to specify the exchanger geometry in detail.</li>
-<li>It has a modern and intuitive user interface that lets you open multiple case files simultaneously.</li>
-<li>It has fully incremental calculation modules that compute localized heat transfer and pressure drop using the local fluid properties.</li>
-<li>It includes VMGThermo, which is an extensive and rigorous fluid physical property generator from Virtual Materials Group Inc.</li>
-<li>It has comprehensive online help that provides background information, graphs, explanation of input panels, and output reports.</li>
-</ul>
-<h2>What are the Components of HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 consists of three main components that cover different types of heat transfer equipment:</p>
-<ol>
-<li>Xace: This component deals with crossflow heat exchangers, such as air coolers and heat recovery bundles. You can design, rate, and simulate these exchangers with or without fans, enter any tubeside and outside process conditions, and access many special features.</li>
-<li>Xist: This component deals with shell-and-tube exchangers. You can design, rate, and simulate these exchangers with various geometries, such as TEMA E, F, G, H, J, K, and X shells, kettle and thermosiphon reboilers, falling film evaporators, tubeside reflux condensers, and more. You can also analyze single-phase, condensation, and boiling processes.</li>
-<li>Xphe: This component deals with compact heat exchangers, such as plate-and-frame exchangers. You can design, rate, and simulate these exchangers with incremental calculations for each plate channel using local properties and process conditions. You can also use a unique research-based port maldistribution procedure to determine the flow through each plate channel. You can also analyze single-phase, condensation, and boiling processes.</li>
-</ol>
-<h2>How to Download HTRI Xchanger Suite 6.00?</h2>
-<p>If you want to download HTRI Xchanger Suite 6.00 [FULL Version], you need to have a valid license from HTRI. You can contact them at htri@htri.net for more information on how to obtain a license. Once you have a license, you can download the software from their website or from a trusted source online.</p>
-<p>HTRI Xchanger Suite 6.00 is compatible with Microsoft Windows 8.1, Windows 10, Server 2012, Server 2016, Server 2019, or Server 2022. You also need a 32- or 64-bit processor, 1 GB RAM, up to 500 MB of available hard drive space for the selected modules, and an SVGA (800 × 600 resolution) or higher graphics adapter and display.</p>
-<h2>Conclusion</h2>
-<p>HTRI Xchanger Suite 6.00 is a comprehensive solution for heat transfer equipment design and simulation. It can handle various types of exchangers with high accuracy and flexibility. It is based on extensive research and data collected on industrial-scale equipment. It has a user-friendly interface and online help that make it easy to use. It also includes a powerful fluid physical property generator that enhances its performance.</p>
-<p>If you are interested in HTRI Xchanger Suite 6.00 [FULL Version] Download , you should contact HTRI for a license and download the software from their website or a trusted source online. You will not regret using this software for your heat transfer engineering projects.</p>
-<h2>How to Install HTRI Xchanger Suite 6.00?</h2>
-<p>After you download HTRI Xchanger Suite 6.00 [FULL Version], you need to install it on your computer. The installation process is simple and straightforward. Here are the steps you need to follow:</p>
-<ol>
-<li>Run the setup file and follow the instructions on the screen.</li>
-<li>Select the components you want to install and the destination folder.</li>
-<li>Enter your license information and activate the software.</li>
-<li>Restart your computer if prompted.</li>
-<li>Launch the software and enjoy its features.</li>
-</ol>
-<h2>How to Use HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 is easy to use and has a user-friendly interface. You can start by creating a new case file or opening an existing one. You can then enter the input data for your heat transfer equipment, such as process conditions, geometry, materials, fouling factors, etc. You can also import data from other sources, such as Excel files or HTRI eLibrary.</p>
-<p>After entering the input data, you can run the calculation module for your selected component. You can choose between design, rating, or simulation modes. You can also adjust the calculation options and preferences according to your needs. The software will then compute the heat transfer and pressure drop for your equipment and display the results in various formats, such as tables, graphs, reports, etc.</p>
-<p>You can also view and edit the exchanger layout using the interactive drawing feature. You can zoom in and out, pan, rotate, and modify the dimensions and positions of the elements. You can also export the drawing to other formats, such as DXF or PDF.</p>
-<p>You can also perform various analyses and optimizations using HTRI Xchanger Suite 6.00. You can compare different cases, perform sensitivity studies, optimize the exchanger performance and cost, check for vibration issues, etc. You can also use the online help to get more information and guidance on how to use the software.</p>
-<h2>What are the Benefits of HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 has many benefits for heat transfer engineers and designers. Some of the main benefits are:</p>
-<ul>
-<li>It saves time and money by reducing trial-and-error and rework.</li>
-<li>It improves accuracy and reliability by using validated methods and data.</li>
-<li>It enhances creativity and innovation by allowing you to explore different scenarios and options.</li>
-<li>It increases productivity and efficiency by streamlining the workflow and providing comprehensive results.</li>
-<li>It supports collaboration and communication by allowing you to share and exchange data with other users and software.</li>
-</ul>
-<h2>What are the Challenges of HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 is powerful and advanced software, but it also has some challenges and limitations that users should be aware of. Some of the common challenges are:</p>
-<ul>
-<li>It requires a valid license from HTRI to download and use the software. The license is not free and may have some restrictions on the number of users, installations, or cases.</li>
-<li>It requires a high level of technical knowledge and expertise to use the software effectively. Users need to understand the theory and methods behind the calculations, as well as the input data and output results.</li>
-<li>It may not cover all types of heat transfer equipment or scenarios that users may encounter in their projects. Users may need to use other software or tools to complement HTRI Xchanger Suite 6.00.</li>
-<li>It may not always provide accurate or reliable results for complex or non-standard cases. Users need to verify and validate the results using other sources or methods.</li>
-<li>It may not always be compatible or integrated with other software or systems that users may use in their projects. Users may need to convert or transfer data between different formats or platforms.</li>
-</ul>
-<h2>How to Get Help and Support for HTRI Xchanger Suite 6.00?</h2>
-<p>If you encounter any problems or difficulties while using HTRI Xchanger Suite 6.00, you can get help and support from various sources. Some of the main sources are:</p>
-<ul>
-<li>The online help that is included in the software. You can access it by pressing F1 or clicking on the Help menu. The online help provides background information, graphs, explanation of input panels and output reports, and troubleshooting tips.</li>
-<li>The HTRI website that provides various resources and information for users. You can visit https://www.htri.net/ to access eLibrary, Service Packs, HTRI Design Manual, training courses, webinars, events, publications, FAQs, and more.</li>
-<li>The HTRI support team that provides technical assistance and guidance for users. You can contact them by email at support@htri.net or by phone at +1 (979) 690-5050.</li>
-<li>The HTRI community that consists of other users and experts who can share their knowledge and experience with you. You can join the HTRI LinkedIn group at https://www.linkedin.com/groups/1790375/ to network and interact with other members.</li>
-</ul>
-<h2>What are the Reviews of HTRI Xchanger Suite 6.00?</h2>
-<p>HTRI Xchanger Suite 6.00 has received positive reviews from many users and experts who have used it for their heat transfer engineering projects. Some of the common praises are:</p>
-<ul>
-<li>It is a very comprehensive and versatile tool that can handle almost any type of heat transfer equipment and scenario.</li>
-<li>It is very accurate and reliable, using validated methods and data from extensive research and testing.</li>
-<li>It is very user-friendly and intuitive, with a clear, modern interface and online help.</li>
-<li>It is very fast and efficient, performing complex calculations and simulations in a short time.</li>
-<li>It is very flexible and customizable, allowing users to adjust the input data, calculation options, and output results to their needs.</li>
-</ul>
-<p>However, HTRI Xchanger Suite 6.00 also has some drawbacks and criticisms that users should be aware of. Some of the common complaints are:</p>
-<ul>
-<li>It is expensive and restrictive, requiring a valid license from HTRI to download and use.</li>
-<li>It is technical and complex, requiring a high level of knowledge and expertise to use effectively.</li>
-<li>It is specific and specialized, and may not cover all types of heat transfer equipment or scenarios that users encounter in their projects.</li>
-<li>It is sensitive to its inputs, and may not always provide accurate or reliable results for non-standard or complex cases.</li>
-<li>It is largely standalone, and may not always be compatible or integrated with other software or systems used in a project.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>HTRI Xchanger Suite 6.00 is powerful and advanced software that can design, rate, and simulate a variety of heat transfer equipment. It is based on extensive research and data collected on industrial-scale equipment. It has a user-friendly interface and online help that make it easy to use. It also includes a powerful fluid physical property generator that enhances its performance.</p>
-<p>If you are interested in HTRI Xchanger Suite 6.00 [FULL Version] Download, you should contact HTRI for a license and download the software from their website or a trusted source online. You will not regret using this software for your heat transfer engineering projects.</p> 679dcb208e<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Kaabil Movies Full Hd 1080p.md b/spaces/tialenAdioni/chat-gpt-api/logs/Kaabil Movies Full Hd 1080p.md
deleted file mode 100644
index c419b6b4e3c5f2b6256ff51417a0ea2a49901e18..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Kaabil Movies Full Hd 1080p.md	
+++ /dev/null
@@ -1,24 +0,0 @@
-<br />
-
-<h1>Kaabil Movies Full Hd 1080p: How to Watch the Bollywood Thriller Online</h1>
-<p>Kaabil is a 2017 Bollywood thriller film starring Hrithik Roshan and Yami Gautam as a visually impaired couple who seek revenge against their enemies. The film was praised for its gripping story, powerful performances, and stunning visuals. If you are looking for a way to watch Kaabil movies full hd 1080p online, here are some options you can try.</p>
-<h2>Kaabil Movies Full Hd 1080p</h2><br /><p><b><b>Download Zip</b> &#128504;&#128504;&#128504; <a href="https://urlcod.com/2uK87I">https://urlcod.com/2uK87I</a></b></p><br /><br />
-<ul>
-<li><b>Amazon Prime Video:</b> Amazon Prime Video is one of the most popular streaming platforms in the world, offering a wide range of movies and shows in various languages and genres. You can watch Kaabil movies full hd 1080p on Amazon Prime Video with a subscription fee of $12.99 per month or $119 per year. You can also get a 30-day free trial if you are a new user.</li>
-<li><b>Netflix:</b> Netflix is another global leader in online entertainment, with millions of subscribers and original content. You can watch Kaabil movies full hd 1080p on Netflix with a subscription fee of $8.99 to $17.99 per month, depending on the plan you choose. You can also get a free trial for a limited time if you are eligible.</li>
-<li><b>Hotstar:</b> Hotstar is a streaming service that specializes in Indian content, including movies, shows, sports, and news. You can watch Kaabil movies full hd 1080p on Hotstar with a subscription fee of $9.99 per month or $74.99 per year. You can also get a 7-day free trial if you sign up with your credit card.</li>
-</ul>
-<p>These are some of the best ways to watch Kaabil movies full hd 1080p online. However, you should always be careful of illegal or pirated websites that may offer low-quality or infected files. Always use trusted and legal sources to enjoy your favorite movies in high definition.</p>
-
-<p>Kaabil movies full hd 1080p are not only a treat for the eyes, but also for the ears. The film has a captivating soundtrack composed by Rajesh Roshan, the brother of the director Sanjay Gupta. The songs are sung by some of the best singers in the industry, such as Jubin Nautiyal, Palak Muchhal, and Vishal Dadlani. The lyrics are written by Nasir Faraaz and Manoj Muntashir. Some of the popular songs from Kaabil movies full hd 1080p are:</p>
-<ol>
-<li><b>Kaabil Hoon:</b> This is the title track of the film, which expresses the love and determination of the protagonists. It is a romantic and motivational song that will make you feel optimistic and hopeful.</li>
-<li><b>Haseeno Ka Deewana:</b> This is a remake of the classic song from the 1980 film Yaarana, which featured Amitabh Bachchan and Neetu Singh. It is a peppy and energetic song that will make you want to dance along.</li>
-<li><b>Kuch Din:</b> This is a soft and melodious song that depicts the happy moments of the couple's life. It is a soothing and relaxing song that will touch your heart.</li>
-<li><b>Mon Amour:</b> This is a French term that means "my love". It is a lively and upbeat song that celebrates the joy of love and life. It is a fun and catchy song that will make you smile.</li>
-</ol>
-<p>These are some of the songs from Kaabil movies full hd 1080p that you can enjoy while watching the film online. You can also download or stream them from various platforms such as YouTube, Spotify, Gaana, or JioSaavn. Kaabil movies full hd 1080p are a must-watch for anyone who loves thrillers, dramas, and romance.</p>
-<p></p> 7196e7f11a<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download RPP Tematik Kelas 1 Semester 1 K13 Revisi Terbaru Lengkap dengan Silabus dan Penilaian.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download RPP Tematik Kelas 1 Semester 1 K13 Revisi Terbaru Lengkap dengan Silabus dan Penilaian.md
deleted file mode 100644
index b25ff318ea312f6c541174db384a6b99c24146ea..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Cara Download RPP Tematik Kelas 1 Semester 1 K13 Revisi Terbaru Lengkap dengan Silabus dan Penilaian.md	
+++ /dev/null
@@ -1,181 +0,0 @@
-
-<h1>Download RPP Tematik Kelas 1 Semester 1: A Guide for Teachers</h1>
-<p>If you are a teacher of grade 1 students in Indonesia, you might be wondering how to download RPP Tematik Kelas 1 Semester 1. RPP Tematik is a curriculum-based lesson plan that integrates various subjects and themes in one learning unit. It is designed to help students develop their competencies, values, and character through meaningful and contextual learning activities. In this article, we will explain what RPP Tematik is, why it is important, how to download it, and how to use it effectively.</p>
- <h2>What is RPP Tematik and why is it important?</h2>
-<h3>The definition and purpose of RPP Tematik</h3>
-<p>RPP Tematik stands for Rencana Pelaksanaan Pembelajaran Tematik, which means thematic lesson plan. It is a document that contains the objectives, materials, methods, activities, and assessments of a learning unit that covers a specific theme. For example, in grade 1 semester 1, there are eight themes that students need to learn, such as Diriku (Myself), Kegemaranku (My Hobby), Kegiatanku (My Activity), Keluargaku (My Family), Pengalamanku (My Experience), Lingkungan Bersih Sehat dan Asri (Clean Healthy and Beautiful Environment), Benda Hewan dan Tanaman di Sekitarku (Objects Animals and Plants Around Me), and Peristiwa Alam (Natural Events).</p>
-<h2>download rpp tematik kelas 1 semester 1</h2><br /><p><b><b>Download</b> &#10022;&#10022;&#10022; <a href="https://bltlly.com/2uOnWo">https://bltlly.com/2uOnWo</a></b></p><br /><br />
-<p>The purpose of RPP Tematik is to provide a comprehensive and holistic learning experience for students. By integrating various subjects and themes, students can learn about themselves, their surroundings, their culture, their nation, and their world in a more meaningful and relevant way. They can also develop their cognitive, affective, and psychomotor skills as well as their values and character through various learning activities.</p>
- <h3>The benefits of using RPP Tematik for teaching and learning</h3>
-<p>There are many benefits of using RPP Tematik for teaching and learning, such as:</p>
-<ul>
-<li>It saves time and energy for teachers as they do not need to prepare separate lesson plans for each subject.</li>
-<li>It helps teachers to align their teaching with the curriculum standards and objectives.</li>
-<li>It fosters students' interest and motivation as they learn about topics that are relevant to their lives.</li>
-<li>It enhances students' creativity and critical thinking as they explore various aspects of a theme.</li>
-<li>It promotes students' collaboration and communication as they work together in groups or pairs.</li>
-<li>It supports students' differentiation and individualization as they can choose their own learning styles and preferences.</li>
-</ul>
- <h2>How to download RPP Tematik Kelas 1 Semester 1?</h2>
-<h3>The sources and links for downloading RPP Tematik Kelas 1 Semester 1</h3>
-<p>There are many sources and links for downloading RPP Tematik Kelas 1 Semester 1 online. Some of them are:</p>
-<table>
-<tr><th>Source</th><th>Link</th></tr>
-<tr><td>Guru Berbagi</td><td>[Guru Berbagi]</td></tr>
-<tr><td>Website Edukasi</td><td>[Website Edukasi]</td></tr>
-<tr><td>Ilmu Pengetahuan</td><td>[Ilmu Pengetahuan]</td></tr>
-<tr><td>Media Belajar</td><td>[Media Belajar]</td></tr>
-<tr><td>RPP Kurikulum 2023</td><td>[RPP Kurikulum 2023]</td></tr>
-</table>
-<p>These sources and links provide free and easy access to download RPP Tematik Kelas 1 Semester 1 in PDF or Word format. You can choose the one that suits your needs and preferences.</p>
- <h3>The steps and tips for downloading RPP Tematik Kelas 1 Semester 1</h3>
-<p>The steps and tips for downloading RPP Tematik Kelas 1 Semester 1 are as follows:</p>
-<ol>
-<li>Click on the source and link that you want to use.</li>
-<li>Find the theme that you want to download. For example, if you want to download RPP Tematik Kelas 1 Semester 1 for the theme of Diriku, you can look for the title "RPP Tematik Kelas 1 Semester 1 Tema Diriku".</li>
-<li>Click on the download button or link. You may need to fill in some information or complete some tasks before you can download the file.</li>
-<li>Save the file to your computer or device. You can rename the file if you want.</li>
-<li>Open the file and check the content. You can print it out or use it digitally.</li>
-</ol>
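-<p>For teachers who prefer to script the download step, here is a minimal sketch in Python (the URL and file name are hypothetical placeholders; substitute the actual link from one of the sources above):</p>
-<pre><code>
-# Hedged sketch: fetch a lesson-plan PDF over HTTP with the requests library.
-import requests
-
-url = "https://example.com/rpp-tematik-kelas-1-tema-diriku.pdf"  # placeholder
-response = requests.get(url, timeout=30)
-response.raise_for_status()  # stop early on HTTP errors
-
-with open("rpp-tematik-kelas-1-tema-diriku.pdf", "wb") as f:
-    f.write(response.content)
-</code></pre>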
-<p>Some tips for downloading RPP Tematik Kelas 1 Semester 1 are:</p>
-<ul>
-<li>Make sure you have a stable internet connection and enough storage space on your computer or device.</li>
-<li>Use a reliable and secure source and link. Avoid clicking on suspicious or malicious ads or pop-ups.</li>
-<li>Compare different sources and links to find the best quality and updated version of RPP Tematik Kelas 1 Semester 1.</li>
-<li>Customize and modify the RPP Tematik Kelas 1 Semester 1 according to your own teaching style and students' needs. You can add, delete, or change some elements of the lesson plan to make it more suitable for your classroom situation.</li>
-</ul>
- <h2>How to use RPP Tematik Kelas 1 Semester 1 effectively?</h2>
-<h3>The components and structure of RPP Tematik Kelas 1 Semester 1</h3>
-<p>RPP Tematik Kelas 1 Semester 1 consists of several components and has a specific structure. The components are:</p>
-<ul>
-<li>The theme: The main topic that integrates various subjects and aspects of learning.</li>
-<li>The sub-theme: The sub-topic that narrows down the scope of the theme.</li>
-<li>The learning objective: The expected outcome that students should achieve after completing the learning unit.</li>
-<li>The basic competence: The standard that describes the level of students' mastery of the learning objective.</li>
-<li>The indicator: The measurable and observable evidence that shows students' achievement of the basic competence.</li>
-<li>The material: The content that covers the theme, sub-theme, learning objective, basic competence, and indicator.</li>
-<li>The method: The approach that guides the teaching and learning process.</li>
-<li>The activity: The task that engages students in learning the material.</li>
-<li>The assessment: The tool that measures students' performance and progress.</li>
-</ul>
- <p>The structure of RPP Tematik Kelas 1 Semester 1 is:</p>
- <table>
-<tr><th>No</th><th>Component</th><th>Description</th></tr>
-<tr><td>1</td><td>Theme</td><td>The name of the theme that covers a specific learning unit.</td></tr>
-<tr><td>2</td><td>Sub-theme</td><td>The name of the sub-theme that narrows down the scope of the theme.</td></tr>
-<tr><td>3</td><td>Learning objective</td><td>The statement that describes what students should be able to do after completing the learning unit.</td></tr>
-<tr><td>4</td><td>Basic competence</td><td>The statement that describes the level of students' mastery of the learning objective based on the curriculum standard.</td></tr>
-<tr><td>5</td><td>Indicator</td><td>The statement that describes the measurable and observable evidence that shows students' achievement of the basic competence.</td></tr>
-<tr><td>6</td><td>Material</td><td>The content that covers the theme, sub-theme, learning objective, basic competence, and indicator.</td></tr>
-<tr><td>7</td><td>Method</td><td>The approach that guides the teaching and learning process.</td></tr>
-<tr><td>8</td><td>Activity</td><td>The task that engages students in learning the material.</td></tr>
-<tr><td>9</td><td>Assessment</td><td>The tool that measures students' performance and progress.</td></tr>
-</table>
- <h3>The examples and strategies for implementing RPP Tematik Kelas 1 Semester 1</h3>
-<p>To give you a better idea of how to use RPP Tematik Kelas 1 Semester 1 effectively, here are some examples and strategies for implementing it in your classroom:</p>
-<ul>
-<li>Example 1: RPP Tematik Kelas 1 Semester 1 Tema Diriku Subtema Tubuhku (Theme: Myself Sub-theme: My Body)   <ul>
-  <li>The learning objective is to identify and name the parts of the body and their functions.</li>
-  <li>The basic competence is to express the parts of the body and their functions orally and in writing.</li>
-  <li>The indicator is to mention the parts of the body and their functions correctly.</li>
-  <li>The material is the names and functions of the parts of the body, such as head, eyes, ears, nose, mouth, hands, feet, etc.</li>
-  <li>The method is inquiry-based learning, which involves asking questions, exploring answers, and discovering new knowledge.</li>
-  <li>The activity is to sing a song about the parts of the body, to draw and label the parts of the body, to play a game of Simon Says with the parts of the body, and to write a paragraph about the parts of the body and their functions.</li>
-  <li>The assessment is to observe and record students' participation and performance in the activity, to give feedback and reinforcement, and to administer a quiz on the parts of the body and their functions.</li>
-  </ul>
-</li>
-<li>Example 2: RPP Tematik Kelas 1 Semester 1 Tema Lingkungan Bersih Sehat dan Asri Subtema Sampah (Theme: Clean Healthy and Beautiful Environment Sub-theme: Trash)   <ul>
-  <li>The learning objective is to understand and practice how to manage trash properly.</li>
-  <li>The basic competence is to demonstrate how to sort, reduce, reuse, recycle, and dispose trash properly.</li>
-  <li>The indicator is to show how to sort, reduce, reuse, recycle, and dispose trash properly in daily life.</li>
-  <li>The material is the types and sources of trash, such as organic, inorganic, hazardous, etc., and the ways to manage trash properly, such as sorting, reducing, reusing, recycling, and disposing.</li>
-  <li>The method is project-based learning, which involves planning, doing, presenting, and reflecting on a real-world problem or challenge.</li>
-  <li>The activity is to conduct a survey on the trash situation in the school or neighborhood, to make a poster or presentation on how to manage trash properly, to create a product or artwork from recycled materials, and to participate in a trash cleanup campaign or competition.</li>
-  <li>The assessment is to evaluate students' understanding and skills in managing trash properly through their poster or presentation, product or artwork, and participation in the campaign or competition.</li>
-  </ul>
-</li>
-</ul>
- <h2>Conclusion</h2>
-<p>RPP Tematik Kelas 1 Semester 1 is a curriculum-based lesson plan that integrates various subjects and themes in one learning unit. It is designed to help students develop their competencies, values, and character through meaningful and contextual learning activities. In this article, we have explained what RPP Tematik is, why it is important, how to download it, and how to use it effectively. We hope that this article has been helpful for you as a teacher of grade 1 students in Indonesia. If you have any questions or feedback about RPP Tematik Kelas 1 Semester 1, please feel free to contact us or leave a comment below. Thank you for reading!</p>
- <h2>FAQs</h2>
-<h3>What are the sources of RPP Tematik Kelas 1 Semester 1?</h3>
-<p>RPP Tematik Kelas 1 Semester 1 is based on the curriculum standards issued by the Ministry of Education and Culture of Indonesia. It is also influenced by various educational theories and approaches that support thematic learning.</p>
- <h3>How many themes are there in RPP Tematik Kelas 1 Semester 1?</h3> <p>There are eight themes in RPP Tematik Kelas 1 Semester 1, namely:</p>
-<ul>
-<li>Diriku (Myself)</li>
-<li>Kegemaranku (My Hobby)</li>
-<li>Kegiatanku (My Activity)</li>
-<li>Keluargaku (My Family)</li>
-<li>Pengalamanku (My Experience)</li>
-<li>Lingkungan Bersih Sehat dan Asri (Clean Healthy and Beautiful Environment)</li>
-<li>Benda Hewan dan Tanaman di Sekitarku (Objects Animals and Plants Around Me)</li>
-<li>Peristiwa Alam (Natural Events)</li>
-</ul>
- <h3>How long does it take to complete one theme of RPP Tematik Kelas 1 Semester 1?</h3>
-<p>It depends on the number of sub-themes, the complexity of the material, and the pace of the students. However, generally, it takes about two to four weeks to complete one theme of RPP Tematik Kelas 1 Semester 1.</p>
- <h3>How can I make RPP Tematik Kelas 1 Semester 1 more fun and engaging for my students?</h3>
-<p>There are many ways to make RPP Tematik Kelas 1 Semester 1 more fun and engaging for your students, such as:</p>
-<ul>
-<li>Using various media and resources, such as pictures, videos, songs, stories, games, etc.</li>
-<li>Involving students in choosing and designing their own learning activities and products.</li>
-<li>Connecting the themes and materials to students' prior knowledge and experiences.</li>
-<li>Encouraging students to express their opinions and feelings about the themes and materials.</li>
-<li>Providing feedback and praise to students for their efforts and achievements.</li>
-</ul>
- <h3>What are the challenges or difficulties of using RPP Tematik Kelas 1 Semester 1?</h3>
-<p>Some of the challenges or difficulties of using RPP Tematik Kelas 1 Semester 1 are:</p>
-<ul>
-<li>Finding and accessing reliable and updated sources and links for downloading RPP Tematik Kelas 1 Semester 1.</li>
-<li>Adapting and modifying RPP Tematik Kelas 1 Semester 1 to suit your own teaching style and students' needs.</li>
-<li>Managing the time and resources for implementing RPP Tematik Kelas 1 Semester 1 effectively.</li>
-<li>Assessing students' learning outcomes and progress in a holistic and authentic way.</li>
-</ul>
- <h3>Where can I get more information or support about RPP Tematik Kelas 1 Semester 1?</h3>
-<p>You can get more information or support about RPP Tematik Kelas 1 Semester 1 from various sources, such as:</p>
-<ul>
-<li>The Ministry of Education and Culture of Indonesia website: [Kemdikbud]</li>
-<li>The National Education Standards Agency website: [BSNP]</li>
-<li>The online teacher community forums or groups: [Guru Berbagi], [Guru Pembelajar], [Guru Indonesia], etc.</li>
-<li>The local education office or school supervisor: [Dinas Pendidikan], [Kepala Sekolah], etc.</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/Rustangelo-PRO-Advanced-Download-License-UPD.md b/spaces/tioseFevbu/cartoon-converter/Rustangelo-PRO-Advanced-Download-License-UPD.md
deleted file mode 100644
index 69a1b6299ec3c4289aaa31fdd771db772f8ec431..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/Rustangelo-PRO-Advanced-Download-License-UPD.md
+++ /dev/null
@@ -1,75 +0,0 @@
-## Rustangelo PRO (Advanced) Download] [License]
-
-
-
-**Download File ✏ [https://ditzcosupo.blogspot.com/?d=2tx0nn](https://ditzcosupo.blogspot.com/?d=2tx0nn)**
-
-
-
-# How to Download Rustangelo PRO (Advanced) and Get a License Key
-
- 
-
-Rustangelo PRO (Advanced) is a program that allows you to create amazing paintings in Rust, a popular survival game. With Rustangelo PRO (Advanced), you can customize your paintings with advanced features such as layers, brushes, filters, and more. You can also save and load your paintings, share them with other players, and export them as images or videos.
-
- 
-
-If you want to download Rustangelo PRO (Advanced) and get a license key, you need to follow these steps:
-
- 
-
-1. Visit the official website of Rustangelo at [https://rustangelo.com/](https://rustangelo.com/) and click on the "Buy Now" button.
-
-2. Select the "Rustangelo PRO (Advanced)" option and click on the "Checkout" button.
-
-3. Enter your email address and choose your preferred payment method. You can pay with PayPal, credit card, or cryptocurrency.
-
-4. Complete the payment and check your email for the confirmation and the download link.
-
-5. Download and install Rustangelo PRO (Advanced) on your computer.
-
-6. Launch Rustangelo PRO (Advanced) and enter your email address and the license key that was sent to you.
-
-7. Enjoy creating stunning paintings in Rust with Rustangelo PRO (Advanced)!
-
-
-
-Rustangelo PRO (Advanced) is the best way to express your creativity and impress your friends in Rust. Don't miss this opportunity to get it at a discounted price. Download Rustangelo PRO (Advanced) today and get a license key for lifetime access!
-
-  
-
-If you are wondering what you can do with Rustangelo PRO (Advanced), here are some examples of the amazing paintings you can create:
-
-
-
-- A realistic portrait of your favorite Rust streamer or YouTuber.
-
-- A beautiful landscape of your base location or a nearby monument.
-
-- A funny meme or a message to troll your enemies or allies.
-
-- A logo or a banner for your clan or server.
-
-- A tribute or a homage to your favorite movie, game, or show.
-
-
-
-The possibilities are endless with Rustangelo PRO (Advanced). You can use any image as a source, or draw your own from scratch. You can also adjust the size, position, rotation, and color of your painting. You can even animate your painting with the video export feature.
-
- 
-
-Rustangelo PRO (Advanced) is compatible with any Rust server and any resolution. It works seamlessly with the game and does not interfere with your gameplay. You can also use it offline to practice your painting skills or to preview your creations.
-
- 
-
-Rustangelo PRO (Advanced) is the ultimate tool for Rust players who love art and creativity. It is easy to use, fast, and reliable. It is also safe and secure, as it does not contain any viruses, malware, or spyware. It is also regularly updated with new features and improvements.
-
- 
-
-Don't wait any longer and get Rustangelo PRO (Advanced) now. You will not regret it. Rustangelo PRO (Advanced) will make your Rust experience more fun and enjoyable. It will also make you stand out from the crowd and earn the respect and admiration of other players.
-
- 
-
-Download Rustangelo PRO (Advanced) today and get a license key for lifetime access. You will also get free updates and support. Rustangelo PRO (Advanced) is the best investment you can make for your Rust game. Trust us, you will love it!
-
- 1b8d091108
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/1001formasdemotivaralosempleadospdf VERIFIED.md b/spaces/tioseFevbu/cartoon-converter/scripts/1001formasdemotivaralosempleadospdf VERIFIED.md
deleted file mode 100644
index 36b779ccef8cb9b771fcd55941af5d002dcf3afd..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/1001formasdemotivaralosempleadospdf VERIFIED.md	
+++ /dev/null
@@ -1,14 +0,0 @@
-<br />
-<h1>1001 Formas de Motivar a los Empleados: Un libro útil para mejorar el clima laboral</h1>
-<p>¿Te gustaría saber cómo motivar a tus empleados de forma efectiva y sencilla? ¿Quieres aprender las mejores prácticas para reconocer el esfuerzo, la creatividad y el compromiso de tu equipo? ¿Buscas un libro que te ofrezca ideas prácticas y fáciles de implementar en tu organización?</p>
-<h2>1001formasdemotivaralosempleadospdf</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://urlcod.com/2uHwij">https://urlcod.com/2uHwij</a></b></p><br /><br />
-<p>If the answer is yes, then this book is for you. 1001 Formas de Motivar a los Empleados (1001 Ways to Motivate Employees) was written by Bob Nelson, an expert in motivation and human resources management, and offers a wide variety of strategies for encouraging your staff and improving the work environment.</p>
-<p>In this book you will find real examples of companies that have motivated their employees with simple but effective actions, such as praising work well done, celebrating achievements, giving constructive feedback, offering development opportunities, involving employees in decisions, creating a sense of belonging, and much more.</p>
-<p>The book also includes a questionnaire for assessing your team's level of motivation, along with advice on adapting the strategies to your own context and organizational culture. It also shows you how to avoid the most common mistakes that demotivate employees, such as poor communication, micromanagement, inequity, and indifference.</p>
-<p>1001 Formas de Motivar a los Empleados will help you create a positive work climate in which your employees feel valued, respected, and committed to their work. That way you can improve your team's performance, productivity, and satisfaction, and achieve your organization's goals.</p>
-<p></p>
-<p>If you want to learn more about this book, you can visit the following link: <a href="https://books.google.com/books/about/1001_Formas_de_Motivar_a_los_Empleados.html?id=6ZB5tcDQvhEC">1001 Formas de Motivar a los Empleados - Google Books</a></p><p>The book also features testimonials from leaders and employees who have applied the author's strategies and obtained positive results, so you can draw inspiration from their experiences and learn from their advice.</p>
-<p>In addition, the book provides a subject index so you can look up the motivation techniques that interest you most or that best fit your situation. For example, if you want to motivate your employees through compensation, see chapter 4, which presents 101 ways to reward employees without money; if you want to motivate them through participation, see chapter 7, which presents 101 ways to involve employees in continuous improvement.</p>
-<p>1001 Formas de Motivar a los Empleados offers a wealth of resources and ideas for improving your team's motivation. Whether you are a manager, a supervisor, a leader, or an employee, this book will help you create a more positive, creative, and productive work environment.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/EkahauSiteSurveyWithCrack.md b/spaces/tioseFevbu/cartoon-converter/scripts/EkahauSiteSurveyWithCrack.md
deleted file mode 100644
index 747911ea1e7b31a2975bdf32ff956db4d5bd7633..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/EkahauSiteSurveyWithCrack.md
+++ /dev/null
@@ -1,37 +0,0 @@
-<br />
-<h1>How to Use Ekahau Site Survey With Crack for Wi-Fi Planning and Analysis</h1>
-<p>Ekahau Site Survey is a professional software tool that helps you design, optimize, and troubleshoot wireless networks. It allows you to perform site surveys, spectrum analysis, coverage mapping, capacity planning, and more. But what if you don't have a license for Ekahau Site Survey? Is there a way to use it with a crack?</p>
-<h2>EkahauSiteSurveyWithCrack</h2><br /><p><b><b>Download File</b> &#9658;&#9658;&#9658; <a href="https://urlcod.com/2uHvmA">https://urlcod.com/2uHvmA</a></b></p><br /><br />
-<p>In this article, we will show you how to download and install Ekahau Site Survey with crack, and how to use it for your Wi-Fi projects. However, we also want to warn you about the risks and disadvantages of using cracked software, and why you should consider buying a legitimate license instead.</p>
-<h2>How to Download and Install Ekahau Site Survey With Crack</h2>
-<p>There are many websites that claim to offer Ekahau Site Survey with crack, but most of them are either fake, malicious, or outdated. We do not recommend downloading or installing any software from untrusted sources, as they may contain viruses, malware, spyware, or ransomware that can harm your computer or steal your data.</p>
-<p>However, if you still want to try Ekahau Site Survey with crack, here are the steps you need to follow:</p>
-<ol>
-<li>Download Ekahau HeatMapper 1.1.4 for Windows from FileHippo[^1^]. This is a free version of Ekahau Site Survey that has limited features and functionality.</li>
-<li>Download the crack file from a website that claims to have it. We will not provide any links here, as we do not endorse or support piracy. You will have to search for it yourself.</li>
-<li>Extract the crack file and copy it to the installation folder of Ekahau HeatMapper. Usually, this is C:\Program Files (x86)\Ekahau\Ekahau HeatMapper.</li>
-<li>Run the crack file as administrator and follow the instructions. This should patch the Ekahau HeatMapper executable file and make it work as Ekahau Site Survey.</li>
-<li>Launch Ekahau Site Survey and enjoy its features.</li>
-</ol>
-<h2>How to Use Ekahau Site Survey With Crack for Wi-Fi Planning and Analysis</h2>
-<p>Once you have installed Ekahau Site Survey with crack, you can use it for various Wi-Fi tasks. Here are some of the things you can do with it:</p>
-<ul>
-<li>Create a floor plan of your site or import one from a file.</li>
-<li>Add walls, doors, windows, and other obstacles that affect Wi-Fi propagation.</li>
-<li>Add access points (APs) and antennas to your floor plan and configure their settings.</li>
-<li>Perform an active or passive site survey using a Wi-Fi adapter or an Ekahau Sidekick 2 device[^2^]. This will measure the signal strength, noise level, interference, channel utilization, and other parameters of your wireless network.</li>
-<li>View the survey results on a heatmap that shows the coverage, quality, capacity, and performance of your Wi-Fi network.</li>
-<li>Analyze the survey data and identify any issues or problems with your Wi-Fi network.</li>
-<li>Optimize your Wi-Fi network by adjusting the AP locations, orientations, channels, power levels, and other settings.</li>
-<li>Generate reports and documentation for your Wi-Fi project.</li>
-</ul>
-<h2>The Risks and Disadvantages of Using Ekahau Site Survey With Crack</h2>
-<p>While using Ekahau Site Survey with crack may seem tempting, it also comes with many risks and disadvantages that you should be aware of. Here are some of them:</p>
-<p></p>
-<ul>
-<li>You may violate the intellectual property rights of Ekahau and face legal consequences. Piracy is a crime that can result in fines or imprisonment.</li>
-<li>You may expose your computer and data to security threats. Cracked software often contains malicious code that can infect your system or steal your information.</li>
-<li>You may compromise the quality and accuracy of your Wi-Fi project. Cracked software may not work properly or have bugs that can affect your results.</li>
-<li>You may miss out on updates and support from Ekahau. Cracked software cannot be updated or patched by the developer, and you will not be able to get technical support if something goes wrong.</li>
-</ul><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Griechischer Wein Noten Pdf Download !!INSTALL!!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Griechischer Wein Noten Pdf Download !!INSTALL!!.md
deleted file mode 100644
index d6f91569f734da497ece3c7cb5ae39b7a4d71b96..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Griechischer Wein Noten Pdf Download !!INSTALL!!.md	
+++ /dev/null
@@ -1,38 +0,0 @@
-<br />
-<html>
-<head>
-<meta name="description" content="Learn how to download and play Griechischer Wein by Udo Jürgens on piano or trumpet with free sheet music in PDF format.">
-<meta name="keywords" content="griechischer wein noten pdf download, griechischer wein sheet music, griechischer wein piano, griechischer wein trumpet, udo jürgens">
-<title>Griechischer Wein Noten PDF Download: How to Play Griechischer Wein by Udo Jürgens on Piano or Trumpet</title>
-</head>
-<body>
-<h1>Griechischer Wein Noten PDF Download: How to Play Griechischer Wein by Udo Jürgens on Piano or Trumpet</h1>
-<p>Griechischer Wein (Greek Wine) is a popular song by Austrian singer-songwriter Udo Jürgens. It was released in 1974 and became a hit in Germany, Austria, Switzerland and other countries. The song is about the nostalgia and longing of Greek guest workers in Germany for their homeland and culture.</p>
-<h2>griechischer wein noten pdf download</h2><br /><p><b><b>Download File</b> &#10003;&#10003;&#10003; <a href="https://urlcod.com/2uHwZD">https://urlcod.com/2uHwZD</a></b></p><br /><br />
-<p>If you want to learn how to play Griechischer Wein by Udo Jürgens on piano or trumpet, you can download free sheet music in PDF format from various online sources. In this article, we will show you some of the best websites where you can find Griechischer Wein noten PDF download and how to use them.</p>
-<h2>Musescore.com</h2>
-<p>Musescore.com is a website where you can find and share sheet music for various instruments and genres. You can also use the Musescore app to view, play and print the scores offline. To download Griechischer Wein noten PDF from Musescore.com, follow these steps:</p>
-<ol>
-<li>Go to <a href="https://musescore.com/">https://musescore.com/</a> and type "griechischer wein" in the search box.</li>
-<li>Choose the version of the song that suits your instrument and skill level. For example, if you want to play Griechischer Wein on piano solo, you can choose "Griechischer Wein (Udo Jürgens)" by migromigros@gmx.net.</li>
-<li>Click on the score title to open it in a new tab. You can preview the score by clicking on the play button or scrolling through the pages.</li>
-<li>To download the score in PDF format, click on the three dots icon next to the share button and select "Download". You can also choose other formats such as MIDI or MP3.</li>
-<li>Save the file to your device and print it if you wish.</li>
-</ol>
-<h2>Musicnotes.com</h2>
-<p>Musicnotes.com is another website where you can buy and download digital sheet music for various instruments and genres. You can also use the Musicnotes app to access your purchased scores offline. To download Griechischer Wein noten PDF from Musicnotes.com, follow these steps:</p>
-<p></p>
-<ol>
-<li>Go to <a href="https://www.musicnotes.com/">https://www.musicnotes.com/</a> and type "griechischer wein" in the search box.</li>
-<li>Choose the version of the song that suits your instrument and skill level. For example, if you want to play Griechischer Wein on trumpet solo, you can choose "Griechischer Wein" by Udo Jürgens (Leadsheet) in D Minor.</li>
-<li>Click on the score title to open it in a new tab. You can preview the score by clicking on the play button or scrolling through the pages.</li>
-<li>To download the score in PDF format, you need to purchase it first. Click on the "Add to Cart" button and follow the instructions to complete your payment.</li>
-<li>After your payment is confirmed, you can access your purchased score from your account page or from the Musicnotes app. You can also print it if you wish.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>Griechischer Wein by Udo Jürgens is a beautiful song that you can learn to play on piano or trumpet with the free or paid sheet music available from the websites above.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Mig Jimenez Faq 2 Pdf Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Mig Jimenez Faq 2 Pdf Download.md
deleted file mode 100644
index f93edd2f0a031099c3cb45f7a0c756ea493f7b15..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Mig Jimenez Faq 2 Pdf Download.md	
+++ /dev/null
@@ -1,24 +0,0 @@
-<br />
-<h1>How to Download Mig Jimenez FAQ 2 PDF for Free</h1>
-<p>If you are a fan of military modeling, you may have heard of Mig Jimenez, one of the most renowned experts in the field. He is the author of several books and magazines that teach you how to paint and weather your models with realistic effects. One of his most popular books is F.A.Q. 2 Frequently Asked Questions of the AFV Painting Techniques, which covers everything from basic techniques to advanced tips and tricks.</p>
-<p>F.A.Q. 2 is a 320-page paperback book that contains hundreds of photos and illustrations to guide you step by step. It is written in English and published by AK Interactive, a leading company in modeling products. The book is available for purchase online or in specialized stores, but it is not cheap. It costs around $70 USD, which may be too expensive for some hobbyists.</p>
-<h2>mig jimenez faq 2 pdf download</h2><br /><p><b><b>DOWNLOAD</b> &#10042; <a href="https://urlcod.com/2uHxU4">https://urlcod.com/2uHxU4</a></b></p><br /><br />
-<p>But what if you could download Mig Jimenez FAQ 2 PDF for free? Wouldn't that be great? Well, you are in luck, because there are some ways to get this book without paying a dime. In this article, we will show you how to do it legally and safely.</p>
-<h2>Method 1: Visit the Official Website of Mig Jimenez</h2>
-<p>One of the easiest ways to download Mig Jimenez FAQ 2 PDF for free is to visit his official website: <a href="https://www.migjimenez.com/">https://www.migjimenez.com/</a>. There, you will find a section called Downloads AMMO of Mig Jimenez[^2^], which offers plenty of material in PDF format: catalogs, leaflets, step-by-step guides, and The Weathering Magazine specials.</p>
-<p>To download Mig Jimenez FAQ 2 PDF for free, you just need to click on the link that says "Download it in this LINK" under the image of the book cover. You will be redirected to a Google Drive page where you can view or download the file. The file size is about 300 MB, so make sure you have enough space on your device and a stable internet connection.</p>
-<p></p>
-<p>This method is legal and safe, as you are downloading the file directly from the author's website. However, there are some drawbacks. First, the quality of the PDF file is not very good, as it is a scanned copy of the book. Some pages may be blurry or distorted. Second, the file may not be available forever, as it depends on the author's permission and Google Drive's storage limit. Third, you may not be able to access the website or the file if you are in a country where they are blocked or restricted.</p>
-<h2>Method 2: Search for Other Websites that Offer Free PDF Downloads</h2>
-<p>Another way to download Mig Jimenez FAQ 2 PDF for free is to search for other websites that offer free PDF downloads of books and magazines. There are many websites that claim to have thousands of PDF files for free download, but not all of them are reliable or safe. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Some of them may also have broken links, pop-up ads, or surveys that waste your time and money.</p>
-<p>To avoid these risks, you need to be careful and selective when choosing a website to download Mig Jimenez FAQ 2 PDF for free. Here are some tips to help you find a trustworthy website:</p>
-<ul>
-<li>Check the domain name and extension of the website. Avoid websites that have strange or suspicious names or extensions, such as .ru, .cn, .tk, .biz, etc.</li>
-<li>Check the design and layout of the website. Avoid websites that look outdated, cluttered, or unprofessional.</li>
-<li>Check the reviews and ratings of the website. Avoid websites that have negative feedback or low ratings from users or experts.</li>
-<li>Check the content and quality of the PDF file. Avoid websites that offer low-quality or incomplete PDF files.</li>
-<li>Check the security and privacy of the website. Avoid websites that do not have HTTPS encryption or ask for your personal information or payment details.</li>
-</ul>
-<p>Once you find a website that meets these criteria, you can download Mig Jimenez FAQ 2 PDF for free with much less risk.</p><br />
-<br />
-<br />
\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/bar.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/bar.py
deleted file mode 100644
index ed86a552d1ca6baa0cfd48ec73a7a5c952d047c9..0000000000000000000000000000000000000000
--- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/bar.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from typing import Optional, Union
-
-from .color import Color
-from .console import Console, ConsoleOptions, RenderResult
-from .jupyter import JupyterMixin
-from .measure import Measurement
-from .segment import Segment
-from .style import Style
-
-# There are left-aligned characters for 1/8 to 7/8, but
-# the right-aligned characters exist only for 1/8 and 4/8.
-BEGIN_BLOCK_ELEMENTS = ["█", "█", "█", "▐", "▐", "▐", "▕", "▕"]
-END_BLOCK_ELEMENTS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉"]
-FULL_BLOCK = "█"
-
-
-class Bar(JupyterMixin):
-    """Renders a solid block bar.
-
-    Args:
-        size (float): Value for the end of the bar.
-        begin (float): Begin point (between 0 and size, inclusive).
-        end (float): End point (between 0 and size, inclusive).
-        width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
-        color (Union[Color, str], optional): Color of the bar. Defaults to "default".
-        bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default".
-    """
-
-    def __init__(
-        self,
-        size: float,
-        begin: float,
-        end: float,
-        *,
-        width: Optional[int] = None,
-        color: Union[Color, str] = "default",
-        bgcolor: Union[Color, str] = "default",
-    ):
-        self.size = size
-        self.begin = max(begin, 0)
-        self.end = min(end, size)
-        self.width = width
-        self.style = Style(color=color, bgcolor=bgcolor)
-
-    def __repr__(self) -> str:
-        return f"Bar({self.size}, {self.begin}, {self.end})"
-
-    def __rich_console__(
-        self, console: Console, options: ConsoleOptions
-    ) -> RenderResult:
-
-        width = min(
-            self.width if self.width is not None else options.max_width,
-            options.max_width,
-        )
-
-        if self.begin >= self.end:
-            yield Segment(" " * width, self.style)
-            yield Segment.line()
-            return
-
-        prefix_complete_eights = int(width * 8 * self.begin / self.size)
-        prefix_bar_count = prefix_complete_eights // 8
-        prefix_eights_count = prefix_complete_eights % 8
-
-        body_complete_eights = int(width * 8 * self.end / self.size)
-        body_bar_count = body_complete_eights // 8
-        body_eights_count = body_complete_eights % 8
-
-        # When start and end fall into the same cell, we ideally should render
-        # a symbol that's "center-aligned", but there is no good symbol in Unicode.
-        # In this case, we fall back to right-aligned block symbol for simplicity.
-
-        prefix = " " * prefix_bar_count
-        if prefix_eights_count:
-            prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count]
-
-        body = FULL_BLOCK * body_bar_count
-        if body_eights_count:
-            body += END_BLOCK_ELEMENTS[body_eights_count]
-
-        suffix = " " * (width - len(body))
-
-        yield Segment(prefix + body[len(prefix) :] + suffix, self.style)
-        yield Segment.line()
-
-    def __rich_measure__(
-        self, console: Console, options: ConsoleOptions
-    ) -> Measurement:
-        return (
-            Measurement(self.width, self.width)
-            if self.width is not None
-            else Measurement(4, options.max_width)
-        )
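-
-
-if __name__ == "__main__":  # pragma: no cover
-    # Added demo, not part of the vendored module: render a few bars to show
-    # the fractional eighth-block characters in action (values are arbitrary).
-    # Run with `python -m pip._vendor.rich.bar`; Console is already imported
-    # at the top of this file.
-    console = Console()
-    for begin, end in [(0, 3.5), (2.25, 6.75), (5.125, 10)]:
-        console.print(Bar(size=10, begin=begin, end=end, color="magenta"))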
diff --git a/spaces/tom-doerr/logo_generator/app/gradio/app_gradio.py b/spaces/tom-doerr/logo_generator/app/gradio/app_gradio.py
deleted file mode 100644
index 40013735519c4f0bab10dce4a6466af236454151..0000000000000000000000000000000000000000
--- a/spaces/tom-doerr/logo_generator/app/gradio/app_gradio.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Uncomment to run on cpu
-# import os
-# os.environ["JAX_PLATFORM_NAME"] = "cpu"
-
-import random
-
-import gradio as gr
-import jax
-import numpy as np
-from flax.jax_utils import replicate
-from flax.training.common_utils import shard
-from PIL import Image, ImageDraw, ImageFont
-
-# ## CLIP Scoring
-from transformers import BartTokenizer, CLIPProcessor, FlaxCLIPModel
-from vqgan_jax.modeling_flax_vqgan import VQModel
-
-from dalle_mini.model import CustomFlaxBartForConditionalGeneration
-
-DALLE_REPO = "flax-community/dalle-mini"
-DALLE_COMMIT_ID = "4d34126d0df8bc4a692ae933e3b902a1fa8b6114"
-
-VQGAN_REPO = "flax-community/vqgan_f16_16384"
-VQGAN_COMMIT_ID = "90cc46addd2dd8f5be21586a9a23e1b95aa506a9"
-
-tokenizer = BartTokenizer.from_pretrained(DALLE_REPO, revision=DALLE_COMMIT_ID)
-model = CustomFlaxBartForConditionalGeneration.from_pretrained(
-    DALLE_REPO, revision=DALLE_COMMIT_ID
-)
-vqgan = VQModel.from_pretrained(VQGAN_REPO, revision=VQGAN_COMMIT_ID)
-
-
-def captioned_strip(images, caption=None, rows=1):
-    increased_h = 0 if caption is None else 48
-    w, h = images[0].size[0], images[0].size[1]
-    img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h))
-    for i, img_ in enumerate(images):
-        img.paste(img_, (i // rows * w, increased_h + (i % rows) * h))
-
-    if caption is not None:
-        draw = ImageDraw.Draw(img)
-        font = ImageFont.truetype(
-            "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40
-        )
-        draw.text((20, 3), caption, (255, 255, 255), font=font)
-    return img
-
-
-def custom_to_pil(x):
-    x = np.clip(x, 0.0, 1.0)
-    x = (255 * x).astype(np.uint8)
-    x = Image.fromarray(x)
-    if not x.mode == "RGB":
-        x = x.convert("RGB")
-    return x
-
-
-def generate(input, rng, params):
-    return model.generate(
-        **input,
-        max_length=257,
-        num_beams=1,
-        do_sample=True,
-        prng_key=rng,
-        eos_token_id=50000,
-        pad_token_id=50000,
-        params=params,
-    )
-
-
-def get_images(indices, params):
-    return vqgan.decode_code(indices, params=params)
-
-
-p_generate = jax.pmap(generate, "batch")
-p_get_images = jax.pmap(get_images, "batch")
-
-bart_params = replicate(model.params)
-vqgan_params = replicate(vqgan.params)
-
-clip = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
-print("Initialize FlaxCLIPModel")
-processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
-print("Initialize CLIPProcessor")
-
-
-def hallucinate(prompt, num_images=64):
-    prompt = [prompt] * jax.device_count()
-    inputs = tokenizer(
-        prompt,
-        return_tensors="jax",
-        padding="max_length",
-        truncation=True,
-        max_length=128,
-    ).data
-    inputs = shard(inputs)
-
-    all_images = []
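-    # Each p_generate call below yields one image per device, so the loop
-    # runs num_images // jax.device_count() times to collect num_images total.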
-    for i in range(num_images // jax.device_count()):
-        key = random.randint(0, 10**7)  # randint needs int bounds; 1e7 is a float
-        rng = jax.random.PRNGKey(key)
-        rngs = jax.random.split(rng, jax.local_device_count())
-        indices = p_generate(inputs, rngs, bart_params).sequences
-        indices = indices[:, :, 1:]
-
-        images = p_get_images(indices, vqgan_params)
-        images = np.squeeze(np.asarray(images), 1)
-        for image in images:
-            all_images.append(custom_to_pil(image))
-    return all_images
-
-
-def clip_top_k(prompt, images, k=8):
-    inputs = processor(text=prompt, images=images, return_tensors="np", padding=True)
-    outputs = clip(**inputs)
-    logits = outputs.logits_per_text
-    scores = np.array(logits[0]).argsort()[-k:][::-1]
-    return [images[score] for score in scores]
-
-
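-# Added note on clip_top_k above: `logits_per_text` has shape
-# (num_texts, num_images). Argsorting row 0 ascending, slicing the last k
-# entries, and reversing yields the indices of the k best-matching images,
-# e.g. logits [[0.1, 0.9, 0.4, 0.7]] with k=2 -> indices [1, 3].
-
-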
-def compose_predictions(images, caption=None):
-    increased_h = 0 if caption is None else 48
-    w, h = images[0].size[0], images[0].size[1]
-    img = Image.new("RGB", (len(images) * w, h + increased_h))
-    for i, img_ in enumerate(images):
-        img.paste(img_, (i * w, increased_h))
-
-    if caption is not None:
-        draw = ImageDraw.Draw(img)
-        font = ImageFont.truetype(
-            "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40
-        )
-        draw.text((20, 3), caption, (255, 255, 255), font=font)
-    return img
-
-
-def top_k_predictions(prompt, num_candidates=32, k=8):
-    images = hallucinate(prompt, num_images=num_candidates)
-    images = clip_top_k(prompt, images, k=k)
-    return images
-
-
-def run_inference(prompt, num_images=32, num_preds=8):
-    images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
-    predictions = captioned_strip(images)
-    output_title = f"""
-    <b>{prompt}</b>
-    """
-    return (output_title, predictions)
-
-
-outputs = [
-    gr.outputs.HTML(label=""),  # To be used as title
-    gr.outputs.Image(label=""),
-]
-
-description = """
-DALL·E-mini is an AI model that generates images from any prompt you give! Generate images from text:
-"""
-gr.Interface(
-    run_inference,
-    inputs=[gr.inputs.Textbox(label="What do you want to see?")],
-    outputs=outputs,
-    title="DALL·E mini",
-    description=description,
-    article="<p style='text-align: center'> Created by Boris Dayma et al. 2021 | <a href='https://github.com/borisdayma/dalle-mini'>GitHub</a> | <a href='https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA'>Report</a></p>",
-    layout="vertical",
-    theme="huggingface",
-    examples=[
-        ["an armchair in the shape of an avocado"],
-        ["snowy mountains by the sea"],
-    ],
-    allow_flagging=False,
-    live=False,
-    # server_port=8999
-).launch(share=True)
diff --git a/spaces/tomofi/MMOCR/configs/kie/sdmgr/README.md b/spaces/tomofi/MMOCR/configs/kie/sdmgr/README.md
deleted file mode 100644
index 10d3ab6cc45f58d8e278971cccf8dd32365aff94..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/configs/kie/sdmgr/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# SDMGR
-> [Spatial Dual-Modality Graph Reasoning for Key Information Extraction](https://arxiv.org/abs/2103.14470)
-
-<!-- [ALGORITHM] -->
-
-## Abstract
-
-Key information extraction from document images is of paramount importance in office automation. Conventional template matching based approaches fail to generalize well to document images of unseen templates, and are not robust against text recognition errors. In this paper, we propose an end-to-end Spatial Dual-Modality Graph Reasoning method (SDMG-R) to extract key information from unstructured document images. We model document images as dual-modality graphs, nodes of which encode both the visual and textual features of detected text regions, and edges of which represent the spatial relations between neighboring text regions. The key information extraction is solved by iteratively propagating messages along graph edges and reasoning the categories of graph nodes. In order to roundly evaluate our proposed method as well as boost the future research, we release a new dataset named WildReceipt, which is collected and annotated tailored for the evaluation of key information extraction from document images of unseen templates in the wild. It contains 25 key information categories, a total of about 69000 text boxes, and is about 2 times larger than the existing public datasets. Extensive experiments validate that all information including visual features, textual features and spatial relations can benefit key information extraction. It has been shown that SDMG-R can effectively extract key information from document images of unseen templates, and obtain new state-of-the-art results on the recent popular benchmark SROIE and our WildReceipt. Our code and dataset will be publicly released.
-
-<div align=center>
-<img src="https://user-images.githubusercontent.com/22607038/142580689-18edb4d7-f716-475c-b1c1-e2b934658cee.png"/>
-</div>
-
-## Results and models
-
-### WildReceipt
-
-|                                 Method                                 |     Modality     | Macro F1-Score |                                                                                            Download                                                                                            |
-| :--------------------------------------------------------------------: | :--------------: | :------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-|   [sdmgr_unet16](/configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py)   | Visual + Textual |     0.888      |  [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_unet16_60e_wildreceipt_20210520-7489e6de.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210520_132236.log.json)  |
-| [sdmgr_novisual](/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py) |     Textual      |     0.870      | [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_20210517-a44850da.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210517_205829.log.json) |
-
-:::{note}
-1. For `sdmgr_novisual`, images are not needed for training and testing, so a fake `img_prefix` can be used in the configs. Likewise, a fake `file_name` can be used in the annotation files. See the sketch after this note.
-:::
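-
-A minimal sketch of such a data entry, following the usual MMOCR KIE config layout (the field values here are illustrative placeholders, not the shipped config):
-
-```python
-train = dict(
-    type='KIEDataset',
-    ann_file='data/wildreceipt/train.txt',
-    dict_file='data/wildreceipt/dict.txt',
-    img_prefix='data/wildreceipt/',  # placeholder; never read by the novisual model
-)
-```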
-
-### WildReceiptOpenset
-
-|                                     Method                                     | Modality | Edge F1-Score | Node Macro F1-Score | Node Micro F1-Score |                                                                                                Download                                                                                                |
-| :----------------------------------------------------------------------------: | :------: | :-----------: | :-----------------: | :-----------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| [sdmgr_novisual](/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_openset.py) | Textual  |     0.786     |        0.926        |        0.935        | [model](https://download.openmmlab.com/mmocr/kie/sdmgr/sdmgr_novisual_60e_wildreceipt_openset_20210917-d236b3ea.pth) \| [log](https://download.openmmlab.com/mmocr/kie/sdmgr/20210917_050824.log.json) |
-
-
-:::{note}
-1. In the openset case, the number of node categories is not fixed in advance, and more node categories can be added.
-2. To show that our method can handle the openset problem, we modify the ground truth of `WildReceipt` into `WildReceiptOpenset`. The `nodes` are classified into just 4 classes (`background`, `key`, `value`, `others`), while `edge` labels are added for each box.
-3. The model is used to predict whether two nodes form a pair connected by a valid edge.
-4. You can learn more about the key differences between CloseSet and OpenSet annotations in our [tutorial](tutorials/kie_closeset_openset.md).
-:::
-
-## Citation
-
-```bibtex
-@misc{sun2021spatial,
-      title={Spatial Dual-Modality Graph Reasoning for Key Information Extraction},
-      author={Hongbin Sun and Zhanghui Kuang and Xiaoyu Yue and Chenhao Lin and Wayne Zhang},
-      year={2021},
-      eprint={2103.14470},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV}
-}
-```
diff --git a/spaces/tomofi/MMOCR/tests/test_models/test_modules.py b/spaces/tomofi/MMOCR/tests/test_models/test_modules.py
deleted file mode 100644
index 9e19ea3b2b7b9f9f1429a0ebcc8705b699dbbceb..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/tests/test_models/test_modules.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
-import torch
-
-from mmocr.models.textdet.modules import GCN, LocalGraphs, ProposalLocalGraphs
-from mmocr.models.textdet.modules.utils import (feature_embedding,
-                                                normalize_adjacent_matrix)
-
-
-def test_local_graph_forward_train():
-    geo_feat_len = 24
-    pooling_h, pooling_w = pooling_out_size = (2, 2)
-    num_rois = 32
-
-    local_graph_generator = LocalGraphs((4, 4), 3, geo_feat_len, 1.0,
-                                        pooling_out_size, 0.5)
-
-    feature_maps = torch.randn((2, 3, 128, 128), dtype=torch.float)
-    x = np.random.randint(4, 124, (num_rois, 1))
-    y = np.random.randint(4, 124, (num_rois, 1))
-    h = 4 * np.ones((num_rois, 1))
-    w = 4 * np.ones((num_rois, 1))
-    angle = (np.random.random_sample((num_rois, 1)) * 2 - 1) * np.pi / 2
-    cos, sin = np.cos(angle), np.sin(angle)
-    comp_labels = np.random.randint(1, 3, (num_rois, 1))
-    num_rois = num_rois * np.ones((num_rois, 1))
-    comp_attribs = np.hstack([num_rois, x, y, h, w, cos, sin, comp_labels])
-    comp_attribs = comp_attribs.astype(np.float32)
-    comp_attribs_ = comp_attribs.copy()
-    comp_attribs = np.stack([comp_attribs, comp_attribs_])
-
-    (node_feats, adjacent_matrix, knn_inds,
-     linkage_labels) = local_graph_generator(feature_maps, comp_attribs)
-    feat_len = geo_feat_len + feature_maps.size()[1] * pooling_h * pooling_w
-
-    assert node_feats.dim() == adjacent_matrix.dim() == 3
-    assert node_feats.size()[-1] == feat_len
-    assert knn_inds.size()[-1] == 4
-    assert linkage_labels.size()[-1] == 4
-    assert (node_feats.size()[0] == adjacent_matrix.size()[0] ==
-            knn_inds.size()[0] == linkage_labels.size()[0])
-    assert (node_feats.size()[1] == adjacent_matrix.size()[1] ==
-            adjacent_matrix.size()[2])
-
-
-def test_local_graph_forward_test():
-    geo_feat_len = 24
-    pooling_h, pooling_w = pooling_out_size = (2, 2)
-
-    local_graph_generator = ProposalLocalGraphs(
-        (4, 4), 2, geo_feat_len, 1., pooling_out_size, 0.1, 3., 6., 1., 0.5,
-        0.3, 0.5, 0.5, 2)
-
-    maps = torch.zeros((1, 6, 224, 224), dtype=torch.float)
-    maps[:, 0:2, :, :] = -10.
-    maps[:, 0, 60:100, 50:170] = 10.
-    maps[:, 1, 75:85, 60:160] = 10.
-    maps[:, 2, 75:85, 60:160] = 0.
-    maps[:, 3, 75:85, 60:160] = 1.
-    maps[:, 4, 75:85, 60:160] = 10.
-    maps[:, 5, 75:85, 60:160] = 10.
-    feature_maps = torch.randn((2, 6, 224, 224), dtype=torch.float)
-    feat_len = geo_feat_len + feature_maps.size()[1] * pooling_h * pooling_w
-
-    none_flag, graph_data = local_graph_generator(maps, feature_maps)
-    (node_feats, adjacent_matrices, knn_inds, local_graphs,
-     text_comps) = graph_data
-
-    assert none_flag is False
-    assert text_comps.ndim == 2
-    assert text_comps.shape[0] > 0
-    assert text_comps.shape[1] == 9
-    assert (node_feats.size()[0] == adjacent_matrices.size()[0] ==
-            knn_inds.size()[0] == local_graphs.size()[0] ==
-            text_comps.shape[0])
-    assert (node_feats.size()[1] == adjacent_matrices.size()[1] ==
-            adjacent_matrices.size()[2] == local_graphs.size()[1])
-    assert node_feats.size()[-1] == feat_len
-
-    # test proposal local graphs with area of center region less than threshold
-    maps[:, 1, 75:85, 60:160] = -10.
-    maps[:, 1, 80, 80] = 10.
-    none_flag, _ = local_graph_generator(maps, feature_maps)
-    assert none_flag
-
-    # test proposal local graphs with one text component
-    local_graph_generator = ProposalLocalGraphs(
-        (4, 4), 2, geo_feat_len, 1., pooling_out_size, 0.1, 8., 20., 1., 0.5,
-        0.3, 0.5, 0.5, 2)
-    maps[:, 1, 78:82, 78:82] = 10.
-    none_flag, _ = local_graph_generator(maps, feature_maps)
-    assert none_flag
-
-    # test proposal local graphs with text components out of text region
-    maps[:, 0, 60:100, 50:170] = -10.
-    maps[:, 0, 78:82, 78:82] = 10.
-    none_flag, _ = local_graph_generator(maps, feature_maps)
-    assert none_flag
-
-
-def test_gcn():
-    num_local_graphs = 32
-    num_max_graph_nodes = 16
-    input_feat_len = 512
-    k = 8
-    gcn = GCN(input_feat_len)
-    node_feat = torch.randn(
-        (num_local_graphs, num_max_graph_nodes, input_feat_len))
-    adjacent_matrix = torch.rand(
-        (num_local_graphs, num_max_graph_nodes, num_max_graph_nodes))
-    knn_inds = torch.randint(1, num_max_graph_nodes, (num_local_graphs, k))
-    output = gcn(node_feat, adjacent_matrix, knn_inds)
-    assert output.size() == (num_local_graphs * k, 2)
-
-
-def test_normalize_adjacent_matrix():
-    adjacent_matrix = np.random.randint(0, 2, (16, 16))
-    normalized_matrix = normalize_adjacent_matrix(adjacent_matrix)
-    assert normalized_matrix.shape == adjacent_matrix.shape
-
-
-def test_feature_embedding():
-    out_feat_len = 48
-
-    # test without residue dimensions
-    feats = np.random.randn(10, 8)
-    embed_feats = feature_embedding(feats, out_feat_len)
-    assert embed_feats.shape == (10, out_feat_len)
-
-    # test with residue dimensions
-    feats = np.random.randn(10, 9)
-    embed_feats = feature_embedding(feats, out_feat_len)
-    assert embed_feats.shape == (10, out_feat_len)
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py
deleted file mode 100644
index 13a4645bfdb50d5a2f04cee49ecc5f7647d10acf..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(
-    backbone=dict(plugins=[
-        dict(
-            cfg=dict(
-                type='GeneralizedAttention',
-                spatial_range=-1,
-                num_heads=8,
-                attention_type='1111',
-                kv_stride=2),
-            stages=(False, False, True, True),
-            position='after_conv2')
-    ]))
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
deleted file mode 100644
index b0164c75a976fa4dfd729147f9656d4e01c3529c..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
-model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
-classes = ('person', )
-data = dict(
-    train=dict(classes=classes),
-    val=dict(classes=classes),
-    test=dict(classes=classes))
-
-load_from = 'http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth'  # noqa
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
deleted file mode 100644
index 452b0fe2d89566a998744d9c7812e550596462e3..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w40',
-    backbone=dict(
-        type='HRNet',
-        extra=dict(
-            stage2=dict(num_channels=(40, 80)),
-            stage3=dict(num_channels=(40, 80, 160)),
-            stage4=dict(num_channels=(40, 80, 160, 320)))),
-    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
deleted file mode 100644
index e4107e7f8985deaaf0287d6b7347521970babf1e..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
+++ /dev/null
@@ -1,65 +0,0 @@
-_base_ = [
-    '../_base_/models/mask_rcnn_r50_fpn.py',
-    '../_base_/datasets/coco_instance.py',
-    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-model = dict(
-    pretrained='open-mmlab://regnetx_3.2gf',
-    backbone=dict(
-        _delete_=True,
-        type='RegNet',
-        arch='regnetx_3.2gf',
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        norm_eval=True,
-        style='pytorch'),
-    neck=dict(
-        type='FPN',
-        in_channels=[96, 192, 432, 1008],
-        out_channels=256,
-        num_outs=5))
-img_norm_cfg = dict(
-    # The mean and std are used in PyCls when training RegNets
-    mean=[103.53, 116.28, 123.675],
-    std=[57.375, 57.12, 58.395],
-    to_rgb=False)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-    dict(
-        type='Resize',
-        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
-                   (1333, 768), (1333, 800)],
-        multiscale_mode='value',
-        keep_ratio=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=32),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(1333, 800),
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
-lr_config = dict(step=[28, 34])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
-optimizer_config = dict(
-    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/mask_target.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/mask_target.py
deleted file mode 100644
index 15d26a88bbf3710bd92813335918407db8c4e053..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/mask_target.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import numpy as np
-import torch
-from torch.nn.modules.utils import _pair
-
-
-def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
-                cfg):
-    """Compute mask target for positive proposals in multiple images.
-
-    Args:
-        pos_proposals_list (list[Tensor]): Positive proposals in multiple
-            images.
-        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
-            positive proposals.
-        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
-            each image.
-        cfg (dict): Config dict that specifies the mask size.
-
-    Returns:
-        list[Tensor]: Mask target of each image.
-
-    Example:
-        >>> import mmcv
-        >>> import mmdet
-        >>> from mmdet.core.mask import BitmapMasks
-        >>> from mmdet.core.mask.mask_target import *
-        >>> H, W = 17, 18
-        >>> cfg = mmcv.Config({'mask_size': (13, 14)})
-        >>> rng = np.random.RandomState(0)
-        >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
-        >>> pos_proposals_list = [
-        >>>     torch.Tensor([
-        >>>         [ 7.2425,  5.5929, 13.9414, 14.9541],
-        >>>         [ 7.3241,  3.6170, 16.3850, 15.3102],
-        >>>     ]),
-        >>>     torch.Tensor([
-        >>>         [ 4.8448, 6.4010, 7.0314, 9.7681],
-        >>>         [ 5.9790, 2.6989, 7.4416, 4.8580],
-        >>>         [ 0.0000, 0.0000, 0.1398, 9.8232],
-        >>>     ]),
-        >>> ]
-        >>> # Corresponding class index for each proposal for each image
-        >>> pos_assigned_gt_inds_list = [
-        >>>     torch.LongTensor([7, 0]),
-        >>>     torch.LongTensor([5, 4, 1]),
-        >>> ]
-        >>> # Ground truth mask for each true object for each image
-        >>> gt_masks_list = [
-        >>>     BitmapMasks(rng.rand(8, H, W), height=H, width=W),
-        >>>     BitmapMasks(rng.rand(6, H, W), height=H, width=W),
-        >>> ]
-        >>> mask_targets = mask_target(
-        >>>     pos_proposals_list, pos_assigned_gt_inds_list,
-        >>>     gt_masks_list, cfg)
-        >>> assert mask_targets.shape == (5,) + cfg['mask_size']
-    """
-    cfg_list = [cfg for _ in range(len(pos_proposals_list))]
-    mask_targets = map(mask_target_single, pos_proposals_list,
-                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
-    mask_targets = list(mask_targets)
-    if len(mask_targets) > 0:
-        mask_targets = torch.cat(mask_targets)
-    return mask_targets
-
-
-def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
-    """Compute mask target for each positive proposal in the image.
-
-    Args:
-        pos_proposals (Tensor): Positive proposals.
-        pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
-        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
-            or Polygon.
-        cfg (dict): Config dict that indicate the mask size.
-
-    Returns:
-        Tensor: Mask target of each positive proposals in the image.
-
-    Example:
-        >>> import mmcv
-        >>> import mmdet
-        >>> from mmdet.core.mask import BitmapMasks
-        >>> from mmdet.core.mask.mask_target import *  # NOQA
-        >>> H, W = 32, 32
-        >>> cfg = mmcv.Config({'mask_size': (7, 11)})
-        >>> rng = np.random.RandomState(0)
-        >>> # Masks for each ground truth box (relative to the image)
-        >>> gt_masks_data = rng.rand(3, H, W)
-        >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
-        >>> # Predicted positive boxes in one image
-        >>> pos_proposals = torch.FloatTensor([
-        >>>     [ 16.2,   5.5, 19.9, 20.9],
-        >>>     [ 17.3,  13.6, 19.3, 19.3],
-        >>>     [ 14.8,  16.4, 17.0, 23.7],
-        >>>     [  0.0,   0.0, 16.0, 16.0],
-        >>>     [  4.0,   0.0, 20.0, 16.0],
-        >>> ])
-        >>> # For each predicted proposal, its assignment to a gt mask
-        >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
-        >>> mask_targets = mask_target_single(
-        >>>     pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
-        >>> assert mask_targets.shape == (5,) + cfg['mask_size']
-    """
-    device = pos_proposals.device
-    mask_size = _pair(cfg.mask_size)
-    num_pos = pos_proposals.size(0)
-    if num_pos > 0:
-        proposals_np = pos_proposals.cpu().numpy()
-        maxh, maxw = gt_masks.height, gt_masks.width
-        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
-        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
-        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
-
-        mask_targets = gt_masks.crop_and_resize(
-            proposals_np, mask_size, device=device,
-            inds=pos_assigned_gt_inds).to_ndarray()
-
-        mask_targets = torch.from_numpy(mask_targets).float().to(device)
-    else:
-        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
-
-    return mask_targets
diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/lr_scheduler.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/lr_scheduler.py
deleted file mode 100644
index be39da9ca6dacc22bf3df9c7389bbb403a4a3ade..0000000000000000000000000000000000000000
--- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/lr_scheduler.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import numpy as np
-
-
-class LambdaWarmUpCosineScheduler:
-    """
-    note: use with a base_lr of 1.0
-    """
-    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
-        self.lr_warm_up_steps = warm_up_steps
-        self.lr_start = lr_start
-        self.lr_min = lr_min
-        self.lr_max = lr_max
-        self.lr_max_decay_steps = max_decay_steps
-        self.last_lr = 0.
-        self.verbosity_interval = verbosity_interval
-
-    def schedule(self, n, **kwargs):
-        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
-            print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
-        if n < self.lr_warm_up_steps:
-            lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
-            self.last_lr = lr
-            return lr
-        else:
-            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
-            t = min(t, 1.0)
-            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
-                    1 + np.cos(t * np.pi))
-            self.last_lr = lr
-            return lr
-
-    def __call__(self, n, **kwargs):
-        return self.schedule(n,**kwargs)
-
-
-class LambdaWarmUpCosineScheduler2:
-    """
-    supports repeated iterations, configurable via lists
-    note: use with a base_lr of 1.0.
-    """
-    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
-        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
-        self.lr_warm_up_steps = warm_up_steps
-        self.f_start = f_start
-        self.f_min = f_min
-        self.f_max = f_max
-        self.cycle_lengths = cycle_lengths
-        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
-        self.last_f = 0.
-        self.verbosity_interval = verbosity_interval
-
-    def find_in_interval(self, n):
-        interval = 0
-        for cl in self.cum_cycles[1:]:
-            if n <= cl:
-                return interval
-            interval += 1
-
-    def schedule(self, n, **kwargs):
-        cycle = self.find_in_interval(n)
-        n = n - self.cum_cycles[cycle]
-        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
-            print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
-                  f"current cycle {cycle}")
-        if n < self.lr_warm_up_steps[cycle]:
-            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
-            self.last_f = f
-            return f
-        else:
-            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
-            t = min(t, 1.0)
-            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
-                    1 + np.cos(t * np.pi))
-            self.last_f = f
-            return f
-
-    def __call__(self, n, **kwargs):
-        return self.schedule(n, **kwargs)
-
-
-class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
-
-    def schedule(self, n, **kwargs):
-        cycle = self.find_in_interval(n)
-        n = n - self.cum_cycles[cycle]
-        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
-            print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
-                  f"current cycle {cycle}")
-
-        if n < self.lr_warm_up_steps[cycle]:
-            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
-            self.last_f = f
-            return f
-        else:
-            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
-            self.last_f = f
-            return f
-
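-
-if __name__ == "__main__":  # pragma: no cover
-    # Added usage sketch: these classes return LR *multipliers*, so they are
-    # meant to be wrapped in torch.optim.lr_scheduler.LambdaLR with a base LR
-    # of 1.0, as the docstrings note (hyperparameters below are arbitrary).
-    import torch
-
-    params = [torch.nn.Parameter(torch.zeros(1))]
-    optimizer = torch.optim.SGD(params, lr=1.0)
-    warmup_cosine = LambdaWarmUpCosineScheduler(
-        warm_up_steps=100, lr_min=0.01, lr_max=1.0, lr_start=0.0,
-        max_decay_steps=1000)
-    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warmup_cosine)
-    for _ in range(5):
-        optimizer.step()
-        scheduler.step()
-        print(optimizer.param_groups[0]["lr"])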
diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/image_degradation/__init__.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/image_degradation/__init__.py
deleted file mode 100644
index 7836cada81f90ded99c58d5942eea4c3477f58fc..0000000000000000000000000000000000000000
--- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/image_degradation/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
-from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
diff --git a/spaces/triggah61/chingu-music/audiocraft/modules/conv.py b/spaces/triggah61/chingu-music/audiocraft/modules/conv.py
deleted file mode 100644
index 972938ab84712eb06e1b10cea25444eee51d6637..0000000000000000000000000000000000000000
--- a/spaces/triggah61/chingu-music/audiocraft/modules/conv.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import typing as tp
-import warnings
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torch.nn.utils import spectral_norm, weight_norm
-
-
-CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
-                                 'time_group_norm'])
-
-
-def apply_parametrization_norm(module: nn.Module, norm: str = 'none'):
-    assert norm in CONV_NORMALIZATIONS
-    if norm == 'weight_norm':
-        return weight_norm(module)
-    elif norm == 'spectral_norm':
-        return spectral_norm(module)
-    else:
-        # We already checked that `norm` is in CONV_NORMALIZATIONS, so any
-        # other choice doesn't need reparametrization.
-        return module
-
-
-def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs):
-    """Return the proper normalization module. If causal is True, this will ensure the returned
-    module is causal, or raise an error if the normalization doesn't support causal evaluation.
-    """
-    assert norm in CONV_NORMALIZATIONS
-    if norm == 'time_group_norm':
-        if causal:
-            raise ValueError("GroupNorm doesn't support causal evaluation.")
-        assert isinstance(module, nn.modules.conv._ConvNd)
-        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
-    else:
-        return nn.Identity()
-
-
-def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
-                                 padding_total: int = 0) -> int:
-    """See `pad_for_conv1d`.
-    """
-    length = x.shape[-1]
-    n_frames = (length - kernel_size + padding_total) / stride + 1
-    ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
-    return ideal_length - length
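-
-# Added example, matching the diagram in ``pad_for_conv1d`` below (kernel 4,
-# stride 2, total padding 4): an input of length 5 needs one extra padded
-# sample so the last window is full:
-# >>> get_extra_padding_for_conv1d(torch.zeros(1, 1, 5), 4, 2, padding_total=4)
-# 1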
-
-
-def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
-    """Pad for a convolution to make sure that the last window is full.
-    Extra padding is added at the end. This is required to ensure that we can rebuild
-    an output of the same length, as otherwise, even with padding, some time steps
-    might get removed.
-    For instance, with total padding = 4, kernel size = 4, stride = 2:
-        0 0 1 2 3 4 5 0 0   # (0s are padding)
-        1   2   3           # (output frames of a convolution, last 0 is never used)
-        0 0 1 2 3 4 5 0     # (output of tr. conv., but pos. 5 is going to get removed as padding)
-            1 2 3 4         # once padding is removed, we are missing one time step!
-    """
-    extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
-    return F.pad(x, (0, extra_padding))
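-
-
-# A minimal worked example, not part of the original module: with
-# kernel_size=4, stride=2 and padding_total=4 (the setting from the
-# docstring above), a length-5 input needs one extra right sample so
-# that the last convolution window is full.
-def _pad_for_conv1d_sketch():
-    x = torch.zeros(1, 1, 5)
-    extra = get_extra_padding_for_conv1d(x, kernel_size=4, stride=2, padding_total=4)
-    assert extra == 1
-    assert pad_for_conv1d(x, kernel_size=4, stride=2, padding_total=4).shape[-1] == 6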
-
-
-def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
-    """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
-    If the input is too small, we insert extra 0 padding to the right before the reflection happens.
-    """
-    length = x.shape[-1]
-    padding_left, padding_right = paddings
-    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
-    if mode == 'reflect':
-        max_pad = max(padding_left, padding_right)
-        extra_pad = 0
-        if length <= max_pad:
-            extra_pad = max_pad - length + 1
-            x = F.pad(x, (0, extra_pad))
-        padded = F.pad(x, paddings, mode, value)
-        end = padded.shape[-1] - extra_pad
-        return padded[..., :end]
-    else:
-        return F.pad(x, paddings, mode, value)
-
-
-def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
-    """Remove padding from x, handling properly zero padding. Only for 1d!
-    """
-    padding_left, padding_right = paddings
-    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
-    assert (padding_left + padding_right) <= x.shape[-1]
-    end = x.shape[-1] - padding_right
-    return x[..., padding_left: end]
-
-
-class NormConv1d(nn.Module):
-    """Wrapper around Conv1d and normalization applied to this conv
-    to provide a uniform interface across normalization approaches.
-    """
-    def __init__(self, *args, causal: bool = False, norm: str = 'none',
-                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
-        super().__init__()
-        self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
-        self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
-        self.norm_type = norm
-
-    def forward(self, x):
-        x = self.conv(x)
-        x = self.norm(x)
-        return x
-
-
-class NormConv2d(nn.Module):
-    """Wrapper around Conv2d and normalization applied to this conv
-    to provide a uniform interface across normalization approaches.
-    """
-    def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
-        super().__init__()
-        self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
-        self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
-        self.norm_type = norm
-
-    def forward(self, x):
-        x = self.conv(x)
-        x = self.norm(x)
-        return x
-
-
-class NormConvTranspose1d(nn.Module):
-    """Wrapper around ConvTranspose1d and normalization applied to this conv
-    to provide a uniform interface across normalization approaches.
-    """
-    def __init__(self, *args, causal: bool = False, norm: str = 'none',
-                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
-        super().__init__()
-        self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
-        self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
-        self.norm_type = norm
-
-    def forward(self, x):
-        x = self.convtr(x)
-        x = self.norm(x)
-        return x
-
-
-class NormConvTranspose2d(nn.Module):
-    """Wrapper around ConvTranspose2d and normalization applied to this conv
-    to provide a uniform interface across normalization approaches.
-    """
-    def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
-        super().__init__()
-        self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
-        self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
-
-    def forward(self, x):
-        x = self.convtr(x)
-        x = self.norm(x)
-        return x
-
-
-class StreamableConv1d(nn.Module):
-    """Conv1d with some builtin handling of asymmetric or causal padding
-    and normalization.
-    """
-    def __init__(self, in_channels: int, out_channels: int,
-                 kernel_size: int, stride: int = 1, dilation: int = 1,
-                 groups: int = 1, bias: bool = True, causal: bool = False,
-                 norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
-                 pad_mode: str = 'reflect'):
-        super().__init__()
-        # warn user on unusual setup between dilation and stride
-        if stride > 1 and dilation > 1:
-            warnings.warn('StreamableConv1d has been initialized with stride > 1 and dilation > 1'
-                          f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
-        self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
-                               dilation=dilation, groups=groups, bias=bias, causal=causal,
-                               norm=norm, norm_kwargs=norm_kwargs)
-        self.causal = causal
-        self.pad_mode = pad_mode
-
-    def forward(self, x):
-        B, C, T = x.shape
-        kernel_size = self.conv.conv.kernel_size[0]
-        stride = self.conv.conv.stride[0]
-        dilation = self.conv.conv.dilation[0]
-        kernel_size = (kernel_size - 1) * dilation + 1  # effective kernel size with dilations
-        padding_total = kernel_size - stride
-        extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
-        if self.causal:
-            # Left padding for causal
-            x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
-        else:
-            # Asymmetric padding, needed when padding_total is odd
-            padding_right = padding_total // 2
-            padding_left = padding_total - padding_right
-            x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
-        return self.conv(x)
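-
-
-# A minimal usage sketch, not part of the original module: with causal
-# padding the output length is ceil(T / stride) regardless of kernel size,
-# which is what makes the layer suitable for streaming.
-def _streamable_conv1d_sketch():
-    conv = StreamableConv1d(8, 16, kernel_size=7, stride=2, causal=True)
-    y = conv(torch.randn(1, 8, 100))
-    assert y.shape == (1, 16, 50)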
-
-
-class StreamableConvTranspose1d(nn.Module):
-    """ConvTranspose1d with some builtin handling of asymmetric or causal padding
-    and normalization.
-    """
-    def __init__(self, in_channels: int, out_channels: int,
-                 kernel_size: int, stride: int = 1, causal: bool = False,
-                 norm: str = 'none', trim_right_ratio: float = 1.,
-                 norm_kwargs: tp.Dict[str, tp.Any] = {}):
-        super().__init__()
-        self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
-                                          causal=causal, norm=norm, norm_kwargs=norm_kwargs)
-        self.causal = causal
-        self.trim_right_ratio = trim_right_ratio
-        assert self.causal or self.trim_right_ratio == 1., \
-            "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
-        assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
-
-    def forward(self, x):
-        kernel_size = self.convtr.convtr.kernel_size[0]
-        stride = self.convtr.convtr.stride[0]
-        padding_total = kernel_size - stride
-
-        y = self.convtr(x)
-
-        # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
-        # removed at the very end, when keeping only the right length for the output,
-        # as removing it here would require also passing the length at the matching layer
-        # in the encoder.
-        if self.causal:
-            # Trim the padding on the right according to the specified ratio
-            # if trim_right_ratio = 1.0, trim everything from right
-            padding_right = math.ceil(padding_total * self.trim_right_ratio)
-            padding_left = padding_total - padding_right
-            y = unpad1d(y, (padding_left, padding_right))
-        else:
-            # Asymmetric padding, needed when padding_total is odd
-            padding_right = padding_total // 2
-            padding_left = padding_total - padding_right
-            y = unpad1d(y, (padding_left, padding_right))
-        return y
diff --git a/spaces/tumuyan/RealSR/waifu2x/README.md b/spaces/tumuyan/RealSR/waifu2x/README.md
deleted file mode 100644
index 635697e4b77476267b044a3217abe816a8dc78a6..0000000000000000000000000000000000000000
--- a/spaces/tumuyan/RealSR/waifu2x/README.md
+++ /dev/null
@@ -1,179 +0,0 @@
-# waifu2x ncnn Vulkan
-
-![CI](https://github.com/nihui/waifu2x-ncnn-vulkan/workflows/CI/badge.svg)
-![download](https://img.shields.io/github/downloads/nihui/waifu2x-ncnn-vulkan/total.svg)
-
-ncnn implementation of the waifu2x converter. Runs fast on Intel / AMD / Nvidia / Apple Silicon GPUs with the Vulkan API.
-
-waifu2x-ncnn-vulkan uses [ncnn project](https://github.com/Tencent/ncnn) as the universal neural network inference framework.
-
-## [Download](https://github.com/nihui/waifu2x-ncnn-vulkan/releases)
-
-Download the Windows/Linux/macOS executable for Intel/AMD/Nvidia GPUs
-
-**https://github.com/nihui/waifu2x-ncnn-vulkan/releases**
-
-This package includes all the binaries and models required. It is portable, so no CUDA or Caffe runtime environment is needed :)
-
-## Usage
-
-### Example Command
-
-```shell
-waifu2x-ncnn-vulkan.exe -i input.jpg -o output.png -n 2 -s 2
-```
-
-### Full Usage
-
-```console
-Usage: waifu2x-ncnn-vulkan -i infile -o outfile [options]...
-
-  -h                   show this help
-  -v                   verbose output
-  -i input-path        input image path (jpg/png/webp) or directory
-  -o output-path       output image path (jpg/png/webp) or directory
-  -n noise-level       denoise level (-1/0/1/2/3, default=0)
-  -s scale             upscale ratio (1/2/4/8/16/32, default=2)
-  -t tile-size         tile size (>=32/0=auto, default=0) can be 0,0,0 for multi-gpu
-  -m model-path        waifu2x model path (default=models-cunet)
-  -g gpu-id            gpu device to use (-1=cpu, default=auto) can be 0,1,2 for multi-gpu
-  -j load:proc:save    thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu
-  -x                   enable tta mode
-  -f format            output image format (jpg/png/webp, default=ext/png)
-```
-
-- `input-path` and `output-path` accept either file path or directory path
-- `noise-level` = noise level, large value means strong denoise effect, -1 = no effect
-- `scale` = scale level, 1 = no scaling, 2 = upscale 2x
-- `tile-size` = tile size, use smaller value to reduce GPU memory usage, default selects automatically
-- `load:proc:save` = thread counts for the three stages (image decoding + waifu2x upscaling + image encoding); larger values may increase GPU usage and consume more GPU memory. Try "4:4:4" for many small images and "2:2:2" for large images; the default usually works fine. If your GPU is underutilized, increase the thread counts for faster processing (see the example after this list).
-- `format` = the output image format; png is better supported, but webp generally yields smaller files, and both are losslessly encoded
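-
-For example, a two-GPU invocation combining the options above might look like this (illustrative only; adjust the GPU ids and thread counts to your machine):
-
-```shell
-waifu2x-ncnn-vulkan.exe -i input-folder -o output-folder -n 2 -s 2 -g 0,1 -t 0,0 -j 1:2,2:2
-```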
-
-If you encounter a crash or error, try upgrading your GPU driver:
-
-- Intel: https://downloadcenter.intel.com/product/80939/Graphics-Drivers
-- AMD: https://www.amd.com/en/support
-- NVIDIA: https://www.nvidia.com/Download/index.aspx
-
-## Build from Source
-
-1. Download and setup the Vulkan SDK from https://vulkan.lunarg.com/
-  - On Linux distributions, you can get the essential build requirements from your package manager; pick the command for your distribution:
-```shell
-dnf install vulkan-headers vulkan-loader-devel
-```
-```shell
-apt-get install libvulkan-dev
-```
-```shell
-pacman -S vulkan-headers vulkan-icd-loader
-```
-
-2. Clone this project with all submodules
-
-```shell
-git clone https://github.com/nihui/waifu2x-ncnn-vulkan.git
-cd waifu2x-ncnn-vulkan
-git submodule update --init --recursive
-```
-
-3. Build with CMake
-  - You can pass the `-DUSE_STATIC_MOLTENVK=ON` option to avoid linking the Vulkan loader library on macOS
-
-```shell
-mkdir build
-cd build
-cmake ../src
-cmake --build . -j 4
-```
-
-## Speed Comparison with waifu2x-caffe-cui
-
-### Environment
-
-- Windows 10 1809
-- AMD R7-1700
-- Nvidia GTX-1070
-- Nvidia driver 419.67
-- CUDA 10.1.105
-- cuDNN 10.1
-
-```powershell
-Measure-Command { waifu2x-ncnn-vulkan.exe -i input.png -o output.png -n 2 -s 2 -t [block size] -m [model dir] }
-```
-
-```powershell
-Measure-Command { waifu2x-caffe-cui.exe -t 0 --gpu 0 -b 1 -c [block size] -p cudnn --model_dir [model dir] -s 2 -n 2 -m noise_scale -i input.png -o output.png }
-```
-
-### cunet
-
-|Model|Image Size|Target Size|Block Size|Total Time(s)|GPU Memory(MB)|
-|---|---|---|---|---|---|
-|waifu2x-ncnn-vulkan|200x200|400x400|400/200/100|0.86/0.86/0.82|638/638/197|
-|waifu2x-caffe-cui|200x200|400x400|400/200/100|2.54/2.39/2.36|3017/936/843|
-|waifu2x-ncnn-vulkan|400x400|800x800|400/200/100|1.17/1.04/1.02|2430/638/197|
-|waifu2x-caffe-cui|400x400|800x800|400/200/100|2.91/2.43/2.7|3202/1389/1178|
-|waifu2x-ncnn-vulkan|1000x1000|2000x2000|400/200/100|2.35/2.26/2.46|2430/638/197|
-|waifu2x-caffe-cui|1000x1000|2000x2000|400/200/100|4.04/3.79/4.35|3258/1582/1175|
-|waifu2x-ncnn-vulkan|2000x2000|4000x4000|400/200/100|6.46/6.59/7.49|2430/686/213|
-|waifu2x-caffe-cui|2000x2000|4000x4000|400/200/100|7.01/7.54/10.11|3258/1499/1200|
-|waifu2x-ncnn-vulkan|4000x4000|8000x8000|400/200/100|22.78/23.78/27.61|2448/654/213|
-|waifu2x-caffe-cui|4000x4000|8000x8000|400/200/100|18.45/21.85/31.82|3325/1652/1236|
-
-### upconv_7_anime_style_art_rgb
-
-|Model|Image Size|Target Size|Block Size|Total Time(s)|GPU Memory(MB)|
-|---|---|---|---|---|---|
-|waifu2x-ncnn-vulkan|200x200|400x400|400/200/100|0.74/0.75/0.72|482/482/142|
-|waifu2x-caffe-cui|200x200|400x400|400/200/100|2.04/1.99/1.99|995/546/459|
-|waifu2x-ncnn-vulkan|400x400|800x800|400/200/100|0.95/0.83/0.81|1762/482/142|
-|waifu2x-caffe-cui|400x400|800x800|400/200/100|2.08/2.12/2.11|995/546/459|
-|waifu2x-ncnn-vulkan|1000x1000|2000x2000|400/200/100|1.52/1.41/1.44|1778/482/142|
-|waifu2x-caffe-cui|1000x1000|2000x2000|400/200/100|2.72/2.60/2.68|1015/570/459|
-|waifu2x-ncnn-vulkan|2000x2000|4000x4000|400/200/100|3.45/3.42/3.63|1778/482/142|
-|waifu2x-caffe-cui|2000x2000|4000x4000|400/200/100|3.90/4.01/4.35|1015/521/462|
-|waifu2x-ncnn-vulkan|4000x4000|8000x8000|400/200/100|11.16/11.29/12.07|1796/498/158|
-|waifu2x-caffe-cui|4000x4000|8000x8000|400/200/100|9.24/9.81/11.16|995/546/436|
-
-## Sample Images
-
-### Original Image
-
-![origin](images/0.jpg)
-
-### Upscale 2x with ImageMagick
-
-```shell
-convert origin.jpg -resize 200% output.png
-```
-
-![browser](images/1.png)
-
-### Upscale 2x with ImageMagick Lanczos Filter
-
-```shell
-convert origin.jpg -filter Lanczos -resize 200% output.png
-```
-
-![browser](images/4.png)
-
-### Upscale 2x with waifu2x noise=2 scale=2
-
-```shell
-waifu2x-ncnn-vulkan.exe -i origin.jpg -o output.png -n 2 -s 2
-```
-
-![waifu2x](images/2.png)
-
-## Original waifu2x Project
-
-- https://github.com/nagadomi/waifu2x
-- https://github.com/lltcggie/waifu2x-caffe
-
-## Other Open-Source Code Used
-
-- https://github.com/Tencent/ncnn for fast neural network inference on ALL PLATFORMS
-- https://github.com/webmproject/libwebp for encoding and decoding Webp images on ALL PLATFORMS
-- https://github.com/nothings/stb for decoding and encoding images on Linux / MacOS
-- https://github.com/tronkko/dirent for listing files in directory on Windows
diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_5.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_5.py
deleted file mode 100644
index 34d68328bc81a46ce4c1fa733da97fb49ec8aa51..0000000000000000000000000000000000000000
--- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_5.py
+++ /dev/null
@@ -1,575 +0,0 @@
-'''
-Adapted from my_GPT4.
-
-Note: the RMSNorm here is different from the one in my_GPT.
-
-Changes the Flash attention activation function.
-Replaces the cheap qk with a regular qk: the attention logits are not divided
-by anything and are kept as-is, and softplus(x, 4) is used as the attention
-activation function.
-
-In a quick random-sequence fitting comparison against my_GPT_3, this model
-fits roughly 1.1x faster, but trains somewhat slower, at about 0.9x.
-
-'''
-import math
-import random
-import torch
-from torch.utils.checkpoint import checkpoint
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.backends.cuda
-import model_utils_torch
-from model_utils_torch import make_nlp_self_attn_mask,\
-    make_sinusoidal_position_embedding, make_sinusoidal_position_channel_embedding,\
-    apply_rotary_position_embedding,\
-    T5_RelativePositionEmbedding,\
-    weighted_and_neg_topk_cross_entropy
-import nlg_utils
-from typing import Optional, Union
-
-
-_TensorOptional = Optional[torch.Tensor]
-
-use_torch_softmax_attn = False
-use_xformers_softmax_attn = False
-if use_xformers_softmax_attn:
-    import xformers.ops
-
-
-@torch.jit.script
-def rms(x, dim: int, keepdim: bool, eps: float=1e-8):
-    return x.square().mean(dim=dim, keepdim=keepdim).add(eps).sqrt()
-
-
-class RmsNorm(torch.jit.ScriptModule):
-    def __init__(self, dim, eps=1e-8):
-        super().__init__()
-        self.weight = nn.Parameter(torch.ones(dim))
-        self.eps = eps
-
-    @torch.jit.script_method
-    def forward(self, x: torch.Tensor):
-        y = x / rms(x, -1, True, self.eps) * self.weight
-        return y
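-
-
-# A minimal sketch, not part of the original file: RmsNorm rescales each
-# token vector to unit RMS (times a learned gain), with no mean subtraction.
-def _rms_norm_sketch():
-    norm = RmsNorm(8)
-    y = norm(torch.randn(2, 3, 8) * 5.)
-    assert torch.allclose(rms(y, -1, False), torch.ones(2, 3), atol=1e-3)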
-
-
-@torch.jit.script
-def latent_rms_loss(
-    input: torch.Tensor,
-    dim: int,
-    mask: Optional[torch.Tensor]=None,
-    rms_bound: float=12.,
-    eps:float=1e-8,
-):
-    '''
-    RMS loss on latent variables, used to suppress the tendency of the latent
-    standard deviation to keep growing.
-    As depth increases, the output ranges of different layers spread too far
-    apart, and a few layers come to dominate the response.
-    This loss suppresses the RMS of the final latent to keep any single layer
-    from over-responding.
-
-    Dimension abbreviations: B is the batch size, C is the token embedding dimension.
-    :param input:       FloatTensor shape [B,...], the model's latent output
-    :param dim:         int, which dimension holds the token embedding
-    :param mask:        BoolTensor shape [B,...] or None, target mask; True means include, False means ignore
-    :param rms_bound:   float, RMS bound; token vectors whose RMS is below this value incur no loss
-    :param eps:         float, eps used in the RMS computation
-    :return:
-    '''
-    v = rms(input, dim, False, eps=eps)
-    v = F.relu(v - rms_bound)
-
-    if mask is not None:
-        v = v[mask]
-    v = v[v>0]
-    if len(v) > 0:
-        v = v.mean()
-    else:
-        v = torch.zeros([], dtype=input.dtype, device=input.device)
-    return v
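-
-
-# A minimal sketch, not part of the original file: the loss is zero until the
-# per-position RMS exceeds rms_bound (default 12), then grows with the excess.
-def _latent_rms_loss_sketch():
-    calm = torch.ones(2, 4, 8)          # RMS = 1  -> no loss
-    loud = torch.full((2, 4, 8), 20.)   # RMS = 20 -> loss ~ 20 - 12 = 8
-    assert latent_rms_loss(calm, dim=-1).item() == 0.
-    assert abs(latent_rms_loss(loud, dim=-1).item() - 8.) < 1e-3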
-
-
-@torch.jit.script
-def attn_act_limit_loss(
-    attn_list: list[torch.Tensor],
-):
-    '''
-    Penalizes overly large attention values; only used with non-softmax activations.
-    Dimension abbreviations: B is the batch size, C is the token embedding dimension.
-    :param attn_list:  list[FloatTensor], tensor shape [B,...,H,qL,kL]
-    :return:
-    '''
-    loss = torch.zeros([], dtype=attn_list[0].dtype, device=attn_list[0].device)
-    # Note: we deliberately avoid concatenating into one big tensor here; reusing the attention matrices avoids a large spike in GPU memory.
-    for a in attn_list:
-        limit = math.sqrt(sum(a.shape[-2:]))
-        a1 = F.relu(a - limit)
-        loss = loss + a1.flatten(-2).sum(-1).mean()
-    return loss
-
-
-class MultiGroupConvLayer(torch.jit.ScriptModule):
-    def __init__(self, in_dim, out_dim):
-        super().__init__()
-        assert in_dim % 4 == 0
-        assert out_dim % 4 == 0
-        in_sub_dim = in_dim // 4
-        out_sub_dim = out_dim // 4
-        self.conv1 = nn.Conv1d(in_sub_dim, out_sub_dim, 2, bias=False)
-        self.conv2 = nn.Conv1d(in_sub_dim, out_sub_dim, 4, bias=False)
-        self.conv3 = nn.Conv1d(in_sub_dim, out_sub_dim, 8, bias=False)
-        self.conv4 = nn.Conv1d(in_sub_dim, out_sub_dim, 16, bias=False)
-
-    @torch.jit.script_method
-    def forward(self, x: torch.Tensor):
-        # x shape [B, L, C]
-        x1, x2, x3, x4 = x.transpose(1, 2).chunk(4, 1)
-        # xs 4x [B, C//4, L]
-
-        x1 = F.pad(x1, [1, 0], 'replicate')
-        x2 = F.pad(x2, [3, 0], 'replicate')
-        x3 = F.pad(x3, [7, 0], 'replicate')
-        x4 = F.pad(x4, [15, 0], 'replicate')
-        y1 = self.conv1(x1) / math.sqrt(2)
-        y2 = self.conv2(x2) / math.sqrt(4)
-        y3 = self.conv3(x3) / math.sqrt(8)
-        y4 = self.conv4(x4) / math.sqrt(16)
-
-        y = torch.cat([y1,y2,y3,y4], 1).transpose(1, 2)
-        return y
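-
-
-# A minimal shape sketch, not part of the original file: the four groups see
-# causal receptive fields of 2/4/8/16 steps, and the replicate left-padding
-# keeps the sequence length unchanged.
-def _multi_group_conv_sketch():
-    layer = MultiGroupConvLayer(16, 32)
-    y = layer(torch.randn(1, 10, 16))   # [B, L, C_in] -> [B, L, C_out]
-    assert y.shape == (1, 10, 32)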
-
-
-@torch.jit.script
-def laplacian_attn_fn(x):
-    """ https://arxiv.org/abs/2209.10655 claims this is more stable than Relu squared """
-    mu = math.sqrt(0.5)
-    std = math.sqrt(0.25 * math.pi)
-    return (1 + torch.special.erf((x - mu) / (std * math.sqrt(2)))) * 0.5
-
-
-@torch.jit.script
-def flash_quad_cross_attention(q, k, v, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, attn_act_fn: str='softplus4'):
-    '''
-    :param q:   shape [B,...,H,qL,C]
-    :param k:   shape [B,...,H,kL,C]
-    :param v:   shape [B,...,H,kL,C]
-    :param attn_mul:    shape [B,...,H,qL,kL]
-    :param attn_bias:   shape [B,...,H,qL,kL]
-    :param attn_act_fn:
-    :return:
-    '''
-    assert q.ndim >= 4
-    assert attn_act_fn in ('softplus4', 'softmax', 'laplacian', 'relu2'), 'Error! Invalid param attn_act_fn: ' + attn_act_fn
-
-    q = q * (q.shape[-1] ** -0.5)
-    a = q @ k.transpose(-1, -2).contiguous()
-    # qk shape [B, ..., H, qL, kL]
-
-    if attn_mul is not None:
-        a *= attn_mul
-
-    if attn_bias is not None:
-        a += attn_bias
-
-    if attn_act_fn == 'softplus4':
-        a = F.softplus(a, 4)
-    elif attn_act_fn == 'softmax':
-        a = F.softmax(a, -1)
-    elif attn_act_fn == 'laplacian':
-        a = laplacian_attn_fn(a)
-    elif attn_act_fn == 'relu2':
-        a = F.relu(a) ** 2
-
-    o = a @ v
-    return o, a
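-
-
-# A minimal shape sketch, not part of the original file: each head attends
-# over a qL x kL logit matrix, and both the output and the (activated)
-# attention matrix are returned.
-def _flash_quad_attention_sketch():
-    q = torch.randn(2, 4, 10, 16)   # [B, H, qL, C]
-    k = torch.randn(2, 4, 12, 16)   # [B, H, kL, C]
-    v = torch.randn(2, 4, 12, 32)   # [B, H, kL, C_v]
-    o, a = flash_quad_cross_attention(q, k, v)
-    assert o.shape == (2, 4, 10, 32) and a.shape == (2, 4, 10, 12)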
-
-
-@torch.jit.unused
-def xformers_flash_quad_cross_attention(q, k, v, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, attn_act_fn: str='softplus4'):
-    '''
-    :param q:   shape [B,...,H,qL,C]
-    :param k:   shape [B,...,H,kL,C]
-    :param v:   shape [B,...,H,kL,C]
-    :param attn_mul:    shape [B,...,H,qL,kL]
-    :param attn_bias:   shape [B,...,H,qL,kL]
-    :param attn_act_fn:
-    :return:
-    '''
-    assert q.ndim >= 4
-    assert attn_act_fn == 'softmax', 'Error! Only attn_act_fn=softmax is supported in the fast path.'
-    assert attn_mul is None, 'Error! Param attn_mul is not supported.'
-    # qk shape [B, ..., H, qL, kL]
-
-    q = q.transpose(-2, -3)
-    k = k.transpose(-2, -3)
-    v = v.transpose(-2, -3)
-
-    if k.shape[1] % 8 != 0:
-        need_pad = 8 - k.shape[1] % 8
-        k = F.pad(k, [0, 0, 0, 0, 0, need_pad], 'replicate')
-        v = F.pad(v, [0, 0, 0, 0, 0, need_pad], 'replicate')
-        attn_bias = F.pad(attn_bias, [0, need_pad], 'constant', -1e6)
-
-    o = xformers.ops.memory_efficient_attention(q, k, v, attn_bias.type_as(q), 0.)
-    a = torch.zeros([0], device=q.device, dtype=q.dtype)
-
-    o = o.transpose(-2, -3)
-    return o, a
-
-
-@torch.jit.unused
-def torch_flash_quad_cross_attention(q, k, v, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, attn_act_fn: str='softplus4'):
-    '''
-    :param q:   shape [B,...,H,qL,C]
-    :param k:   shape [B,...,H,kL,C]
-    :param v:   shape [B,...,H,kL,C]
-    :param attn_mul:    shape [B,...,H,qL,kL]
-    :param attn_bias:   shape [B,...,H,qL,kL]
-    :param attn_act_fn:
-    :return:
-    '''
-    assert q.ndim >= 4
-    assert attn_act_fn == 'softmax', 'Error! Only attn_act_fn=softmax is supported in the fast path.'
-    assert attn_mul is None, 'Error! Param attn_mul is not supported.'
-    # assert attn_bias is None, 'Error! Param attn_bias is not supported.'
-    # qk shape [B, ..., H, qL, kL]
-
-    with torch.backends.cuda.sdp_kernel(enable_mem_efficient=True, enable_flash=False, enable_math=False):
-        # o = F.scaled_dot_product_attention(q, k, v, attn_bias)
-        o = F.scaled_dot_product_attention(q, k, v, None, is_causal=True)
-    a = torch.zeros([0], device=q.device, dtype=q.dtype)
-
-    return o, a
-
-
-class FlashQuadSelfAttention(torch.jit.ScriptModule):
-    __constants__ = ['attn_act_fn']
-
-    def __init__(self, in_dim, out_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn='softplus4', use_rotary_pos_emb=False, linear_layer_nobias=False, use_group_conv=True):
-        super().__init__()
-
-        self.attn_act_fn = attn_act_fn
-        self.use_rotary_pos_emb = use_rotary_pos_emb
-
-        self.n_head = n_head
-        self.expand_head_dim = expand_head_dim
-        self.squeeze_head_dim = squeeze_head_dim
-
-        expand_dim = n_head * expand_head_dim
-        squeeze_dim = n_head * squeeze_head_dim
-
-        if use_group_conv:
-            self.u_m = MultiGroupConvLayer(in_dim, expand_dim)
-        else:
-            self.u_m = nn.Linear(in_dim, expand_dim)
-
-        self.qkv_dims = (squeeze_dim, squeeze_dim, expand_dim)
-        self.qkv_m = nn.Linear(in_dim, sum(self.qkv_dims), bias=not linear_layer_nobias)
-
-        # No scaling anymore: convergence became very difficult with scaling.
-        # (Previous idea: scale qkv small enough, with all values positive:)
-        # nn.init.uniform_(self.qkv_m.weight.data[:squeeze_dim], 0, 1 / math.sqrt(squeeze_dim))
-        # nn.init.uniform_(self.qkv_m.weight.data[squeeze_dim:squeeze_dim*2], 0, 1 / math.sqrt(squeeze_dim))
-        # nn.init.uniform_(self.qkv_m.weight.data[squeeze_dim*2:], 0, 1 / math.sqrt(expand_dim))
-
-        self.out = nn.Linear(expand_dim, out_dim, bias=not linear_layer_nobias)
-
-        if use_rotary_pos_emb:
-            self.register_buffer('rotary_ch_emb', make_sinusoidal_position_channel_embedding(squeeze_head_dim, 10000))
-        else:
-            self.register_buffer('rotary_ch_emb', None)
-
-    # @torch.jit.script_method
-    def forward(self, x, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, rotary_pos_start: int=0, rotary_pos_scale: int=1):
-        B, L, C = x.shape
-        y = x
-
-        u = self.u_m(y)
-
-        qkv = self.qkv_m(y)
-        q, k, v = torch.split_with_sizes(qkv, self.qkv_dims, -1)
-
-        q = q.reshape(B, L, self.n_head, self.squeeze_head_dim).transpose(1, 2)
-        k = k.reshape(B, L, self.n_head, self.squeeze_head_dim).transpose(1, 2)
-        v = v.reshape(B, L, self.n_head, self.expand_head_dim).transpose(1, 2)
-
-        if self.use_rotary_pos_emb:
-            qL, kL = q.shape[-2], k.shape[-2]
-            pos_emb = make_sinusoidal_position_embedding(max(qL, kL), pos_ch=max(q.shape[-1], k.shape[-1]), pos_start=rotary_pos_start, pos_scale=rotary_pos_scale, device=q.device, ch_emb=self.rotary_ch_emb)
-            q = apply_rotary_position_embedding(q, pos_emb[:qL, :q.shape[-1]])
-            k = apply_rotary_position_embedding(k, pos_emb[:kL, :k.shape[-1]])
-
-        if self.attn_act_fn == 'softmax' and use_xformers_softmax_attn:
-            o, a = xformers_flash_quad_cross_attention(q, k, v, attn_mul, attn_bias, self.attn_act_fn)
-        elif self.attn_act_fn == 'softmax' and use_torch_softmax_attn and self.use_rotary_pos_emb:
-            o, a = torch_flash_quad_cross_attention(q, k, v, attn_mul, attn_bias, self.attn_act_fn)
-        else:
-            o, a = flash_quad_cross_attention(q, k, v, attn_mul, attn_bias, self.attn_act_fn)
-        o = o.transpose(1, 2).reshape(B, L, self.n_head * self.expand_head_dim)
-        y = F.silu(u, inplace=True) * o
-        y = self.out(y)
-        return y, a
-
-
-class myT5_DecoderBlock(nn.Module):
-    def __init__(self, in_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb, use_group_conv):
-        super().__init__()
-        self.attn_norm = RmsNorm(in_dim)
-        self.attn = FlashQuadSelfAttention(in_dim, in_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb, True, use_group_conv=use_group_conv)
-
-    def forward(self, x, x_attn_bias, rotary_pos_start, rotary_pos_scale):
-        y = self.attn_norm(x)
-        y2, a = self.attn(y, attn_bias=x_attn_bias, rotary_pos_start=rotary_pos_start, rotary_pos_scale=rotary_pos_scale)
-        y3 = x + y2
-        # if y.isnan().sum() > 0:
-        #     print('1', y.shape)
-        return y3, a
-
-
-class GPT(nn.Module):
-    def __init__(self, token_dim=-1, hidden_dim=512, n_head=8, expand_head_dim=48, squeeze_head_dim=24, attn_act_fn='softmax', vocab_size=5000, n_decoder=4,
-                 use_rotary_pos_emb=False, use_rel_pos_emb=True, use_random_pos=False, checkpoint_group_size=None, use_group_conv=True, *, _debug=False):
-        super().__init__()
-        self._debug = _debug
-
-        self.token_emb = nn.Embedding(vocab_size, hidden_dim)
-        # Using F.normalize works better here
-        # self.token_emb.weight.data = torch.randn_like(self.token_emb.weight.data) * 0.02
-        self.token_emb.weight.data = F.normalize(self.token_emb.weight.data, 2, -1)
-
-        # self.emb_up = nn.Linear(token_dim, hidden_dim)
-        # self.emb_down = nn.Linear(hidden_dim, token_dim)
-
-        self.use_rotary_pos_emb = use_rotary_pos_emb
-        self.use_rel_pos_emb = use_rel_pos_emb
-        self.use_random_pos = use_random_pos
-
-        self.float_min = -1e6
-        self.n_head = n_head
-        self.attn_act_fn = attn_act_fn
-
-        self.decoders = nn.ModuleList([
-            myT5_DecoderBlock(hidden_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb=use_rotary_pos_emb, use_group_conv=use_group_conv)
-            for _ in range(n_decoder)
-        ])
-        self.decoder_norm = RmsNorm(hidden_dim)
-
-        # self.lm_head = nn.Linear(hidden_dim, vocab_size, False)
-
-        if use_rel_pos_emb:
-            self.dec_rel_pos_emb = T5_RelativePositionEmbedding(n_head, 64, 256, bidirectional=False)
-
-        # Use gradient checkpointing to save GPU memory
-        if checkpoint_group_size is not None:
-            checkpoint_group = []
-            for idx in range(int(math.ceil(len(self.decoders) / checkpoint_group_size))):
-                checkpoint_group.append(list(self.decoders[idx * checkpoint_group_size:(idx + 1) * checkpoint_group_size]))
-            self.__dict__['checkpoint_group'] = checkpoint_group
-
-        # Tied (shared) input/output embeddings work better
-
-    def decode_func(self, x, x_mask, pos_scale, pos_bias):
-        # Decoder part
-        _, dec_L = x.shape
-
-        # Build the decoder self-attention mask
-        dec_mask_self = make_nlp_self_attn_mask(x_mask, bidirectional=False)[:,None].repeat(1, self.n_head, 1, 1)
-        # B, H, L, L
-
-        # Apply the decoder's relative position bias
-        if self.use_rel_pos_emb:
-            dec_pos_emb_self = self.dec_rel_pos_emb(dec_L, dec_L, rel_scale=pos_scale, rel_bias=pos_bias).permute(2, 0, 1)[None]
-        else:
-            dec_pos_emb_self = 0
-
-        dec_mask_self = torch.where(dec_mask_self, dec_pos_emb_self, self.float_min)
-
-        y = self.token_emb(x)
-        # y = self.emb_up(y)
-
-        attn_out = []
-
-        if y.requires_grad and y.device.type != 'cpu' and self.training and hasattr(self, 'checkpoint_group'):
-            for g in self.checkpoint_group:
-                def ckp(y, dec_mask_self, pos_bias, pos_scale, g=g):  # bind the current group now; avoids late binding on recompute
-                    for m in g:
-                        y, a = m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale)
-                        attn_out.append(a)
-                    return y
-                y = checkpoint(ckp, y, dec_mask_self, pos_bias, pos_scale, use_reentrant=False, preserve_rng_state=False)
-
-        else:
-            for m_i, m in enumerate(self.decoders):
-                c_y, c_a = m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale)
-                # if ty.isnan().sum() >0:
-                #     print(ty.shape)
-                #     print(m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale).isnan().sum())
-                attn_out.append(c_a)
-                y = c_y
-
-        # y2 = self.emb_down(y2)
-        dec_latent = y
-        dec_out = self.decoder_norm(y)
-        return dec_out, dec_latent, attn_out
-
-    def decode_func_cycle(self, x, x_mask, stop_tokens, pos_scale, pos_bias, max_len=512, top_k=10, top_p=0.9, temperature=1.):
-        # Autoregressive self-decoding with the decoder
-        # Requires batch size 1
-        assert x.shape[0] == x_mask.shape[0] == 1
-        assert x_mask.dtype == torch.bool
-
-        x = x[:, x_mask.reshape(-1)]
-        # shape [1, L]
-
-        out_tokens = torch.zeros([0], dtype=torch.long, device=x.device)
-
-        while True:
-            cat_x = torch.cat([x, out_tokens[None,]], 1)
-            cat_x_mask = torch.ones_like(cat_x, dtype=torch.bool)
-
-            dec_out, _, _ = self.decode_func(cat_x, cat_x_mask, pos_scale, pos_bias)
-
-            out_logit = F.linear(dec_out[0, -1, :], self.token_emb.weight)
-            # Sample the next token
-            out_prob = nlg_utils.nlg_softmax_prob(out_logit, temperature)
-            out_prob = nlg_utils.nlg_prob_decay(out_prob, out_tokens, watch_len=10)
-            out_char = nlg_utils.nlg_sample(out_prob, top_k, top_p)
-
-            out_tokens = torch.cat([out_tokens, out_char], 0)
-
-            if out_char.item() in stop_tokens or out_tokens.shape[0] >= max_len:
-                break
-
-        return out_tokens
-
-    def gen(self, x, stop_tokens, max_len, pad_token, x_mask=None, top_k=10, top_p=0.9, temperature=1.):
-        # Time-step-by-time-step generation
-        if isinstance(stop_tokens, int):
-            stop_tokens = {stop_tokens}
-
-        with torch.inference_mode():
-            pos_scale = 1
-            pos_bias = 0
-
-            if x_mask is None:
-                x_mask = torch.ones_like(x, dtype=torch.bool)
-
-            out = []
-            for x_1, x_mask_1 in zip(x, x_mask):
-                out_1 = self.decode_func_cycle(x_1[None], x_mask_1[None], stop_tokens=stop_tokens,
-                                               pos_scale=pos_scale, pos_bias=pos_bias, max_len=max_len, top_k=top_k, top_p=top_p, temperature=temperature)
-                out.append(out_1)
-
-            out = torch.nested.as_nested_tensor(out)
-            out = torch.nested.to_padded_tensor(out, pad_token)
-        return out
-
-    pred = gen
-
-    def forward(self, x, label=None, label_mask=None, label_weight=None, label_smoothing=0., x_mask=None):
-        pos_scale = 1
-        pos_bias = 0
-        if self.training and self.use_random_pos:
-            # pos_scale = random.randint(1, 8)
-            pos_bias = random.randint(0, 8)
-
-        if x_mask is None:
-            x_mask = torch.ones_like(x, dtype=torch.bool)
-
-        dec_out, dec_latent, attn_out = self.decode_func(x, x_mask, pos_scale=pos_scale, pos_bias=pos_bias)
-        out = F.linear(dec_out, self.token_emb.weight)
-
-        loss_dict = None
-        if label is not None:
-
-            assert label.shape == x.shape
-
-            topk = 10
-            cls_loss = weighted_and_neg_topk_cross_entropy(out.transpose(1, 2), label.long(), topk, label_weight, label_mask, label_smoothing)
-            # cls_loss reference table (accuracy at a given loss): 8.9-0% | 5.6-1% | 3.6-22% | 1.8-60% | 0.7-90% | 0.05-98%
-            # This table informs the loss weighting below.
-            loss = cls_loss
-
-            z_loss = latent_rms_loss(dec_latent, dim=-1, mask=label_mask, rms_bound=len(self.decoders))
-            # rms_loss selects internally, so once it fires its value is typically 0.2-0.7; weight it by 0.02
-            loss = loss + z_loss * 0.02
-
-            # With softmax activation the attention loss is always 0, so skip it
-            if self.attn_act_fn == 'softmax':
-                a_loss = 0.
-            else:
-                a_loss = attn_act_limit_loss(attn_out)
-                # a_loss is typically 0.004-0.01, so weight it by 0.1
-                loss = loss + a_loss * 0.1
-
-            loss_dict = {
-                'loss': loss,
-                'cls_loss': cls_loss,
-                'z_loss': z_loss,
-                'a_loss': a_loss,
-            }
-
-            if self._debug:
-                acc = (out.data.argmax(-1) == label).type(torch.float32).mean().item()
-                print(f'{cls_loss=:.5f} {z_loss=:.5f} {a_loss=:.5f} {acc=:.5f}', flush=False)
-
-        return out, loss_dict
-
-
-if __name__ == '__main__':
-    device = 'cuda:0'
-
-    # 1.13 False
-
-    # net = GPT(token_dim=-1, hidden_dim=512, n_head=12, expand_head_dim=64, squeeze_head_dim=22, attn_act_fn='softmax', vocab_size=5000, n_decoder=12,
-    #           use_rotary_pos_emb=False, use_rel_pos_emb=True, use_random_pos=True, checkpoint_group_size=None, _debug=True)
-    net = GPT(token_dim=-1, hidden_dim=512, n_head=8, expand_head_dim=128, squeeze_head_dim=64, attn_act_fn='relu2', vocab_size=5000, n_decoder=12,
-              use_rotary_pos_emb=True, use_rel_pos_emb=False, use_random_pos=True, checkpoint_group_size=None, use_group_conv=True, _debug=True)
-
-    net.to(device)
-
-    model_utils_torch.print_params_size(net)
-    model_utils_torch.print_buffers_size(net)
-
-    x = torch.as_tensor([[0,1,2,3,4,5,6,7], [1,2,3,4,5,6,7,8]], dtype=torch.long, device=device)
-
-    with torch.no_grad():
-        y_vec, _ = net(x)
-
-    with torch.no_grad():
-        # Autoregressive generation, one time step at a time
-        y_vec = net.gen(x, 0, 12, 0)
-
-    print(y_vec.shape)
-
-    # ---------
-    from model_utils_torch import Adan
-
-    optim = Adan(net.parameters(), 1e-4)
-
-    # 7.8G 1.13.1 False
-    # train_xs = torch.randint(1, 100, [5000, 950]).cuda()
-    # 7.?G -> 5.9G 1.13.1 True
-    # train_xs = torch.randint(1, 100, [5000, 900]).cuda()
-
-    # 7.9G 2.0.0 False
-    # train_xs = torch.randint(1, 100, [5000, 950]).cuda()
-    # 7.9G 2.0.0 True
-    train_xs = torch.randint(1, 100, [5000, 320]).cuda()
-
-    train_ys = torch.roll(train_xs, -1, 1)
-
-    net.cuda()
-
-    for it in range(200000):
-        print(it, end=' ', flush=False)
-        ids = torch.randint(0, 2000, [16]).cuda()
-
-        xs = train_xs[ids]
-        ys = train_ys[ids]
-
-        y, loss = net(xs, ys)
-        optim.zero_grad()
-        loss['loss'].backward()
-        optim.step()
-        # print(it, loss.item())
-
-        if it > 7000:
-            break
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Airo Wizard 1.0 Beta Revision 250 13 What You Need to Know About the Latest Version of Airo.md b/spaces/usbethFlerru/sovits-modelsV2/example/Airo Wizard 1.0 Beta Revision 250 13 What You Need to Know About the Latest Version of Airo.md
deleted file mode 100644
index a741fd2f710150581bd61a26fdbab21ee9a64c0c..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Airo Wizard 1.0 Beta Revision 250 13 What You Need to Know About the Latest Version of Airo.md	
+++ /dev/null
@@ -1,6 +0,0 @@
-<h2>airo wizard 1.0 beta revision 250 13</h2><br /><p><b><b>DOWNLOAD</b> ---> <a href="https://urlcod.com/2uyU98">https://urlcod.com/2uyU98</a></b></p><br /><br />
-<br />
- aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/__init__.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/__init__.py
deleted file mode 100644
index 13d3903e763e1fbb21c1064f6bffdafcca08e9e6..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from .track import register_tracker
-from .trackers import BOTSORT, BYTETracker
-
-__all__ = 'register_tracker', 'BOTSORT', 'BYTETracker'  # allow simpler import
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/trackers/basetrack.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/trackers/basetrack.py
deleted file mode 100644
index 3c7b0f707508d92699b9a2f5c3d4500006e9faa5..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/trackers/basetrack.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from collections import OrderedDict
-
-import numpy as np
-
-
-class TrackState:
-    """Enumeration of possible object tracking states."""
-
-    New = 0
-    Tracked = 1
-    Lost = 2
-    Removed = 3
-
-
-class BaseTrack:
-    """Base class for object tracking, handling basic track attributes and operations."""
-
-    _count = 0
-
-    track_id = 0
-    is_activated = False
-    state = TrackState.New
-
-    history = OrderedDict()
-    features = []
-    curr_feature = None
-    score = 0
-    start_frame = 0
-    frame_id = 0
-    time_since_update = 0
-
-    # Multi-camera
-    location = (np.inf, np.inf)
-
-    @property
-    def end_frame(self):
-        """Return the last frame ID of the track."""
-        return self.frame_id
-
-    @staticmethod
-    def next_id():
-        """Increment and return the global track ID counter."""
-        BaseTrack._count += 1
-        return BaseTrack._count
-
-    def activate(self, *args):
-        """Activate the track with the provided arguments."""
-        raise NotImplementedError
-
-    def predict(self):
-        """Predict the next state of the track."""
-        raise NotImplementedError
-
-    def update(self, *args, **kwargs):
-        """Update the track with new observations."""
-        raise NotImplementedError
-
-    def mark_lost(self):
-        """Mark the track as lost."""
-        self.state = TrackState.Lost
-
-    def mark_removed(self):
-        """Mark the track as removed."""
-        self.state = TrackState.Removed
-
-    @staticmethod
-    def reset_id():
-        """Reset the global track ID counter."""
-        BaseTrack._count = 0
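-
-
-# A minimal subclass sketch, not part of the original module: a concrete
-# tracker only needs to implement activate/predict/update on top of the
-# bookkeeping BaseTrack already provides.
-class _ConstantTrack(BaseTrack):
-    """Toy track with a constant-position motion model."""
-
-    def activate(self, box, frame_id):
-        self.track_id = self.next_id()
-        self.state = TrackState.Tracked
-        self.is_activated = True
-        self.start_frame = self.frame_id = frame_id
-        self.box = box
-
-    def predict(self):
-        return self.box  # the box never moves
-
-    def update(self, box, frame_id):
-        self.box = box
-        self.frame_id = frame_id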
diff --git a/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py b/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py
deleted file mode 100644
index 489d501bef364020212306d81e9b85c8daa27491..0000000000000000000000000000000000000000
--- a/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# Deformable DETR
-# Copyright (c) 2020 SenseTime. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------------------------------
-# Modified from:
-# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py
-# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
-# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py
-# ------------------------------------------------------------------------------------------------
-
-import math
-import warnings
-from typing import Optional
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.init import constant_, xavier_uniform_
-
-try:
-    from groundingdino import _C
-except Exception:
-    warnings.warn("Failed to load custom C++ ops. Running in CPU-only mode!")
-
-
-# helpers
-def _is_power_of_2(n):
-    if (not isinstance(n, int)) or (n < 0):
-        raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
-    return (n & (n - 1) == 0) and n != 0
-
-
-class MultiScaleDeformableAttnFunction(Function):
-    @staticmethod
-    def forward(
-        ctx,
-        value,
-        value_spatial_shapes,
-        value_level_start_index,
-        sampling_locations,
-        attention_weights,
-        im2col_step,
-    ):
-        ctx.im2col_step = im2col_step
-        output = _C.ms_deform_attn_forward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            ctx.im2col_step,
-        )
-        ctx.save_for_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        )
-        return output
-
-    @staticmethod
-    @once_differentiable
-    def backward(ctx, grad_output):
-        (
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-        ) = ctx.saved_tensors
-        grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward(
-            value,
-            value_spatial_shapes,
-            value_level_start_index,
-            sampling_locations,
-            attention_weights,
-            grad_output,
-            ctx.im2col_step,
-        )
-
-        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
-
-
-def multi_scale_deformable_attn_pytorch(
-    value: torch.Tensor,
-    value_spatial_shapes: torch.Tensor,
-    sampling_locations: torch.Tensor,
-    attention_weights: torch.Tensor,
-) -> torch.Tensor:
-
-    bs, _, num_heads, embed_dims = value.shape
-    _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
-    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
-    sampling_grids = 2 * sampling_locations - 1
-    sampling_value_list = []
-    for level, (H_, W_) in enumerate(value_spatial_shapes):
-        # bs, H_*W_, num_heads, embed_dims ->
-        # bs, H_*W_, num_heads*embed_dims ->
-        # bs, num_heads*embed_dims, H_*W_ ->
-        # bs*num_heads, embed_dims, H_, W_
-        value_l_ = (
-            value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
-        )
-        # bs, num_queries, num_heads, num_points, 2 ->
-        # bs, num_heads, num_queries, num_points, 2 ->
-        # bs*num_heads, num_queries, num_points, 2
-        sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
-        # bs*num_heads, embed_dims, num_queries, num_points
-        sampling_value_l_ = F.grid_sample(
-            value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
-        )
-        sampling_value_list.append(sampling_value_l_)
-    # (bs, num_queries, num_heads, num_levels, num_points) ->
-    # (bs, num_heads, num_queries, num_levels, num_points) ->
-    # (bs, num_heads, 1, num_queries, num_levels*num_points)
-    attention_weights = attention_weights.transpose(1, 2).reshape(
-        bs * num_heads, 1, num_queries, num_levels * num_points
-    )
-    output = (
-        (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
-        .sum(-1)
-        .view(bs, num_heads * embed_dims, num_queries)
-    )
-    return output.transpose(1, 2).contiguous()
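-
-
-# A minimal shape sketch, not part of the original module: two feature levels
-# (8x8 and 4x4), random sampling locations in [0, 1], and an output per query
-# that concatenates all heads.
-def _msda_pytorch_sketch():
-    bs, heads, dim, queries, points = 2, 4, 8, 5, 3
-    shapes = torch.as_tensor([[8, 8], [4, 4]])
-    value = torch.randn(bs, int((shapes[:, 0] * shapes[:, 1]).sum()), heads, dim)
-    locs = torch.rand(bs, queries, heads, len(shapes), points, 2)
-    weights = torch.softmax(torch.randn(bs, queries, heads, len(shapes) * points), -1)
-    out = multi_scale_deformable_attn_pytorch(
-        value, shapes, locs, weights.view(bs, queries, heads, len(shapes), points))
-    assert out.shape == (bs, queries, heads * dim)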
-
-
-class MultiScaleDeformableAttention(nn.Module):
-    """Multi-Scale Deformable Attention Module used in Deformable-DETR
-
-    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
-    <https://arxiv.org/pdf/2010.04159.pdf>`_.
-
-    Args:
-        embed_dim (int): The embedding dimension of Attention. Default: 256.
-        num_heads (int): The number of attention heads. Default: 8.
-        num_levels (int): The number of feature map used in Attention. Default: 4.
-        num_points (int): The number of sampling points for each query
-            in each head. Default: 4.
-        img2col_step (int): The step used in image_to_column. Default: 64.
-        dropout (float): Dropout applied to the output. Default: 0.1.
-        batch_first (bool): If ``True``, input and output tensors are provided
-            as `(bs, n, embed_dim)`; otherwise as `(n, bs, embed_dim)`. Default: False.
-    """
-
-    def __init__(
-        self,
-        embed_dim: int = 256,
-        num_heads: int = 8,
-        num_levels: int = 4,
-        num_points: int = 4,
-        img2col_step: int = 64,
-        batch_first: bool = False,
-    ):
-        super().__init__()
-        if embed_dim % num_heads != 0:
-            raise ValueError(
-                "embed_dim must be divisible by num_heads, but got {} and {}".format(
-                    embed_dim, num_heads
-                )
-            )
-        head_dim = embed_dim // num_heads
-
-        self.batch_first = batch_first
-
-        if not _is_power_of_2(head_dim):
-            warnings.warn(
-                """
-                You'd better set d_model in MSDeformAttn so that the dimension
-                of each attention head is a power of 2, which is more efficient.
-                """
-            )
-
-        self.im2col_step = img2col_step
-        self.embed_dim = embed_dim
-        self.num_heads = num_heads
-        self.num_levels = num_levels
-        self.num_points = num_points
-        self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
-        self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
-        self.value_proj = nn.Linear(embed_dim, embed_dim)
-        self.output_proj = nn.Linear(embed_dim, embed_dim)
-
-        self.init_weights()
-
-    def _reset_parameters(self):
-        return self.init_weights()
-
-    def init_weights(self):
-        """
-        Default initialization for Parameters of Module.
-        """
-        constant_(self.sampling_offsets.weight.data, 0.0)
-        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
-            2.0 * math.pi / self.num_heads
-        )
-        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
-        grid_init = (
-            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
-            .view(self.num_heads, 1, 1, 2)
-            .repeat(1, self.num_levels, self.num_points, 1)
-        )
-        for i in range(self.num_points):
-            grid_init[:, :, i, :] *= i + 1
-        with torch.no_grad():
-            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
-        constant_(self.attention_weights.weight.data, 0.0)
-        constant_(self.attention_weights.bias.data, 0.0)
-        xavier_uniform_(self.value_proj.weight.data)
-        constant_(self.value_proj.bias.data, 0.0)
-        xavier_uniform_(self.output_proj.weight.data)
-        constant_(self.output_proj.bias.data, 0.0)
-
-    def freeze_sampling_offsets(self):
-        print("Freeze sampling offsets")
-        self.sampling_offsets.weight.requires_grad = False
-        self.sampling_offsets.bias.requires_grad = False
-
-    def freeze_attention_weights(self):
-        print("Freeze attention weights")
-        self.attention_weights.weight.requires_grad = False
-        self.attention_weights.bias.requires_grad = False
-
-    def forward(
-        self,
-        query: torch.Tensor,
-        key: Optional[torch.Tensor] = None,
-        value: Optional[torch.Tensor] = None,
-        query_pos: Optional[torch.Tensor] = None,
-        key_padding_mask: Optional[torch.Tensor] = None,
-        reference_points: Optional[torch.Tensor] = None,
-        spatial_shapes: Optional[torch.Tensor] = None,
-        level_start_index: Optional[torch.Tensor] = None,
-        **kwargs
-    ) -> torch.Tensor:
-
-        """Forward Function of MultiScaleDeformableAttention
-
-        Args:
-            query (torch.Tensor): Query embeddings with shape
-                `(num_query, bs, embed_dim)`
-            key (torch.Tensor): Key embeddings with shape
-                `(num_key, bs, embed_dim)`
-            value (torch.Tensor): Value embeddings with shape
-                `(num_key, bs, embed_dim)`
-            query_pos (torch.Tensor): The position embedding for `query`. Default: None.
-            key_padding_mask (torch.Tensor): ByteTensor for `query`, with shape `(bs, num_key)`,
-                indicating which elements within `key` to be ignored in attention.
-            reference_points (torch.Tensor): The normalized reference points
-                with shape `(bs, num_query, num_levels, 2)`,
-                all elements in the range [0, 1], top-left (0, 0),
-                bottom-right (1, 1), including the padding area;
-                or `(N, Length_{query}, num_levels, 4)`, with two additional
-                dimensions `(h, w)` appended to form reference boxes.
-            spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
-                With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
-            level_start_index (torch.Tensor): The start index of each level. A tensor with
-                shape `(num_levels, )` which can be represented as
-                `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
-
-        Returns:
-            torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
-        """
-
-        if value is None:
-            value = query
-
-        if query_pos is not None:
-            query = query + query_pos
-
-        if not self.batch_first:
-            # change to (bs, num_query ,embed_dims)
-            query = query.permute(1, 0, 2)
-            value = value.permute(1, 0, 2)
-
-        bs, num_query, _ = query.shape
-        bs, num_value, _ = value.shape
-
-        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
-
-        value = self.value_proj(value)
-        if key_padding_mask is not None:
-            value = value.masked_fill(key_padding_mask[..., None], float(0))
-        value = value.view(bs, num_value, self.num_heads, -1)
-        sampling_offsets = self.sampling_offsets(query).view(
-            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
-        )
-        attention_weights = self.attention_weights(query).view(
-            bs, num_query, self.num_heads, self.num_levels * self.num_points
-        )
-        attention_weights = attention_weights.softmax(-1)
-        attention_weights = attention_weights.view(
-            bs,
-            num_query,
-            self.num_heads,
-            self.num_levels,
-            self.num_points,
-        )
-
-        # bs, num_query, num_heads, num_levels, num_points, 2
-        if reference_points.shape[-1] == 2:
-            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
-            sampling_locations = (
-                reference_points[:, :, None, :, None, :]
-                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
-            )
-        elif reference_points.shape[-1] == 4:
-            sampling_locations = (
-                reference_points[:, :, None, :, None, :2]
-                + sampling_offsets
-                / self.num_points
-                * reference_points[:, :, None, :, None, 2:]
-                * 0.5
-            )
-        else:
-            raise ValueError(
-                "Last dim of reference_points must be 2 or 4, but get {} instead.".format(
-                    reference_points.shape[-1]
-                )
-            )
-    
-        if torch.cuda.is_available() and value.is_cuda:
-            halffloat = False
-            if value.dtype == torch.float16:
-                halffloat = True
-                value = value.float()
-                sampling_locations = sampling_locations.float()
-                attention_weights = attention_weights.float()
-
-            output = MultiScaleDeformableAttnFunction.apply(
-                value,
-                spatial_shapes,
-                level_start_index,
-                sampling_locations,
-                attention_weights,
-                self.im2col_step,
-            )
-
-            if halffloat:
-                output = output.half()
-        else:
-            output = multi_scale_deformable_attn_pytorch(
-                value, spatial_shapes, sampling_locations, attention_weights
-            )
-
-        output = self.output_proj(output)
-
-        if not self.batch_first:
-            output = output.permute(1, 0, 2)
-
-        return output
-
-
-def create_dummy_class(klass, dependency, message=""):
-    """
-    When a dependency of a class is not available, create a dummy class which throws ImportError
-    when used.
-
-    Args:
-        klass (str): name of the class.
-        dependency (str): name of the dependency.
-        message: extra message to print
-    Returns:
-        class: a class object
-    """
-    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass)
-    if message:
-        err = err + " " + message
-
-    class _DummyMetaClass(type):
-        # throw error on class attribute access
-        def __getattr__(_, __):  # noqa: B902
-            raise ImportError(err)
-
-    class _Dummy(object, metaclass=_DummyMetaClass):
-        # throw error on constructor
-        def __init__(self, *args, **kwargs):
-            raise ImportError(err)
-
-    return _Dummy
-
-
-def create_dummy_func(func, dependency, message=""):
-    """
-    When a dependency of a function is not available, create a dummy function which throws
-    ImportError when used.
-
-    Args:
-        func (str): name of the function.
-        dependency (str or list[str]): name(s) of the dependency.
-        message: extra message to print
-    Returns:
-        function: a function object
-    """
-    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
-    if message:
-        err = err + " " + message
-
-    if isinstance(dependency, (list, tuple)):
-        dependency = ",".join(dependency)
-
-    def _dummy(*args, **kwargs):
-        raise ImportError(err)
-
-    return _dummy
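
These two helpers are typically used to guard optional dependencies at import time. A minimal usage sketch, with a hypothetical optional package `fancy_ops` (not part of the original file):

```python
# Guard an optional dependency with the dummy helpers above.
try:
    from fancy_ops import FancyAttention, fancy_interpolate  # hypothetical optional package
except ImportError:
    FancyAttention = create_dummy_class("FancyAttention", "fancy_ops")
    fancy_interpolate = create_dummy_func("fancy_interpolate", "fancy_ops")

# Importing the module still succeeds; the ImportError is raised only on first use:
# FancyAttention()  -> ImportError: Cannot import 'fancy_ops', therefore 'FancyAttention' is not available.
```
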
diff --git a/spaces/vumichien/Generate_human_motion/pyrender/tests/conftest.py b/spaces/vumichien/Generate_human_motion/pyrender/tests/conftest.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/weizmannscience/multidiffusion-region-based/sketch_helper.py b/spaces/weizmannscience/multidiffusion-region-based/sketch_helper.py
deleted file mode 100644
index d05d5cec954eb73122b19f856d8285cceb4d96d9..0000000000000000000000000000000000000000
--- a/spaces/weizmannscience/multidiffusion-region-based/sketch_helper.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import numpy as np
-import cv2
-from PIL import Image
-from skimage.color import rgb2lab
-from skimage.color import lab2rgb
-from sklearn.cluster import KMeans
-
-
-def count_high_freq_colors(image):
-    im = image.getcolors(maxcolors=1024*1024)
-    sorted_colors = sorted(im, key=lambda x: x[0], reverse=True)
-
-    freqs = [c[0] for c in sorted_colors]
-    mean_freq = sum(freqs) / len(freqs)
-
-    high_freq_colors = [c for c in sorted_colors if c[0] > max(2, mean_freq*1.25)]
-    return high_freq_colors
-
-def get_high_freq_colors(image, similarity_threshold=30):
-    image_copy = image.copy()
-    high_freq_colors = count_high_freq_colors(image)
-    # Check for similar colors and replace the lower frequency color with the higher frequency color in the image
-    for i, (freq1, color1) in enumerate(high_freq_colors):
-        for j, (freq2, color2) in enumerate(high_freq_colors):
-            if (color_distance(color1, color2) < similarity_threshold) or (color_distance(color1, opaque_color_on_white(color2, 0.5)) < 5):
-                if freq2 > freq1:
-                    replace_color(image_copy, color1, color2)
-
-    high_freq_colors = count_high_freq_colors(image_copy)
-    print(high_freq_colors)
-    return [high_freq_colors, image_copy]
-    
-def color_quantization(image, color_frequency_list):
-    # Convert the color frequency list to a set of unique colors
-    unique_colors = set([color for _, color in color_frequency_list])
-    
-    # Create a mask for the image with True where the color is in the unique colors set
-    mask = np.any(np.all(image.reshape(-1, 1, 3) == np.array(list(unique_colors)), axis=2), axis=1).reshape(image.shape[:2])
-    
-    # Create a new image with all pixels set to white
-    new_image = np.full_like(image, 255)
-    
-    # Copy the pixels from the original image that have a color in the color frequency list
-    new_image[mask] = image[mask]
-    return new_image
-
-def create_binary_matrix(img_arr, target_color):
-    # Create mask of pixels with target color
-    mask = np.all(img_arr == target_color, axis=-1)
-    
-    # Convert mask to binary matrix
-    binary_matrix = mask.astype(int)
-    from datetime import datetime
-    binary_file_name = f'mask-{datetime.now().timestamp()}.png'
-    cv2.imwrite(binary_file_name, binary_matrix * 255)
-    
-    #binary_matrix = torch.from_numpy(binary_matrix).unsqueeze(0).unsqueeze(0)
-    return binary_file_name
-
-def color_distance(color1, color2):
-    return sum((a - b) ** 2 for a, b in zip(color1, color2)) ** 0.5
-
-def replace_color(image, old_color, new_color):
-    pixels = image.load()
-    width, height = image.size
-    for x in range(width):
-        for y in range(height):
-            if pixels[x, y] == old_color:
-                pixels[x, y] = new_color
-
-def opaque_color_on_white(color, a):
-    r, g, b = color
-    opaque_red = int((1 - a) * 255 + a * r)
-    opaque_green = int((1 - a) * 255 + a * g)
-    opaque_blue = int((1 - a) * 255 + a * b)
-    return (opaque_red, opaque_green, opaque_blue)
\ No newline at end of file
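
A small driver for the helpers above, assuming Pillow and NumPy are installed. It builds a synthetic two-color sketch (a hypothetical input) and runs the frequency/quantization pipeline end to end:

```python
import numpy as np
from PIL import Image

img = Image.new("RGB", (64, 64), (255, 255, 255))
for x in range(32):
    for y in range(64):
        img.putpixel((x, y), (255, 0, 0))  # left half red, right half white

high_freq_colors, cleaned = get_high_freq_colors(img)
quantized = color_quantization(np.array(cleaned), high_freq_colors)
print(len(high_freq_colors), quantized.shape)  # e.g. 2 (64, 64, 3)
```
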
diff --git a/spaces/williamzhou2023/GPT2/utils.py b/spaces/williamzhou2023/GPT2/utils.py
deleted file mode 100644
index 8eeabfe5bfc3a80e4c875c778426608f66ce41da..0000000000000000000000000000000000000000
--- a/spaces/williamzhou2023/GPT2/utils.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
-import logging
-import json
-import os
-import datetime
-import hashlib
-import csv
-import requests
-import re
-
-import gradio as gr
-from pypinyin import lazy_pinyin
-import tiktoken
-import mdtex2html
-from markdown import markdown
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-
-from presets import *
-
-# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
-
-if TYPE_CHECKING:
-    from typing import TypedDict
-
-    class DataframeData(TypedDict):
-        headers: List[str]
-        data: List[List[str | int | bool]]
-
-
-def count_token(message):
-    encoding = tiktoken.get_encoding("cl100k_base")
-    input_str = f"role: {message['role']}, content: {message['content']}"
-    length = len(encoding.encode(input_str))
-    return length
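
A quick check of count_token, assuming tiktoken is available; the exact number depends on the cl100k_base vocabulary:

```python
# Usage sketch: count_token tokenizes the serialized role/content string.
message = {"role": "user", "content": "Hello, world!"}
print(count_token(message))  # number of cl100k_base tokens, roughly 10
```
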
-
-
-def markdown_to_html_with_syntax_highlight(md_str):
-    def replacer(match):
-        lang = match.group(1) or "text"
-        code = match.group(2)
-
-        try:
-            lexer = get_lexer_by_name(lang, stripall=True)
-        except ValueError:
-            lexer = get_lexer_by_name("text", stripall=True)
-
-        formatter = HtmlFormatter()
-        highlighted_code = highlight(code, lexer, formatter)
-
-        return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'
-
-    code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
-    md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)
-
-    html_str = markdown(md_str)
-    return html_str
-
-
-def normalize_markdown(md_text: str) -> str:
-    lines = md_text.split("\n")
-    normalized_lines = []
-    inside_list = False
-
-    for i, line in enumerate(lines):
-        if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
-            if not inside_list and i > 0 and lines[i - 1].strip() != "":
-                normalized_lines.append("")
-            inside_list = True
-            normalized_lines.append(line)
-        elif inside_list and line.strip() == "":
-            if i < len(lines) - 1 and not re.match(
-                r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
-            ):
-                normalized_lines.append(line)
-            continue
-        else:
-            inside_list = False
-            normalized_lines.append(line)
-
-    return "\n".join(normalized_lines)
-
-
-def convert_mdtext(md_text):
-    code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
-    inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
-    code_blocks = code_block_pattern.findall(md_text)
-    non_code_parts = code_block_pattern.split(md_text)[::2]
-
-    result = []
-    for non_code, code in zip(non_code_parts, code_blocks + [""]):
-        if non_code.strip():
-            non_code = normalize_markdown(non_code)
-            if inline_code_pattern.search(non_code):
-                result.append(markdown(non_code, extensions=["tables"]))
-            else:
-                result.append(mdtex2html.convert(non_code, extensions=["tables"]))
-        if code.strip():
-            # _, code = detect_language(code)  # code highlighting temporarily disabled: it misbehaves on large code blocks
-            # code = code.replace("\n\n", "\n")  # blank-line stripping temporarily disabled: it misbehaves on large code blocks
-            code = f"```{code}\n\n```"
-            code = markdown_to_html_with_syntax_highlight(code)
-            result.append(code)
-    result = "".join(result)
-    return result
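
convert_mdtext splits the input into code and non-code parts and renders them through different paths. A usage sketch, assuming markdown, mdtex2html and pygments are installed:

```python
# Fenced code goes through the Pygments path; prose goes through mdtex2html/markdown.
md = "Here is a loop:\n```python\nfor i in range(3):\n    print(i)\n```"
html = convert_mdtext(md)
print("<pre>" in html)  # True: the code block is rendered as a highlighted <pre><code>
```
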
-
-def convert_user(userinput):
-    userinput = userinput.replace("\n", "<br>")
-    return f"<pre>{userinput}</pre>"
-
-def detect_language(code):
-    if code.startswith("\n"):
-        first_line = ""
-    else:
-        first_line = code.strip().split("\n", 1)[0]
-    language = first_line.lower() if first_line else ""
-    code_without_language = code[len(first_line) :].lstrip() if first_line else code
-    return language, code_without_language
-
-
-def construct_text(role, text):
-    return {"role": role, "content": text}
-
-
-def construct_user(text):
-    return construct_text("user", text)
-
-
-def construct_system(text):
-    return construct_text("system", text)
-
-
-def construct_assistant(text):
-    return construct_text("assistant", text)
-
-
-def construct_token_message(token, stream=False):
-    return f"Token 计数: {token}"
-
-
-def delete_last_conversation(chatbot, history, previous_token_count):
-    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
-        logging.info("Last message contains an error, removing only the chatbot entry")
-        chatbot.pop()
-        return (
-            chatbot,
-            history,
-            previous_token_count,
-            construct_token_message(sum(previous_token_count)),
-        )
-    if len(history) > 0:
-        logging.info("Removed one round of conversation history")
-        history.pop()
-        history.pop()
-    if len(chatbot) > 0:
-        logging.info("Removed one round of chatbot messages")
-        chatbot.pop()
-    if len(previous_token_count) > 0:
-        logging.info("Removed one round of token-count records")
-        previous_token_count.pop()
-    return (
-        chatbot,
-        history,
-        previous_token_count,
-        construct_token_message(sum(previous_token_count)),
-    )
-
-
-def save_file(filename, system, history, chatbot):
-    logging.info("保存对话历史中……")
-    os.makedirs(HISTORY_DIR, exist_ok=True)
-    if filename.endswith(".json"):
-        json_s = {"system": system, "history": history, "chatbot": chatbot}
-        print(json_s)
-        with open(os.path.join(HISTORY_DIR, filename), "w") as f:
-            json.dump(json_s, f)
-    elif filename.endswith(".md"):
-        md_s = f"system: \n- {system} \n"
-        for data in history:
-            md_s += f"\n{data['role']}: \n- {data['content']} \n"
-        with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as f:
-            f.write(md_s)
-    logging.info("保存对话历史完毕")
-    return os.path.join(HISTORY_DIR, filename)
-
-
-def save_chat_history(filename, system, history, chatbot):
-    if filename == "":
-        return
-    if not filename.endswith(".json"):
-        filename += ".json"
-    return save_file(filename, system, history, chatbot)
-
-
-def export_markdown(filename, system, history, chatbot):
-    if filename == "":
-        return
-    if not filename.endswith(".md"):
-        filename += ".md"
-    return save_file(filename, system, history, chatbot)
-
-
-def load_chat_history(filename, system, history, chatbot):
-    logging.info("加载对话历史中……")
-    if type(filename) != str:
-        filename = filename.name
-    try:
-        with open(os.path.join(HISTORY_DIR, filename), "r") as f:
-            json_s = json.load(f)
-        try:
-            if type(json_s["history"][0]) == str:
-                logging.info("历史记录格式为旧版,正在转换……")
-                new_history = []
-                for index, item in enumerate(json_s["history"]):
-                    if index % 2 == 0:
-                        new_history.append(construct_user(item))
-                    else:
-                        new_history.append(construct_assistant(item))
-                json_s["history"] = new_history
-                logging.info(new_history)
-        except:
-            # 没有对话历史
-            pass
-        logging.info("加载对话历史完毕")
-        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
-    except FileNotFoundError:
-        logging.info("没有找到对话历史文件,不执行任何操作")
-        return filename, system, history, chatbot
-
-
-def sorted_by_pinyin(lst):
-    return sorted(lst, key=lambda char: lazy_pinyin(char)[0][0])
-
-
-def get_file_names(dir, plain=False, filetypes=[".json"]):
-    logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
-    files = []
-    try:
-        for type in filetypes:
-            files += [f for f in os.listdir(dir) if f.endswith(type)]
-    except FileNotFoundError:
-        files = []
-    files = sorted_by_pinyin(files)
-    if files == []:
-        files = [""]
-    if plain:
-        return files
-    else:
-        return gr.Dropdown.update(choices=files)
-
-
-def get_history_names(plain=False):
-    logging.info("获取历史记录文件名列表")
-    return get_file_names(HISTORY_DIR, plain)
-
-
-def load_template(filename, mode=0):
-    logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
-    lines = []
-    logging.info("Loading template...")
-    if filename.endswith(".json"):
-        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
-            lines = json.load(f)
-        lines = [[i["act"], i["prompt"]] for i in lines]
-    else:
-        with open(
-            os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8"
-        ) as csvfile:
-            reader = csv.reader(csvfile)
-            lines = list(reader)
-        lines = lines[1:]
-    if mode == 1:
-        return sorted_by_pinyin([row[0] for row in lines])
-    elif mode == 2:
-        return {row[0]: row[1] for row in lines}
-    else:
-        choices = sorted_by_pinyin([row[0] for row in lines])
-        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(
-            choices=choices, value=choices[0]
-        )
-
-
-def get_template_names(plain=False):
-    logging.info("获取模板文件名列表")
-    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
-
-
-def get_template_content(templates, selection, original_system_prompt):
-    logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
-    try:
-        return templates[selection]
-    except:
-        return original_system_prompt
-
-
-def reset_state():
-    logging.info("重置状态")
-    return [], [], [], construct_token_message(0)
-
-
-def reset_textbox():
-    return gr.update(value="")
-
-
-def reset_default():
-    global API_URL
-    API_URL = "https://api.openai.com/v1/chat/completions"
-    os.environ.pop("HTTPS_PROXY", None)
-    os.environ.pop("https_proxy", None)
-    return gr.update(value=API_URL), gr.update(value=""), "API URL and proxy have been reset"
-
-
-def change_api_url(url):
-    global API_URL
-    API_URL = url
-    msg = f"API地址更改为了{url}"
-    logging.info(msg)
-    return msg
-
-
-def change_proxy(proxy):
-    os.environ["HTTPS_PROXY"] = proxy
-    msg = f"代理更改为了{proxy}"
-    logging.info(msg)
-    return msg
-
-
-def hide_middle_chars(s):
-    if len(s) <= 8:
-        return s
-    else:
-        head = s[:4]
-        tail = s[-4:]
-        hidden = "*" * (len(s) - 8)
-        return head + hidden + tail
-
-
-def submit_key(key):
-    key = key.strip()
-    msg = f"API密钥更改为了{hide_middle_chars(key)}"
-    logging.info(msg)
-    return key, msg
-
-
-def sha1sum(filename):
-    # Note: this hashes the filename string itself, not the file contents.
-    sha1 = hashlib.sha1()
-    sha1.update(filename.encode("utf-8"))
-    return sha1.hexdigest()
-
-
-def replace_today(prompt):
-    today = datetime.datetime.today().strftime("%Y-%m-%d")
-    return prompt.replace("{current_date}", today)
-
-
-def get_geoip():
-    response = requests.get("https://ipapi.co/json/", timeout=5)
-    try:
-        data = response.json()
-    except Exception:
-        data = {"error": True, "reason": "failed to connect to ipapi"}
-    if "error" in data:
-        logging.warning(f"Could not retrieve IP address information.\n{data}")
-        if data["reason"] == "RateLimited":
-            return (
-                "Failed to get IP geolocation because the IP lookup rate limit was reached. "
-                "Chat may still work, but note that you may run into problems if your IP is in an unsupported region."
-            )
-        else:
-            return f"Failed to get IP geolocation. Reason: {data['reason']}. You can still use the chat."
-    else:
-        country = data["country_name"]
-        if country == "China":
-            text = "**Your IP region: China. Please check your proxy settings immediately; using the API from an unsupported region may lead to an account ban.**"
-        else:
-            text = f"Your IP region: {country}."
-        logging.info(text)
-        return text
-
-
-def find_n(lst, max_num):
-    n = len(lst)
-    total = sum(lst)
-
-    if total < max_num:
-        return n
-
-    for i in range(len(lst)):
-        if total - lst[i] < max_num:
-            return n - i - 1
-        total = total - lst[i]
-    return 1
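
find_n returns how many of the most recent entries fit within a token budget, scanning from the oldest. A worked example (hypothetical counts):

```python
# Per-message token counts, oldest first.
token_counts = [120, 300, 250, 80]   # sum = 750
print(find_n(token_counts, 1000))    # 4: everything fits under the budget
print(find_n(token_counts, 400))     # 2: only the newest two (250 + 80 = 330) fit
```
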
diff --git a/spaces/wyysf/GenMM/utils/transforms.py b/spaces/wyysf/GenMM/utils/transforms.py
deleted file mode 100644
index 22f7bf4bd9d74145667023e0aad282c349590e6a..0000000000000000000000000000000000000000
--- a/spaces/wyysf/GenMM/utils/transforms.py
+++ /dev/null
@@ -1,399 +0,0 @@
-import numpy as np
-import torch
-
-
-def batch_mm(matrix, matrix_batch):
-    """
-    https://github.com/pytorch/pytorch/issues/14489#issuecomment-607730242
-    :param matrix: Sparse or dense matrix, size (m, n).
-    :param matrix_batch: Batched dense matrices, size (b, n, k).
-    :return: The batched matrix-matrix product, size (m, n) x (b, n, k) = (b, m, k).
-    """
-    batch_size = matrix_batch.shape[0]
-    # Stack the vector batch into columns. (b, n, k) -> (n, b, k) -> (n, b*k)
-    vectors = matrix_batch.transpose(0, 1).reshape(matrix.shape[1], -1)
-
-    # A matrix-matrix product is a batched matrix-vector product of the columns.
-    # And then reverse the reshaping. (m, n) x (n, b*k) = (m, b*k) -> (m, b, k) -> (b, m, k)
-    return matrix.mm(vectors).reshape(matrix.shape[0], batch_size, -1).transpose(1, 0)
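
batch_mm lets one (possibly sparse) matrix multiply a whole batch without materializing b copies of it. A quick equivalence check, assuming the function above is importable:

```python
import torch

m, n, b, k = 5, 7, 3, 2
matrix = torch.randn(m, n).to_sparse()          # sparse (m, n)
batch = torch.randn(b, n, k)                    # dense batched (b, n, k)

out = batch_mm(matrix, batch)                   # (b, m, k)
ref = torch.stack([matrix.to_dense() @ batch[i] for i in range(b)])
print(torch.allclose(out, ref, atol=1e-6))      # True
```
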
-
-
-def aa2quat(rots, form='wxyz', unified_orient=True):
-    """
-    Convert angle-axis representation to a wxyz quaternion, mapped to the half plane (w >= 0)
-    @param rots: angle-axis rotations, (*, 3)
-    @param form: quaternion format, either 'wxyz' or 'xyzw'
-    @param unified_orient: Use a unified orientation for the quaternion (quaternions are a double cover of SO(3))
-    :return:
-    """
-    angles = rots.norm(dim=-1, keepdim=True)
-    norm = angles.clone()
-    norm[norm < 1e-8] = 1
-    axis = rots / norm
-    quats = torch.empty(rots.shape[:-1] + (4,), device=rots.device, dtype=rots.dtype)
-    angles = angles * 0.5
-    if form == 'wxyz':
-        quats[..., 0] = torch.cos(angles.squeeze(-1))
-        quats[..., 1:] = torch.sin(angles) * axis
-    elif form == 'xyzw':
-        quats[..., :3] = torch.sin(angles) * axis
-        quats[..., 3] = torch.cos(angles.squeeze(-1))
-
-    if unified_orient:
-        idx = quats[..., 0] < 0
-        quats[idx, :] *= -1
-
-    return quats
-
-
-def quat2aa(quats):
-    """
-    Convert wxyz quaternions to angle-axis representation
-    :param quats:
-    :return:
-    """
-    _cos = quats[..., 0]
-    xyz = quats[..., 1:]
-    _sin = xyz.norm(dim=-1)
-    norm = _sin.clone()
-    norm[norm < 1e-7] = 1
-    axis = xyz / norm.unsqueeze(-1)
-    angle = torch.atan2(_sin, _cos) * 2
-    return axis * angle.unsqueeze(-1)
-
-
-def quat2mat(quats: torch.Tensor):
-    """
-    Convert (w, x, y, z) quaternions to 3x3 rotation matrix
-    :param quats: quaternions of shape (..., 4)
-    :return:  rotation matrices of shape (..., 3, 3)
-    """
-    qw = quats[..., 0]
-    qx = quats[..., 1]
-    qy = quats[..., 2]
-    qz = quats[..., 3]
-
-    x2 = qx + qx
-    y2 = qy + qy
-    z2 = qz + qz
-    xx = qx * x2
-    yy = qy * y2
-    wx = qw * x2
-    xy = qx * y2
-    yz = qy * z2
-    wy = qw * y2
-    xz = qx * z2
-    zz = qz * z2
-    wz = qw * z2
-
-    m = torch.empty(quats.shape[:-1] + (3, 3), device=quats.device, dtype=quats.dtype)
-    m[..., 0, 0] = 1.0 - (yy + zz)
-    m[..., 0, 1] = xy - wz
-    m[..., 0, 2] = xz + wy
-    m[..., 1, 0] = xy + wz
-    m[..., 1, 1] = 1.0 - (xx + zz)
-    m[..., 1, 2] = yz - wx
-    m[..., 2, 0] = xz - wy
-    m[..., 2, 1] = yz + wx
-    m[..., 2, 2] = 1.0 - (xx + yy)
-
-    return m
-
-
-def quat2euler(q, order='xyz', degrees=True):
-    """
-    Convert (w, x, y, z) quaternions to xyz euler angles. This is used for BVH output.
-    """
-    q0 = q[..., 0]
-    q1 = q[..., 1]
-    q2 = q[..., 2]
-    q3 = q[..., 3]
-    es = torch.empty(q0.shape + (3,), device=q.device, dtype=q.dtype)
-
-    if order == 'xyz':
-        es[..., 2] = torch.atan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
-        es[..., 1] = torch.asin((2 * (q1 * q3 + q0 * q2)).clip(-1, 1))
-        es[..., 0] = torch.atan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
-    else:
-        raise NotImplementedError('Cannot convert to ordering %s' % order)
-
-    if degrees:
-        es = es * 180 / np.pi
-
-    return es
-
-
-def euler2mat(rots, order='xyz'):
-    axis = {'x': torch.tensor((1, 0, 0), device=rots.device),
-            'y': torch.tensor((0, 1, 0), device=rots.device),
-            'z': torch.tensor((0, 0, 1), device=rots.device)}
-
-    rots = rots / 180 * np.pi
-    mats = []
-    for i in range(3):
-        aa = axis[order[i]] * rots[..., i].unsqueeze(-1)
-        mats.append(aa2mat(aa))
-    return mats[0] @ (mats[1] @ mats[2])
-
-
-def aa2mat(rots):
-    """
-    Convert angle-axis representation to rotation matrix
-    :param rots: angle-axis representation
-    :return:
-    """
-    quat = aa2quat(rots)
-    mat = quat2mat(quat)
-    return mat
-
-
-def mat2quat(R) -> torch.Tensor:
-    '''
-    https://github.com/duolu/pyrotation/blob/master/pyrotation/pyrotation.py
-    Convert a rotation matrix to a unit quaternion.
-
-    This uses Shepperd's method for numerical stability.
-    '''
-
-    # The rotation matrix must be orthonormal
-
-    w2 = (1 + R[..., 0, 0] + R[..., 1, 1] + R[..., 2, 2])
-    x2 = (1 + R[..., 0, 0] - R[..., 1, 1] - R[..., 2, 2])
-    y2 = (1 - R[..., 0, 0] + R[..., 1, 1] - R[..., 2, 2])
-    z2 = (1 - R[..., 0, 0] - R[..., 1, 1] + R[..., 2, 2])
-
-    yz = (R[..., 1, 2] + R[..., 2, 1])
-    xz = (R[..., 2, 0] + R[..., 0, 2])
-    xy = (R[..., 0, 1] + R[..., 1, 0])
-
-    wx = (R[..., 2, 1] - R[..., 1, 2])
-    wy = (R[..., 0, 2] - R[..., 2, 0])
-    wz = (R[..., 1, 0] - R[..., 0, 1])
-
-    w = torch.empty_like(x2)
-    x = torch.empty_like(x2)
-    y = torch.empty_like(x2)
-    z = torch.empty_like(x2)
-
-    flagA = (R[..., 2, 2] < 0) * (R[..., 0, 0] > R[..., 1, 1])
-    flagB = (R[..., 2, 2] < 0) * (R[..., 0, 0] <= R[..., 1, 1])
-    flagC = (R[..., 2, 2] >= 0) * (R[..., 0, 0] < -R[..., 1, 1])
-    flagD = (R[..., 2, 2] >= 0) * (R[..., 0, 0] >= -R[..., 1, 1])
-
-    x[flagA] = torch.sqrt(x2[flagA])
-    w[flagA] = wx[flagA] / x[flagA]
-    y[flagA] = xy[flagA] / x[flagA]
-    z[flagA] = xz[flagA] / x[flagA]
-
-    y[flagB] = torch.sqrt(y2[flagB])
-    w[flagB] = wy[flagB] / y[flagB]
-    x[flagB] = xy[flagB] / y[flagB]
-    z[flagB] = yz[flagB] / y[flagB]
-
-    z[flagC] = torch.sqrt(z2[flagC])
-    w[flagC] = wz[flagC] / z[flagC]
-    x[flagC] = xz[flagC] / z[flagC]
-    y[flagC] = yz[flagC] / z[flagC]
-
-    w[flagD] = torch.sqrt(w2[flagD])
-    x[flagD] = wx[flagD] / w[flagD]
-    y[flagD] = wy[flagD] / w[flagD]
-    z[flagD] = wz[flagD] / w[flagD]
-
-    # if R[..., 2, 2] < 0:
-    #
-    #     if R[..., 0, 0] > R[..., 1, 1]:
-    #
-    #         x = torch.sqrt(x2)
-    #         w = wx / x
-    #         y = xy / x
-    #         z = xz / x
-    #
-    #     else:
-    #
-    #         y = torch.sqrt(y2)
-    #         w = wy / y
-    #         x = xy / y
-    #         z = yz / y
-    #
-    # else:
-    #
-    #     if R[..., 0, 0] < -R[..., 1, 1]:
-    #
-    #         z = torch.sqrt(z2)
-    #         w = wz / z
-    #         x = xz / z
-    #         y = yz / z
-    #
-    #     else:
-    #
-    #         w = torch.sqrt(w2)
-    #         x = wx / w
-    #         y = wy / w
-    #         z = wz / w
-
-    res = [w, x, y, z]
-    res = [z.unsqueeze(-1) for z in res]
-
-    return torch.cat(res, dim=-1) / 2
-
-
-def quat2repr6d(quat):
-    mat = quat2mat(quat)
-    res = mat[..., :2, :]
-    res = res.reshape(res.shape[:-2] + (6, ))
-    return res
-
-
-def repr6d2mat(repr):
-    x = repr[..., :3]
-    y = repr[..., 3:]
-    x = x / x.norm(dim=-1, keepdim=True)
-    z = torch.cross(x, y, dim=-1)
-    z = z / z.norm(dim=-1, keepdim=True)
-    y = torch.cross(z, x, dim=-1)
-    res = [x, y, z]
-    res = [v.unsqueeze(-2) for v in res]
-    mat = torch.cat(res, dim=-2)
-    return mat
-
-
-def repr6d2quat(repr) -> torch.Tensor:
-    x = repr[..., :3]
-    y = repr[..., 3:]
-    x = x / x.norm(dim=-1, keepdim=True)
-    z = torch.cross(x, y, dim=-1)
-    z = z / z.norm(dim=-1, keepdim=True)
-    y = torch.cross(z, x, dim=-1)
-    res = [x, y, z]
-    res = [v.unsqueeze(-2) for v in res]
-    mat = torch.cat(res, dim=-2)
-    return mat2quat(mat)
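
The conversions above compose into round trips, which makes for a cheap sanity check. A sketch using the functions defined in this module (comparison is up to sign, since quaternions double-cover SO(3)):

```python
import torch

aa = torch.randn(8, 3) * 0.3                        # random small angle-axis rotations
q = aa2quat(aa)                                     # (8, 4) wxyz, w >= 0
r6 = quat2repr6d(q)                                 # (8, 6) continuous 6D representation
q_back = repr6d2quat(r6)                            # back to wxyz quaternions

sign = torch.sign((q * q_back).sum(-1, keepdim=True))
print(torch.allclose(q, sign * q_back, atol=1e-4))  # True
```
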
-
-
-def inv_affine(mat):
-    """
-    Calculate the inverse of any affine transformation
-    """
-    affine = torch.zeros((mat.shape[:2] + (1, 4)))
-    affine[..., 3] = 1
-    vert_mat = torch.cat((mat, affine), dim=2)
-    vert_mat_inv = torch.inverse(vert_mat)
-    return vert_mat_inv[..., :3, :]
-
-
-def inv_rigid_affine(mat):
-    """
-    Calculate the inverse of a rigid affine transformation
-    """
-    res = mat.clone()
-    res[..., :3] = mat[..., :3].transpose(-2, -1)
-    res[..., 3] = -torch.matmul(res[..., :3], mat[..., 3].unsqueeze(-1)).squeeze(-1)
-    return res
-
-
-def generate_pose(batch_size, device, uniform=False, factor=1, root_rot=False, n_bone=None, ee=None):
-    if n_bone is None: n_bone = 24
-    if ee is not None:
-        if root_rot:
-            ee.append(0)
-        n_bone_ = n_bone
-        n_bone = len(ee)
-    axis = torch.randn((batch_size, n_bone, 3), device=device)
-    axis /= axis.norm(dim=-1, keepdim=True)
-    if uniform:
-        angle = torch.rand((batch_size, n_bone, 1), device=device) * np.pi
-    else:
-        angle = torch.randn((batch_size, n_bone, 1), device=device) * np.pi / 6 * factor
-        angle = angle.clamp(-np.pi, np.pi)  # clamp returns a new tensor; assign it back
-    poses = axis * angle
-    if ee is not None:
-        res = torch.zeros((batch_size, n_bone_, 3), device=device)
-        for i, id in enumerate(ee):
-            res[:, id] = poses[:, i]
-        poses = res
-    poses = poses.reshape(batch_size, -1)
-    if not root_rot:
-        poses[..., :3] = 0
-    return poses
-
-
-def slerp(l, r, t, unit=True):
-    """
-    :param l: shape = (*, n)
-    :param r: shape = (*, n)
-    :param t: shape = (*)
-    :param unit: If l and r are unit vectors
-    :return:
-    """
-    eps = 1e-8
-    if not unit:
-        l_n = l / torch.norm(l, dim=-1, keepdim=True)
-        r_n = r / torch.norm(r, dim=-1, keepdim=True)
-    else:
-        l_n = l
-        r_n = r
-    omega = torch.acos((l_n * r_n).sum(dim=-1).clamp(-1, 1))
-    dom = torch.sin(omega)
-
-    flag = dom < eps
-
-    res = torch.empty_like(l_n)
-    t_t = t[flag].unsqueeze(-1)
-    res[flag] = (1 - t_t) * l_n[flag] + t_t * r_n[flag]
-
-    flag = ~ flag
-
-    t_t = t[flag]
-    d_t = dom[flag]
-    va = torch.sin((1 - t_t) * omega[flag]) / d_t
-    vb = torch.sin(t_t * omega[flag]) / d_t
-    res[flag] = (va.unsqueeze(-1) * l_n[flag] + vb.unsqueeze(-1) * r_n[flag])
-    return res
-
-
-def slerp_quat(l, r, t):
-    """
-    slerp for unit quaternions
-    :param l: (*, 4) unit quaternion
-    :param r: (*, 4) unit quaternion
-    :param t: (*) scalar between 0 and 1
-    """
-    t = t.expand(l.shape[:-1])
-    flag = (l * r).sum(dim=-1) >= 0
-    res = torch.empty_like(l)
-    res[flag] = slerp(l[flag], r[flag], t[flag])
-    flag = ~ flag
-    res[flag] = slerp(-l[flag], r[flag], t[flag])
-    return res
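
slerp_quat handles the quaternion double cover by flipping the sign of l whenever the dot product with r is negative. A usage sketch with a hand-checkable case:

```python
import torch

l = torch.tensor([[1.0, 0.0, 0.0, 0.0]])           # identity rotation
r = torch.tensor([[0.7071, 0.7071, 0.0, 0.0]])     # 90 degrees about x
t = torch.tensor([0.5])

q = slerp_quat(l, r, t)
print(q)                          # ~45 degrees about x: [0.9239, 0.3827, 0, 0]
print(q.norm(dim=-1))             # ~1.0: interpolation stays on the unit sphere
```
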
-
-
-# def slerp_6d(l, r, t):
-#     l_q = repr6d2quat(l)
-#     r_q = repr6d2quat(r)
-#     res_q = slerp_quat(l_q, r_q, t)
-#     return quat2repr6d(res_q)
-
-
-def interpolate_6d(input, size):
-    """
-    :param input: (batch_size, n_channels, length)
-    :param size: required output size for temporal axis
-    :return:
-    """
-    batch = input.shape[0]
-    length = input.shape[-1]
-    input = input.reshape((batch, -1, 6, length))
-    input = input.permute(0, 1, 3, 2)     # (batch_size, n_joint, length, 6)
-    input_q = repr6d2quat(input)
-    idx = torch.tensor(list(range(size)), device=input_q.device, dtype=torch.float) / size * (length - 1)
-    idx_l = torch.floor(idx)
-    t = idx - idx_l
-    idx_l = idx_l.long()
-    idx_r = idx_l + 1
-    t = t.reshape((1, 1, -1))
-    res_q = slerp_quat(input_q[..., idx_l, :], input_q[..., idx_r, :], t)
-    res = quat2repr6d(res_q)  # shape = (batch_size, n_joint, t, 6)
-    res = res.permute(0, 1, 3, 2)
-    res = res.reshape((batch, -1, size))
-    return res
diff --git a/spaces/xiaorong/fork2-so-vits/models.py b/spaces/xiaorong/fork2-so-vits/models.py
deleted file mode 100644
index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000
--- a/spaces/xiaorong/fork2-so-vits/models.py
+++ /dev/null
@@ -1,533 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-    super().__init__()
-    filter_channels = in_channels  # this override needs to be removed in a future version
-    self.in_channels = in_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.n_flows = n_flows
-    self.gin_channels = gin_channels
-
-    self.log_flow = modules.Log()
-    self.flows = nn.ModuleList()
-    self.flows.append(modules.ElementwiseAffine(2))
-    for i in range(n_flows):
-      self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-      self.flows.append(modules.Flip())
-
-    self.post_pre = nn.Conv1d(1, filter_channels, 1)
-    self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
-    self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-    self.post_flows = nn.ModuleList()
-    self.post_flows.append(modules.ElementwiseAffine(2))
-    for i in range(4):
-      self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-      self.post_flows.append(modules.Flip())
-
-    self.pre = nn.Conv1d(in_channels, filter_channels, 1)
-    self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
-    self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-    if gin_channels != 0:
-      self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
-  def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
-    x = torch.detach(x)
-    x = self.pre(x)
-    if g is not None:
-      g = torch.detach(g)
-      x = x + self.cond(g)
-    x = self.convs(x, x_mask)
-    x = self.proj(x) * x_mask
-
-    if not reverse:
-      flows = self.flows
-      assert w is not None
-
-      logdet_tot_q = 0 
-      h_w = self.post_pre(w)
-      h_w = self.post_convs(h_w, x_mask)
-      h_w = self.post_proj(h_w) * x_mask
-      e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
-      z_q = e_q
-      for flow in self.post_flows:
-        z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
-        logdet_tot_q += logdet_q
-      z_u, z1 = torch.split(z_q, [1, 1], 1) 
-      u = torch.sigmoid(z_u) * x_mask
-      z0 = (w - u) * x_mask
-      logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
-      logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
-      logdet_tot = 0
-      z0, logdet = self.log_flow(z0, x_mask)
-      logdet_tot += logdet
-      z = torch.cat([z0, z1], 1)
-      for flow in flows:
-        z, logdet = flow(z, x_mask, g=x, reverse=reverse)
-        logdet_tot = logdet_tot + logdet
-      nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
-      return nll + logq # [b]
-    else:
-      flows = list(reversed(self.flows))
-      flows = flows[:-2] + [flows[-1]] # remove a useless vflow
-      z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
-      for flow in flows:
-        z = flow(z, x_mask, g=x, reverse=reverse)
-      z0, z1 = torch.split(z, [1, 1], 1)
-      logw = z0
-      return logw
-
-
-class DurationPredictor(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
-    super().__init__()
-
-    self.in_channels = in_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.gin_channels = gin_channels
-
-    self.drop = nn.Dropout(p_dropout)
-    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
-    self.norm_1 = modules.LayerNorm(filter_channels)
-    self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
-    self.norm_2 = modules.LayerNorm(filter_channels)
-    self.proj = nn.Conv1d(filter_channels, 1, 1)
-
-    if gin_channels != 0:
-      self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
-  def forward(self, x, x_mask, g=None):
-    x = torch.detach(x)
-    if g is not None:
-      g = torch.detach(g)
-      x = x + self.cond(g)
-    x = self.conv_1(x * x_mask)
-    x = torch.relu(x)
-    x = self.norm_1(x)
-    x = self.drop(x)
-    x = self.conv_2(x * x_mask)
-    x = torch.relu(x)
-    x = self.norm_2(x)
-    x = self.drop(x)
-    x = self.proj(x * x_mask)
-    return x * x_mask
-
-
-class TextEncoder(nn.Module):
-  def __init__(self,
-      n_vocab,
-      out_channels,
-      hidden_channels,
-      filter_channels,
-      n_heads,
-      n_layers,
-      kernel_size,
-      p_dropout):
-    super().__init__()
-    self.n_vocab = n_vocab
-    self.out_channels = out_channels
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-
-    self.emb = nn.Embedding(n_vocab, hidden_channels)
-    nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
-    self.encoder = attentions.Encoder(
-      hidden_channels,
-      filter_channels,
-      n_heads,
-      n_layers,
-      kernel_size,
-      p_dropout)
-    self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-  def forward(self, x, x_lengths):
-    x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
-    x = torch.transpose(x, 1, -1) # [b, h, t]
-    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-    x = self.encoder(x * x_mask, x_mask)
-    stats = self.proj(x) * x_mask
-
-    m, logs = torch.split(stats, self.out_channels, dim=1)
-    return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
-  def __init__(self,
-      channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      n_flows=4,
-      gin_channels=0):
-    super().__init__()
-    self.channels = channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.n_flows = n_flows
-    self.gin_channels = gin_channels
-
-    self.flows = nn.ModuleList()
-    for i in range(n_flows):
-      self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
-      self.flows.append(modules.Flip())
-
-  def forward(self, x, x_mask, g=None, reverse=False):
-    if not reverse:
-      for flow in self.flows:
-        x, _ = flow(x, x_mask, g=g, reverse=reverse)
-    else:
-      for flow in reversed(self.flows):
-        x = flow(x, x_mask, g=g, reverse=reverse)
-    return x
-
-
-class PosteriorEncoder(nn.Module):
-  def __init__(self,
-      in_channels,
-      out_channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      gin_channels=0):
-    super().__init__()
-    self.in_channels = in_channels
-    self.out_channels = out_channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.gin_channels = gin_channels
-
-    self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
-    self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
-    self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-  def forward(self, x, x_lengths, g=None):
-    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-    x = self.pre(x) * x_mask
-    x = self.enc(x, x_mask, g=g)
-    stats = self.proj(x) * x_mask
-    m, logs = torch.split(stats, self.out_channels, dim=1)
-    z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-    return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
-    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
-        super(Generator, self).__init__()
-        self.num_kernels = len(resblock_kernel_sizes)
-        self.num_upsamples = len(upsample_rates)
-        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
-        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
-        self.ups = nn.ModuleList()
-        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-            self.ups.append(weight_norm(
-                ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
-                                k, u, padding=(k-u)//2)))
-
-        self.resblocks = nn.ModuleList()
-        for i in range(len(self.ups)):
-            ch = upsample_initial_channel//(2**(i+1))
-            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
-                self.resblocks.append(resblock(ch, k, d))
-
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-        self.ups.apply(init_weights)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-    def forward(self, x, g=None):
-        x = self.conv_pre(x)
-        if g is not None:
-          x = x + self.cond(g)
-
-        for i in range(self.num_upsamples):
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            x = self.ups[i](x)
-            xs = None
-            for j in range(self.num_kernels):
-                if xs is None:
-                    xs = self.resblocks[i*self.num_kernels+j](x)
-                else:
-                    xs += self.resblocks[i*self.num_kernels+j](x)
-            x = xs / self.num_kernels
-        x = F.leaky_relu(x)
-        x = self.conv_post(x)
-        x = torch.tanh(x)
-
-        return x
-
-    def remove_weight_norm(self):
-        print('Removing weight norm...')
-        for l in self.ups:
-            remove_weight_norm(l)
-        for l in self.resblocks:
-            l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
-    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-        super(DiscriminatorP, self).__init__()
-        self.period = period
-        self.use_spectral_norm = use_spectral_norm
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
-        self.convs = nn.ModuleList([
-            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
-        ])
-        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-    def forward(self, x):
-        fmap = []
-
-        # 1d to 2d
-        b, c, t = x.shape
-        if t % self.period != 0: # pad first
-            n_pad = self.period - (t % self.period)
-            x = F.pad(x, (0, n_pad), "reflect")
-            t = t + n_pad
-        x = x.view(b, c, t // self.period, self.period)
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(DiscriminatorS, self).__init__()
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
-        self.convs = nn.ModuleList([
-            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-        ])
-        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-    def forward(self, x):
-        fmap = []
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(MultiPeriodDiscriminator, self).__init__()
-        periods = [2,3,5,7,11]
-
-        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-        discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
-        self.discriminators = nn.ModuleList(discs)
-
-    def forward(self, y, y_hat):
-        y_d_rs = []
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            y_d_rs.append(y_d_r)
-            y_d_gs.append(y_d_g)
-            fmap_rs.append(fmap_r)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
-  """
-  Synthesizer for Training
-  """
-
-  def __init__(self, 
-    n_vocab,
-    spec_channels,
-    segment_size,
-    inter_channels,
-    hidden_channels,
-    filter_channels,
-    n_heads,
-    n_layers,
-    kernel_size,
-    p_dropout,
-    resblock, 
-    resblock_kernel_sizes, 
-    resblock_dilation_sizes, 
-    upsample_rates, 
-    upsample_initial_channel, 
-    upsample_kernel_sizes,
-    n_speakers=0,
-    gin_channels=0,
-    use_sdp=True,
-    **kwargs):
-
-    super().__init__()
-    self.n_vocab = n_vocab
-    self.spec_channels = spec_channels
-    self.inter_channels = inter_channels
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.resblock = resblock
-    self.resblock_kernel_sizes = resblock_kernel_sizes
-    self.resblock_dilation_sizes = resblock_dilation_sizes
-    self.upsample_rates = upsample_rates
-    self.upsample_initial_channel = upsample_initial_channel
-    self.upsample_kernel_sizes = upsample_kernel_sizes
-    self.segment_size = segment_size
-    self.n_speakers = n_speakers
-    self.gin_channels = gin_channels
-
-    self.use_sdp = use_sdp
-
-    self.enc_p = TextEncoder(n_vocab,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout)
-    self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
-    self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
-    self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
-    if use_sdp:
-      self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
-    else:
-      self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
-    if n_speakers > 1:
-      self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
-  def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
-    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-    if self.n_speakers > 0:
-      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
-    else:
-      g = None
-
-    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-    z_p = self.flow(z, y_mask, g=g)
-
-    with torch.no_grad():
-      # negative cross-entropy
-      s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
-      neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
-      neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-      neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-      neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
-      neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
-      attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-      attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
-    w = attn.sum(2)
-    if self.use_sdp:
-      l_length = self.dp(x, x_mask, w, g=g)
-      l_length = l_length / torch.sum(x_mask)
-    else:
-      logw_ = torch.log(w + 1e-6) * x_mask
-      logw = self.dp(x, x_mask, g=g)
-      l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging 
-
-    # expand prior
-    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
-    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
-    z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
-    o = self.dec(z_slice, g=g)
-    return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-  def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
-    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-    if self.n_speakers > 0:
-      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
-    else:
-      g = None
-
-    if self.use_sdp:
-      logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
-    else:
-      logw = self.dp(x, x_mask, g=g)
-    w = torch.exp(logw) * x_mask * length_scale
-    w_ceil = torch.ceil(w)
-    y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
-    y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
-    attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-    attn = commons.generate_path(w_ceil, attn_mask)
-
-    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
-    z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-    z = self.flow(z_p, y_mask, g=g, reverse=True)
-    o = self.dec((z * y_mask)[:,:,:max_len], g=g)
-    return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
-  def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
-    assert self.n_speakers > 0, "n_speakers has to be larger than 0."
-    g_src = self.emb_g(sid_src).unsqueeze(-1)
-    g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
-    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
-    z_p = self.flow(z, y_mask, g=g_src)
-    z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
-    o_hat = self.dec(z_hat * y_mask, g=g_tgt)
-    return o_hat, y_mask, (z, z_p, z_hat)
-
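
DiscriminatorP above folds a waveform into period-length columns before its 2D convolutions, reflect-padding so the length divides evenly. A standalone sketch of just that reshaping step, independent of the modules/commons dependencies:

```python
import torch
import torch.nn.functional as F

period = 5
x = torch.randn(1, 1, 8003)                  # (batch, channels, time)

b, c, t = x.shape
if t % period != 0:                          # pad first, as in DiscriminatorP.forward
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)
print(x.shape)                               # torch.Size([1, 1, 1601, 5])
```
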
diff --git a/spaces/xxie92/antibody_visulization/diffab/tools/renumber/__init__.py b/spaces/xxie92/antibody_visulization/diffab/tools/renumber/__init__.py
deleted file mode 100644
index 95c78181a7c9dd2ba964b3c44be1bce1cba46f60..0000000000000000000000000000000000000000
--- a/spaces/xxie92/antibody_visulization/diffab/tools/renumber/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .run import renumber
diff --git a/spaces/yangogo/bingo/src/components/chat-history.tsx b/spaces/yangogo/bingo/src/components/chat-history.tsx
deleted file mode 100644
index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000
--- a/spaces/yangogo/bingo/src/components/chat-history.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons"
-
-export function ChatHistory() {
-  return (
-    <div className="chat-history fixed top-18 right-4">
-      <div className="chat-history-header text-sm font-semibold text-left w-[280px] px-4 py-6">
-        History
-      </div>
-      <div className="chat-history-main">
-        <div className="scroller">
-          <div className="surface">
-            <div className="threads">
-              <div className="thread">
-                <div className="primary-row">
-                  <button type="button" aria-label="加载聊天">
-
-                  </button>
-                  <div className="description">
-                    <h3 className="name">无标题的聊天</h3>
-                  </div>
-                  <h4 className="time">上午1:42</h4>
-                  <div className="controls">
-
-                    <button className="edit icon-button" type="button" aria-label="重命名">
-                      <IconEdit />
-                    </button>
-
-                    <button className="delete icon-button" type="button" aria-label="删除">
-                      <IconTrash />
-                    </button>
-
-                    <button className="more icon-button" type="button" aria-haspopup="true" aria-expanded="false" aria-label="更多">
-                      <IconMore />
-                    </button>
-
-                    <button className="export icon-button" type="button" aria-label="导出">
-                      <IconDownload />
-                    </button>
-                  </div>
-                </div>
-              </div>
-            </div>
-          </div>
-        </div>
-      </div>
-    </div>
-  )
-}
diff --git a/spaces/yeqingmei123/face-test/e4e/criteria/lpips/networks.py b/spaces/yeqingmei123/face-test/e4e/criteria/lpips/networks.py
deleted file mode 100644
index 3a0d13ad2d560278f16586da68d3a5eadb26e746..0000000000000000000000000000000000000000
--- a/spaces/yeqingmei123/face-test/e4e/criteria/lpips/networks.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from typing import Sequence
-
-from itertools import chain
-
-import torch
-import torch.nn as nn
-from torchvision import models
-
-from criteria.lpips.utils import normalize_activation
-
-
-def get_network(net_type: str):
-    if net_type == 'alex':
-        return AlexNet()
-    elif net_type == 'squeeze':
-        return SqueezeNet()
-    elif net_type == 'vgg':
-        return VGG16()
-    else:
-        raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
-
-
-class LinLayers(nn.ModuleList):
-    def __init__(self, n_channels_list: Sequence[int]):
-        super(LinLayers, self).__init__([
-            nn.Sequential(
-                nn.Identity(),
-                nn.Conv2d(nc, 1, 1, 1, 0, bias=False)
-            ) for nc in n_channels_list
-        ])
-
-        for param in self.parameters():
-            param.requires_grad = False
-
-
-class BaseNet(nn.Module):
-    def __init__(self):
-        super(BaseNet, self).__init__()
-
-        # register buffer
-        self.register_buffer(
-            'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
-        self.register_buffer(
-            'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
-
-    def set_requires_grad(self, state: bool):
-        for param in chain(self.parameters(), self.buffers()):
-            param.requires_grad = state
-
-    def z_score(self, x: torch.Tensor):
-        return (x - self.mean) / self.std
-
-    def forward(self, x: torch.Tensor):
-        x = self.z_score(x)
-
-        output = []
-        for i, (_, layer) in enumerate(self.layers._modules.items(), 1):
-            x = layer(x)
-            if i in self.target_layers:
-                output.append(normalize_activation(x))
-            if len(output) == len(self.target_layers):
-                break
-        return output
-
-
-class SqueezeNet(BaseNet):
-    def __init__(self):
-        super(SqueezeNet, self).__init__()
-
-        self.layers = models.squeezenet1_1(True).features
-        self.target_layers = [2, 5, 8, 10, 11, 12, 13]
-        self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
-
-        self.set_requires_grad(False)
-
-
-class AlexNet(BaseNet):
-    def __init__(self):
-        super(AlexNet, self).__init__()
-
-        self.layers = models.alexnet(True).features
-        self.target_layers = [2, 5, 8, 10, 12]
-        self.n_channels_list = [64, 192, 384, 256, 256]
-
-        self.set_requires_grad(False)
-
-
-class VGG16(BaseNet):
-    def __init__(self):
-        super(VGG16, self).__init__()
-
-        self.layers = models.vgg16(True).features
-        self.target_layers = [4, 9, 16, 23, 30]
-        self.n_channels_list = [64, 128, 256, 512, 512]
-
-        self.set_requires_grad(False)
\ No newline at end of file
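
The BaseNet subclasses above just tap intermediate torchvision feature maps at fixed layer indices. A self-contained sketch of the same tapping for AlexNet, assuming a recent torchvision (weights disabled so it runs offline), mirroring the class's target_layers:

```python
import torch
from torchvision import models

layers = models.alexnet(weights=None).features
target_layers = [2, 5, 8, 10, 12]            # same taps as the AlexNet class above

x = torch.randn(1, 3, 64, 64)
feats = []
for i, layer in enumerate(layers, 1):        # 1-indexed, as in BaseNet.forward
    x = layer(x)
    if i in target_layers:
        feats.append(x)
print([f.shape[1] for f in feats])           # [64, 192, 384, 256, 256] channel counts
```
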
diff --git a/spaces/ygangang/VToonify/vtoonify/model/stylegan/lpips/dist_model.py b/spaces/ygangang/VToonify/vtoonify/model/stylegan/lpips/dist_model.py
deleted file mode 100644
index d8a14a61ca36f2562e16feb66c9625dd2f5e0469..0000000000000000000000000000000000000000
--- a/spaces/ygangang/VToonify/vtoonify/model/stylegan/lpips/dist_model.py
+++ /dev/null
@@ -1,284 +0,0 @@
-
-from __future__ import absolute_import
-
-import sys
-import numpy as np
-import torch
-from torch import nn
-import os
-from collections import OrderedDict
-from torch.autograd import Variable
-import itertools
-from model.stylegan.lpips.base_model import BaseModel
-from scipy.ndimage import zoom
-import fractions
-import functools
-import skimage.transform
-from tqdm import tqdm
-
-from IPython import embed
-
-from model.stylegan.lpips import networks_basic as networks
-import model.stylegan.lpips as util
-
-class DistModel(BaseModel):
-    def name(self):
-        return self.model_name
-
-    def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
-            use_gpu=True, printNet=False, spatial=False, 
-            is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
-        '''
-        INPUTS
-            model - ['net-lin'] for linearly calibrated network
-                    ['net'] for off-the-shelf network
-                    ['L2'] for L2 distance in Lab colorspace
-                    ['SSIM'] for ssim in RGB colorspace
-            net - ['squeeze','alex','vgg']
-            model_path - if None, will look in weights/[NET_NAME].pth
-            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
-            use_gpu - bool - whether or not to use a GPU
-            printNet - bool - whether or not to print network architecture out
-            spatial - bool - whether to output an array containing varying distances across spatial dimensions
-            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
-            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
-            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
-            is_train - bool - [True] for training mode
-            lr - float - initial learning rate
-            beta1 - float - initial momentum term for adam
-            version - 0.1 for latest, 0.0 was original (with a bug)
-            gpu_ids - int array - [0] by default, gpus to use
-        '''
-        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
-
-        self.model = model
-        self.net = net
-        self.is_train = is_train
-        self.spatial = spatial
-        self.gpu_ids = gpu_ids
-        self.model_name = '%s [%s]'%(model,net)
-
-        if(self.model == 'net-lin'): # pretrained net + linear layer
-            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
-                use_dropout=True, spatial=spatial, version=version, lpips=True)
-            kw = {}
-            if not use_gpu:
-                kw['map_location'] = 'cpu'
-            if(model_path is None):
-                import inspect
-                model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net)))
-
-            if(not is_train):
-                print('Loading model from: %s'%model_path)
-                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
-
-        elif(self.model=='net'): # pretrained network
-            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
-        elif(self.model in ['L2','l2']):
-            self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
-            self.model_name = 'L2'
-        elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
-            self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
-            self.model_name = 'SSIM'
-        else:
-            raise ValueError("Model [%s] not recognized." % self.model)
-
-        self.parameters = list(self.net.parameters())
-
-        if self.is_train: # training mode
-            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
-            self.rankLoss = networks.BCERankingLoss()
-            self.parameters += list(self.rankLoss.net.parameters())
-            self.lr = lr
-            self.old_lr = lr
-            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
-        else: # test mode
-            self.net.eval()
-
-        if(use_gpu):
-            self.net.to(gpu_ids[0])
-            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
-            if(self.is_train):
-                self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0
-
-        if(printNet):
-            print('---------- Networks initialized -------------')
-            networks.print_network(self.net)
-            print('-----------------------------------------------')
-
-    def forward(self, in0, in1, retPerLayer=False):
-        ''' Function computes the distance between image patches in0 and in1
-        INPUTS
-            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
-        OUTPUT
-            computed distances between in0 and in1
-        '''
-
-        return self.net.forward(in0, in1, retPerLayer=retPerLayer)
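
For orientation, a minimal usage sketch of the DistModel defined above, assuming the package layout this file imports from and the default weights resolution (weights/v0.1/alex.pth relative to this module); the image tensors are random stand-ins scaled to [-1, 1] as forward() requires:

import torch
from model.stylegan.lpips.dist_model import DistModel

model = DistModel()
model.initialize(model='net-lin', net='alex', use_gpu=False)  # linearly calibrated AlexNet

img0 = torch.rand(4, 3, 64, 64) * 2 - 1  # Nx3xXxY patches in [-1, 1]
img1 = torch.rand(4, 3, 64, 64) * 2 - 1
d = model.forward(img0, img1)            # one perceptual distance per pair
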
-
-    # ***** TRAINING FUNCTIONS *****
-    def optimize_parameters(self):
-        self.forward_train()
-        self.optimizer_net.zero_grad()
-        self.backward_train()
-        self.optimizer_net.step()
-        self.clamp_weights()
-
-    def clamp_weights(self):
-        for module in self.net.modules():
-            if(hasattr(module, 'weight') and getattr(module, 'kernel_size', None) == (1, 1)):  # not every module with a weight has a kernel_size
-                module.weight.data = torch.clamp(module.weight.data,min=0)
-
-    def set_input(self, data):
-        self.input_ref = data['ref']
-        self.input_p0 = data['p0']
-        self.input_p1 = data['p1']
-        self.input_judge = data['judge']
-
-        if(self.use_gpu):
-            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
-            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
-            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
-            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
-
-        self.var_ref = Variable(self.input_ref,requires_grad=True)
-        self.var_p0 = Variable(self.input_p0,requires_grad=True)
-        self.var_p1 = Variable(self.input_p1,requires_grad=True)
-
-    def forward_train(self): # run forward pass
-        # print(self.net.module.scaling_layer.shift)
-        # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())
-
-        self.d0 = self.forward(self.var_ref, self.var_p0)
-        self.d1 = self.forward(self.var_ref, self.var_p1)
-        self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
-
-        self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
-
-        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
-
-        return self.loss_total
-
-    def backward_train(self):
-        torch.mean(self.loss_total).backward()
-
-    def compute_accuracy(self,d0,d1,judge):
-        ''' d0, d1 are Variables, judge is a Tensor '''
-        d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
-        judge_per = judge.cpu().numpy().flatten()
-        return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
-
-    def get_current_errors(self):
-        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
-                            ('acc_r', self.acc_r)])
-
-        for key in retDict.keys():
-            retDict[key] = np.mean(retDict[key])
-
-        return retDict
-
-    def get_current_visuals(self):
-        zoom_factor = 256/self.var_ref.data.size()[2]
-
-        ref_img = util.tensor2im(self.var_ref.data)
-        p0_img = util.tensor2im(self.var_p0.data)
-        p1_img = util.tensor2im(self.var_p1.data)
-
-        ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
-        p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
-        p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
-
-        return OrderedDict([('ref', ref_img_vis),
-                            ('p0', p0_img_vis),
-                            ('p1', p1_img_vis)])
-
-    def save(self, path, label):
-        if(self.use_gpu):
-            self.save_network(self.net.module, path, '', label)
-        else:
-            self.save_network(self.net, path, '', label)
-        self.save_network(self.rankLoss.net, path, 'rank', label)
-
-    def update_learning_rate(self,nepoch_decay):
-        lrd = self.lr / nepoch_decay
-        lr = self.old_lr - lrd
-
-        for param_group in self.optimizer_net.param_groups:
-            param_group['lr'] = lr
-
-        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
-        self.old_lr = lr
-
-def score_2afc_dataset(data_loader, func, name=''):
-    ''' Function computes Two Alternative Forced Choice (2AFC) score using
-        distance function 'func' in dataset 'data_loader'
-    INPUTS
-        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
-        func - callable distance function - calling d=func(in0,in1) should take 2
-            pytorch tensors with shape Nx3xXxY, and return a pytorch tensor of length N
-    OUTPUTS
-        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
-        [1] - dictionary with following elements
-            d0s,d1s - N arrays containing distances between reference patch to perturbed patches 
-            gts - N array in [0,1], preferred patch selected by human evaluators
-                (closer to "0" for left patch p0, "1" for right patch p1,
-                "0.6" means 60pct people preferred right patch, 40pct preferred left)
-            scores - N array in [0,1], corresponding to what percentage function agreed with humans
-    CONSTS
-        N - number of test triplets in data_loader
-    '''
-
-    d0s = []
-    d1s = []
-    gts = []
-
-    for data in tqdm(data_loader.load_data(), desc=name):
-        d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist()
-        d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist()
-        gts+=data['judge'].cpu().numpy().flatten().tolist()
-
-    d0s = np.array(d0s)
-    d1s = np.array(d1s)
-    gts = np.array(gts)
-    scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
-
-    return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
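
A toy check of the agreement formula above: when d0 < d1 the metric prefers p0, so the per-triplet score is 1 - gts; when d1 < d0 it is gts; ties score 0.5 (the values here are made up):

import numpy as np

d0s = np.array([0.2, 0.8, 0.5])
d1s = np.array([0.6, 0.3, 0.5])
gts = np.array([0.1, 0.9, 0.5])   # fraction of humans preferring p1
scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5
print(scores)         # [0.9 0.9 0.5]
print(scores.mean())  # ~0.767
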
-
-def score_jnd_dataset(data_loader, func, name=''):
-    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
-    INPUTS
-        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
-        func - callable distance function - calling d=func(in0,in1) should take 2
-            pytorch tensors with shape Nx3xXxY, and return a pytorch tensor of length N
-    OUTPUTS
-        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
-        [1] - dictionary with following elements
-            ds - N array containing distances between two patches shown to human evaluator
-            sames - N array containing fraction of people who thought the two patches were identical
-    CONSTS
-        N - number of test triplets in data_loader
-    '''
-
-    ds = []
-    gts = []
-
-    for data in tqdm(data_loader.load_data(), desc=name):
-        ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()
-        gts+=data['same'].cpu().numpy().flatten().tolist()
-
-    sames = np.array(gts)
-    ds = np.array(ds)
-
-    sorted_inds = np.argsort(ds)
-    ds_sorted = ds[sorted_inds]
-    sames_sorted = sames[sorted_inds]
-
-    TPs = np.cumsum(sames_sorted)
-    FPs = np.cumsum(1-sames_sorted)
-    FNs = np.sum(sames_sorted)-TPs
-
-    precs = TPs/(TPs+FPs)
-    recs = TPs/(TPs+FNs)
-    score = util.voc_ap(recs,precs)
-
-    return(score, dict(ds=ds,sames=sames))
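
The precision/recall construction above, traced on a toy example (util.voc_ap then integrates these curves into the mAP score): distances are sorted ascending and the "same" judgments act as positives.

import numpy as np

ds = np.array([0.1, 0.4, 0.2, 0.9])
sames = np.array([1.0, 0.0, 1.0, 0.0])
order = np.argsort(ds)
sames_sorted = sames[order]        # [1. 1. 0. 0.]
TPs = np.cumsum(sames_sorted)      # [1. 2. 2. 2.]
FPs = np.cumsum(1 - sames_sorted)  # [0. 0. 1. 2.]
FNs = sames_sorted.sum() - TPs     # [1. 0. 0. 0.]
precs = TPs / (TPs + FPs)          # [1. 1. 0.667 0.5]
recs = TPs / (TPs + FNs)           # [0.5 1. 1. 1.]
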
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/modeling_tf_deberta_v2.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/modeling_tf_deberta_v2.py
deleted file mode 100644
index fa2cf1df74d09c9d971d04ec05af91e989ee0633..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/modeling_tf_deberta_v2.py
+++ /dev/null
@@ -1,1630 +0,0 @@
-# coding=utf-8
-# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" TF 2.0 DeBERTa-v2 model."""
-
-from __future__ import annotations
-
-from typing import Dict, Optional, Tuple, Union
-
-import numpy as np
-import tensorflow as tf
-
-from ...activations_tf import get_tf_activation
-from ...modeling_tf_outputs import (
-    TFBaseModelOutput,
-    TFMaskedLMOutput,
-    TFMultipleChoiceModelOutput,
-    TFQuestionAnsweringModelOutput,
-    TFSequenceClassifierOutput,
-    TFTokenClassifierOutput,
-)
-from ...modeling_tf_utils import (
-    TFMaskedLanguageModelingLoss,
-    TFModelInputType,
-    TFMultipleChoiceLoss,
-    TFPreTrainedModel,
-    TFQuestionAnsweringLoss,
-    TFSequenceClassificationLoss,
-    TFTokenClassificationLoss,
-    get_initializer,
-    unpack_inputs,
-)
-from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
-from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
-from .configuration_deberta_v2 import DebertaV2Config
-
-
-logger = logging.get_logger(__name__)
-
-_CONFIG_FOR_DOC = "DebertaV2Config"
-_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge"
-
-TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
-    "kamalkraj/deberta-v2-xlarge",
-    # See all DeBERTa models at https://huggingface.co/models?filter=deberta-v2
-]
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2
-class TFDebertaV2ContextPooler(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-        self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name="dense")
-        self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name="dropout")
-        self.config = config
-
-    def call(self, hidden_states, training: bool = False):
-        # We "pool" the model by simply taking the hidden state corresponding
-        # to the first token.
-        context_token = hidden_states[:, 0]
-        context_token = self.dropout(context_token, training=training)
-        pooled_output = self.dense(context_token)
-        pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
-        return pooled_output
-
-    @property
-    def output_dim(self) -> int:
-        return self.config.hidden_size
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2
-class TFDebertaV2XSoftmax(tf.keras.layers.Layer):
-    """
-    Masked Softmax which is optimized for saving memory
-
-    Args:
-        input (`tf.Tensor`): The input tensor to which softmax is applied.
-        mask (`tf.Tensor`): The mask matrix, where 0 indicates that the element will be ignored in the softmax calculation.
-        dim (int): The dimension along which softmax is applied
-    """
-
-    def __init__(self, axis=-1, **kwargs):
-        super().__init__(**kwargs)
-        self.axis = axis
-
-    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
-        rmask = tf.logical_not(tf.cast(mask, tf.bool))
-        output = tf.where(rmask, float("-inf"), inputs)
-        output = stable_softmax(output, self.axis)
-        output = tf.where(rmask, 0.0, output)
-        return output
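
A standalone sketch of the masking behaviour, with tf.nn.softmax standing in for the internal stable_softmax helper: masked positions come out exactly 0 and the surviving positions renormalize among themselves.

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[1, 1, 0]])                  # last position is masked
rmask = tf.logical_not(tf.cast(mask, tf.bool))
out = tf.where(rmask, float("-inf"), logits)
out = tf.nn.softmax(out, axis=-1)
out = tf.where(rmask, 0.0, out)
print(out.numpy())                               # ~[[0.269 0.731 0.   ]]
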
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2
-class TFDebertaV2StableDropout(tf.keras.layers.Layer):
-    """
-    Optimized dropout module for stabilizing the training
-
-    Args:
-        drop_prob (float): the dropout probabilities
-    """
-
-    def __init__(self, drop_prob, **kwargs):
-        super().__init__(**kwargs)
-        self.drop_prob = drop_prob
-
-    @tf.custom_gradient
-    def xdropout(self, inputs):
-        """
-        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
-        """
-        mask = tf.cast(
-            1
-            - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
-            tf.bool,
-        )
-        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
-        if self.drop_prob > 0:
-            inputs = tf.where(mask, 0.0, inputs) * scale
-
-        def grad(upstream):
-            if self.drop_prob > 0:
-                return tf.where(mask, 0.0, upstream) * scale
-            else:
-                return upstream
-
-        return inputs, grad
-
-    def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
-        if training:
-            return self.xdropout(inputs)
-        return inputs
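
A rough numeric check of the inverted-dropout scaling this layer implements, re-derived with tf.random.uniform instead of the Bernoulli sampling used above: survivors are scaled by 1/(1 - p), so the mean activation is preserved in expectation.

import tensorflow as tf

p = 0.25
x = tf.ones((1, 1000))
dropped = tf.random.uniform(tf.shape(x)) < p     # True where an element is dropped
y = tf.where(dropped, 0.0, x) / (1.0 - p)
print(float(tf.reduce_mean(y)))                  # close to 1.0
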
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2
-class TFDebertaV2SelfOutput(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-        self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
-        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
-
-    def call(self, hidden_states, input_tensor, training: bool = False):
-        hidden_states = self.dense(hidden_states)
-        hidden_states = self.dropout(hidden_states, training=training)
-        hidden_states = self.LayerNorm(hidden_states + input_tensor)
-        return hidden_states
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2
-class TFDebertaV2Attention(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-        self.self = TFDebertaV2DisentangledSelfAttention(config, name="self")
-        self.dense_output = TFDebertaV2SelfOutput(config, name="output")
-        self.config = config
-
-    def call(
-        self,
-        input_tensor: tf.Tensor,
-        attention_mask: tf.Tensor,
-        query_states: tf.Tensor = None,
-        relative_pos: tf.Tensor = None,
-        rel_embeddings: tf.Tensor = None,
-        output_attentions: bool = False,
-        training: bool = False,
-    ) -> Tuple[tf.Tensor]:
-        self_outputs = self.self(
-            hidden_states=input_tensor,
-            attention_mask=attention_mask,
-            query_states=query_states,
-            relative_pos=relative_pos,
-            rel_embeddings=rel_embeddings,
-            output_attentions=output_attentions,
-            training=training,
-        )
-        if query_states is None:
-            query_states = input_tensor
-        attention_output = self.dense_output(
-            hidden_states=self_outputs[0], input_tensor=query_states, training=training
-        )
-
-        output = (attention_output,) + self_outputs[1:]
-
-        return output
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2
-class TFDebertaV2Intermediate(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.dense = tf.keras.layers.Dense(
-            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
-        )
-
-        if isinstance(config.hidden_act, str):
-            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
-        else:
-            self.intermediate_act_fn = config.hidden_act
-
-    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
-        hidden_states = self.dense(inputs=hidden_states)
-        hidden_states = self.intermediate_act_fn(hidden_states)
-
-        return hidden_states
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2
-class TFDebertaV2Output(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.dense = tf.keras.layers.Dense(
-            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
-        )
-        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
-
-    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
-        hidden_states = self.dense(inputs=hidden_states)
-        hidden_states = self.dropout(hidden_states, training=training)
-        hidden_states = self.LayerNorm(hidden_states + input_tensor)
-
-        return hidden_states
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2
-class TFDebertaV2Layer(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.attention = TFDebertaV2Attention(config, name="attention")
-        self.intermediate = TFDebertaV2Intermediate(config, name="intermediate")
-        self.bert_output = TFDebertaV2Output(config, name="output")
-
-    def call(
-        self,
-        hidden_states: tf.Tensor,
-        attention_mask: tf.Tensor,
-        query_states: tf.Tensor = None,
-        relative_pos: tf.Tensor = None,
-        rel_embeddings: tf.Tensor = None,
-        output_attentions: bool = False,
-        training: bool = False,
-    ) -> Tuple[tf.Tensor]:
-        attention_outputs = self.attention(
-            input_tensor=hidden_states,
-            attention_mask=attention_mask,
-            query_states=query_states,
-            relative_pos=relative_pos,
-            rel_embeddings=rel_embeddings,
-            output_attentions=output_attentions,
-            training=training,
-        )
-        attention_output = attention_outputs[0]
-        intermediate_output = self.intermediate(hidden_states=attention_output)
-        layer_output = self.bert_output(
-            hidden_states=intermediate_output, input_tensor=attention_output, training=training
-        )
-        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
-
-        return outputs
-
-
-class TFDebertaV2ConvLayer(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.kernel_size = getattr(config, "conv_kernel_size", 3)
-        # groups = getattr(config, "conv_groups", 1)
-        self.conv_act = get_tf_activation(getattr(config, "conv_act", "tanh"))
-        self.padding = (self.kernel_size - 1) // 2
-        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
-        self.config = config
-
-    def build(self, input_shape):
-        with tf.name_scope("conv"):
-            self.conv_kernel = self.add_weight(
-                name="kernel",
-                shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],
-                initializer=get_initializer(self.config.initializer_range),
-            )
-            self.conv_bias = self.add_weight(
-                name="bias", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()
-            )
-        return super().build(input_shape)
-
-    def call(
-        self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False
-    ) -> tf.Tensor:
-        out = tf.nn.conv2d(
-            tf.expand_dims(hidden_states, 1),
-            tf.expand_dims(self.conv_kernel, 0),
-            strides=1,
-            padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],
-        )
-        out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
-        rmask = tf.cast(1 - input_mask, tf.bool)
-        out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
-        out = self.dropout(out, training=training)
-        out = self.conv_act(out)
-
-        layer_norm_input = residual_states + out
-        output = self.LayerNorm(layer_norm_input)
-
-        if input_mask is None:
-            output_states = output
-        else:
-            if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
-                if len(shape_list(input_mask)) == 4:
-                    input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
-                input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), tf.float32)
-
-            output_states = output * input_mask
-
-        return output_states
-
-
-class TFDebertaV2Encoder(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.layer = [TFDebertaV2Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
-        self.relative_attention = getattr(config, "relative_attention", False)
-        self.config = config
-        if self.relative_attention:
-            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
-            if self.max_relative_positions < 1:
-                self.max_relative_positions = config.max_position_embeddings
-
-            self.position_buckets = getattr(config, "position_buckets", -1)
-            self.pos_ebd_size = self.max_relative_positions * 2
-
-            if self.position_buckets > 0:
-                self.pos_ebd_size = self.position_buckets * 2
-
-        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
-
-        if "layer_norm" in self.norm_rel_ebd:
-            self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-
-        self.conv = TFDebertaV2ConvLayer(config, name="conv") if getattr(config, "conv_kernel_size", 0) > 0 else None
-
-    def build(self, input_shape):
-        if self.relative_attention:
-            self.rel_embeddings = self.add_weight(
-                name="rel_embeddings.weight",
-                shape=[self.pos_ebd_size, self.config.hidden_size],
-                initializer=get_initializer(self.config.initializer_range),
-            )
-        return super().build(input_shape)
-
-    def get_rel_embedding(self):
-        rel_embeddings = self.rel_embeddings if self.relative_attention else None
-        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
-            rel_embeddings = self.LayerNorm(rel_embeddings)
-        return rel_embeddings
-
-    def get_attention_mask(self, attention_mask):
-        if len(shape_list(attention_mask)) <= 2:
-            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
-            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
-            attention_mask = tf.cast(attention_mask, tf.uint8)
-        elif len(shape_list(attention_mask)) == 3:
-            attention_mask = tf.expand_dims(attention_mask, 1)
-
-        return attention_mask
-
-    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
-        if self.relative_attention and relative_pos is None:
-            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
-            relative_pos = build_relative_position(
-                q,
-                shape_list(hidden_states)[-2],
-                bucket_size=self.position_buckets,
-                max_position=self.max_relative_positions,
-            )
-        return relative_pos
-
-    def call(
-        self,
-        hidden_states: tf.Tensor,
-        attention_mask: tf.Tensor,
-        query_states: tf.Tensor = None,
-        relative_pos: tf.Tensor = None,
-        output_attentions: bool = False,
-        output_hidden_states: bool = False,
-        return_dict: bool = True,
-        training: bool = False,
-    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
-        if len(shape_list(attention_mask)) <= 2:
-            input_mask = attention_mask
-        else:
-            input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)
-
-        all_hidden_states = () if output_hidden_states else None
-        all_attentions = () if output_attentions else None
-
-        attention_mask = self.get_attention_mask(attention_mask)
-        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
-
-        next_kv = hidden_states
-
-        rel_embeddings = self.get_rel_embedding()
-        output_states = next_kv
-        for i, layer_module in enumerate(self.layer):
-            if output_hidden_states:
-                all_hidden_states = all_hidden_states + (output_states,)
-
-            layer_outputs = layer_module(
-                hidden_states=next_kv,
-                attention_mask=attention_mask,
-                query_states=query_states,
-                relative_pos=relative_pos,
-                rel_embeddings=rel_embeddings,
-                output_attentions=output_attentions,
-                training=training,
-            )
-            output_states = layer_outputs[0]
-
-            if i == 0 and self.conv is not None:
-                output_states = self.conv(hidden_states, output_states, input_mask)
-
-            next_kv = output_states
-
-            if output_attentions:
-                all_attentions = all_attentions + (layer_outputs[1],)
-
-        # Add last layer
-        if output_hidden_states:
-            all_hidden_states = all_hidden_states + (output_states,)
-
-        if not return_dict:
-            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
-
-        return TFBaseModelOutput(
-            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
-        )
-
-
-def make_log_bucket_position(relative_pos, bucket_size, max_position):
-    sign = tf.math.sign(relative_pos)
-    mid = bucket_size // 2
-    abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
-    log_pos = (
-        tf.math.ceil(
-            tf.cast(tf.math.log(abs_pos / mid), tf.float32) / tf.math.log((max_position - 1) / mid) * (mid - 1)
-        )
-        + mid
-    )
-    bucket_pos = tf.cast(
-        tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32
-    )
-    return bucket_pos
-
-
-def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
-    """
-    Build relative position according to the query and key
-
-    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of the
-    key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
-    P_k\\)
-
-    Args:
-        query_size (int): the length of query
-        key_size (int): the length of key
-        bucket_size (int): the size of position bucket
-        max_position (int): the maximum allowed absolute position
-
-    Return:
-        `tf.Tensor`: A tensor with shape [1, query_size, key_size]
-
-    """
-    q_ids = tf.range(query_size, dtype=tf.int32)
-    k_ids = tf.range(key_size, dtype=tf.int32)
-    rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
-    if bucket_size > 0 and max_position > 0:
-        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
-    rel_pos_ids = rel_pos_ids[:query_size, :]
-    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
-    return tf.cast(rel_pos_ids, tf.int64)
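
With bucketing disabled, entry [0, i, j] of the returned tensor is simply i - j; for example, calling the function defined just above with a 3-token query against a 4-token key:

rel = build_relative_position(query_size=3, key_size=4)
print(rel.numpy()[0])
# [[ 0 -1 -2 -3]
#  [ 1  0 -1 -2]
#  [ 2  1  0 -1]]
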
-
-
-def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
-    shapes = [
-        shape_list(query_layer)[0],
-        shape_list(query_layer)[1],
-        shape_list(query_layer)[2],
-        shape_list(relative_pos)[-1],
-    ]
-    return tf.broadcast_to(c2p_pos, shapes)
-
-
-def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
-    shapes = [
-        shape_list(query_layer)[0],
-        shape_list(query_layer)[1],
-        shape_list(key_layer)[-2],
-        shape_list(key_layer)[-2],
-    ]
-    return tf.broadcast_to(c2p_pos, shapes)
-
-
-def pos_dynamic_expand(pos_index, p2c_att, key_layer):
-    shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
-    return tf.broadcast_to(pos_index, shapes)
-
-
-def take_along_axis(x, indices):
-    # Only a valid port of np.take_along_axis when the gather axis is -1
-
-    # TPU + gathers and reshapes don't go along well -- see https://github.com/huggingface/transformers/issues/18239
-    if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
-        # [B, S, P] -> [B, S, P, D]
-        one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)
-
-        # if we ignore the first two dims, this is equivalent to multiplying a matrix (one hot) by a vector (x)
-        # grossly abusing notation: [B, S, P, D] . [B, S, D] = [B, S, P]
-        gathered = tf.einsum("ijkl,ijl->ijk", one_hot_indices, x)
-
-    # GPUs, on the other hand, prefer gathers instead of large one-hot+matmuls
-    else:
-        gathered = tf.gather(x, indices, batch_dims=2)
-
-    return gathered
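
On the non-TPU path this reduces to tf.gather with batch_dims=2, which matches np.take_along_axis on the last axis; a quick equivalence check with made-up shapes:

import numpy as np
import tensorflow as tf

x = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), (2, 3, 4))
idx = tf.constant(np.random.randint(0, 4, size=(2, 3, 5)), dtype=tf.int32)
gathered = tf.gather(x, idx, batch_dims=2)
expected = np.take_along_axis(x.numpy(), idx.numpy(), axis=-1)
print(np.allclose(gathered.numpy(), expected))   # True
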
-
-
-class TFDebertaV2DisentangledSelfAttention(tf.keras.layers.Layer):
-    """
-    Disentangled self-attention module
-
-    Parameters:
-        config (`DebertaV2Config`):
-            A model config class instance with the configuration to build a new model. The schema is similar to
-            *BertConfig*, for more details, please refer [`DebertaV2Config`]
-
-    """
-
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-        if config.hidden_size % config.num_attention_heads != 0:
-            raise ValueError(
-                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
-                f"heads ({config.num_attention_heads})"
-            )
-        self.num_attention_heads = config.num_attention_heads
-        _attention_head_size = config.hidden_size // config.num_attention_heads
-        self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
-        self.all_head_size = self.num_attention_heads * self.attention_head_size
-        self.query_proj = tf.keras.layers.Dense(
-            self.all_head_size,
-            kernel_initializer=get_initializer(config.initializer_range),
-            name="query_proj",
-            use_bias=True,
-        )
-        self.key_proj = tf.keras.layers.Dense(
-            self.all_head_size,
-            kernel_initializer=get_initializer(config.initializer_range),
-            name="key_proj",
-            use_bias=True,
-        )
-        self.value_proj = tf.keras.layers.Dense(
-            self.all_head_size,
-            kernel_initializer=get_initializer(config.initializer_range),
-            name="value_proj",
-            use_bias=True,
-        )
-
-        self.share_att_key = getattr(config, "share_att_key", False)
-        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
-        self.relative_attention = getattr(config, "relative_attention", False)
-
-        if self.relative_attention:
-            self.position_buckets = getattr(config, "position_buckets", -1)
-            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
-            if self.max_relative_positions < 1:
-                self.max_relative_positions = config.max_position_embeddings
-            self.pos_ebd_size = self.max_relative_positions
-            if self.position_buckets > 0:
-                self.pos_ebd_size = self.position_buckets
-
-            self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="pos_dropout")
-
-            if not self.share_att_key:
-                if "c2p" in self.pos_att_type:
-                    self.pos_key_proj = tf.keras.layers.Dense(
-                        self.all_head_size,
-                        kernel_initializer=get_initializer(config.initializer_range),
-                        name="pos_proj",
-                        use_bias=True,
-                    )
-                if "p2c" in self.pos_att_type:
-                    self.pos_query_proj = tf.keras.layers.Dense(
-                        self.all_head_size,
-                        kernel_initializer=get_initializer(config.initializer_range),
-                        name="pos_q_proj",
-                    )
-        self.softmax = TFDebertaV2XSoftmax(axis=-1)
-        self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name="dropout")
-
-    def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
-        tensor_shape = shape_list(tensor)
-        # In graph mode, we can't reshape with -1 as the final dimension if the first dimension (batch size) is None
-        shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
-        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
-        tensor = tf.reshape(tensor=tensor, shape=shape)
-        tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
-        x_shape = shape_list(tensor)
-        tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
-        return tensor
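
A shape walk-through of the reshuffle above with hypothetical sizes (batch=2, seq=5, heads=4, head_size=16): [batch, seq, all_head_size] becomes [batch * heads, seq, head_size], the layout the attention matmuls below operate on.

import tensorflow as tf

t = tf.zeros((2, 5, 64))
t = tf.reshape(t, (2, 5, 4, 16))        # split the head dimension out
t = tf.transpose(t, perm=[0, 2, 1, 3])  # -> [2, 4, 5, 16]
t = tf.reshape(t, (-1, 5, 16))          # fold heads into the batch axis
print(t.shape)                          # (8, 5, 16)
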
-
-    def call(
-        self,
-        hidden_states: tf.Tensor,
-        attention_mask: tf.Tensor,
-        query_states: tf.Tensor = None,
-        relative_pos: tf.Tensor = None,
-        rel_embeddings: tf.Tensor = None,
-        output_attentions: bool = False,
-        training: bool = False,
-    ) -> Tuple[tf.Tensor]:
-        """
-        Call the module
-
-        Args:
-            hidden_states (`tf.Tensor`):
-                Input states to the module, usually the output of the previous layer; they serve as the Q, K and V in
-                *Attention(Q,K,V)*
-
-            attention_mask (`tf.Tensor`):
-                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
-                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
-                th token.
-
-            output_attentions (`bool`, optional):
-                Whether to return the attention matrix.
-
-            query_states (`tf.Tensor`, optional):
-                The *Q* state in *Attention(Q,K,V)*.
-
-            relative_pos (`tf.Tensor`):
-                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
-                values ranging in [*-max_relative_positions*, *max_relative_positions*].
-
-            rel_embeddings (`tf.Tensor`):
-                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
-                \\text{max_relative_positions}\\), *hidden_size*].
-
-
-        """
-        if query_states is None:
-            query_states = hidden_states
-        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
-        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
-        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
-
-        rel_att = None
-        # Take the dot product between "query" and "key" to get the raw attention scores.
-        scale_factor = 1
-        if "c2p" in self.pos_att_type:
-            scale_factor += 1
-        if "p2c" in self.pos_att_type:
-            scale_factor += 1
-        scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, tf.float32))
-        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1]) / scale)
-        if self.relative_attention:
-            rel_embeddings = self.pos_dropout(rel_embeddings)
-            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
-
-        if rel_att is not None:
-            attention_scores = attention_scores + rel_att
-        attention_scores = tf.reshape(
-            attention_scores,
-            (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),
-        )
-
-        # bsz x height x length x dimension
-        attention_probs = self.softmax(attention_scores, attention_mask)
-        attention_probs = self.dropout(attention_probs, training=training)
-        context_layer = tf.matmul(
-            tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),
-            value_layer,
-        )
-        context_layer = tf.transpose(
-            tf.reshape(
-                context_layer,
-                [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],
-            ),
-            [0, 2, 1, 3],
-        )
-        # Set the final dimension here explicitly.
-        # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
-        # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
-        # requires final input dimension to be defined
-        context_layer_shape = shape_list(context_layer)
-        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
-        context_layer = tf.reshape(context_layer, new_context_layer_shape)
-        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
-        return outputs
-
-    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
-        if relative_pos is None:
-            q = shape_list(query_layer)[-2]
-            relative_pos = build_relative_position(
-                q,
-                shape_list(key_layer)[-2],
-                bucket_size=self.position_buckets,
-                max_position=self.max_relative_positions,
-            )
-        shape_list_pos = shape_list(relative_pos)
-        if len(shape_list_pos) == 2:
-            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
-        elif len(shape_list_pos) == 3:
-            relative_pos = tf.expand_dims(relative_pos, 1)
-        # bsz x height x query x key
-        elif len(shape_list_pos) != 4:
-            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
-
-        att_span = self.pos_ebd_size
-        rel_embeddings = tf.expand_dims(
-            rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0
-        )
-        if self.share_att_key:
-            pos_query_layer = tf.tile(
-                self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),
-                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
-            )
-            pos_key_layer = tf.tile(
-                self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),
-                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
-            )
-        else:
-            if "c2p" in self.pos_att_type:
-                pos_key_layer = tf.tile(
-                    self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads),
-                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
-                )  # .split(self.all_head_size, dim=-1)
-            if "p2c" in self.pos_att_type:
-                pos_query_layer = tf.tile(
-                    self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads),
-                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
-                )  # .split(self.all_head_size, dim=-1)
-
-        score = 0
-        # content->position
-        if "c2p" in self.pos_att_type:
-            scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, tf.float32))
-            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
-            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
-            c2p_att = take_along_axis(
-                c2p_att,
-                tf.broadcast_to(
-                    tf.squeeze(c2p_pos, 0),
-                    [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],
-                ),
-            )
-            score += c2p_att / scale
-
-        # position->content
-        if "p2c" in self.pos_att_type:
-            scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, tf.float32))
-            if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
-                r_pos = build_relative_position(
-                    shape_list(key_layer)[-2],
-                    shape_list(key_layer)[-2],
-                    bucket_size=self.position_buckets,
-                    max_position=self.max_relative_positions,
-                )
-                r_pos = tf.expand_dims(r_pos, 0)
-            else:
-                r_pos = relative_pos
-
-            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
-
-            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
-            p2c_att = tf.transpose(
-                take_along_axis(
-                    p2c_att,
-                    tf.broadcast_to(
-                        tf.squeeze(p2c_pos, 0),
-                        [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],
-                    ),
-                ),
-                [0, 2, 1],
-            )
-            score += p2c_att / scale
-
-        return score
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings Deberta->DebertaV2
-class TFDebertaV2Embeddings(tf.keras.layers.Layer):
-    """Construct the embeddings from word, position and token_type embeddings."""
-
-    def __init__(self, config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.config = config
-        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
-        self.hidden_size = config.hidden_size
-        self.max_position_embeddings = config.max_position_embeddings
-        self.position_biased_input = getattr(config, "position_biased_input", True)
-        self.initializer_range = config.initializer_range
-        if self.embedding_size != config.hidden_size:
-            self.embed_proj = tf.keras.layers.Dense(
-                config.hidden_size,
-                kernel_initializer=get_initializer(config.initializer_range),
-                name="embed_proj",
-                use_bias=False,
-            )
-        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
-
-    def build(self, input_shape: tf.TensorShape):
-        with tf.name_scope("word_embeddings"):
-            self.weight = self.add_weight(
-                name="weight",
-                shape=[self.config.vocab_size, self.embedding_size],
-                initializer=get_initializer(self.initializer_range),
-            )
-
-        with tf.name_scope("token_type_embeddings"):
-            if self.config.type_vocab_size > 0:
-                self.token_type_embeddings = self.add_weight(
-                    name="embeddings",
-                    shape=[self.config.type_vocab_size, self.embedding_size],
-                    initializer=get_initializer(self.initializer_range),
-                )
-            else:
-                self.token_type_embeddings = None
-
-        with tf.name_scope("position_embeddings"):
-            if self.position_biased_input:
-                self.position_embeddings = self.add_weight(
-                    name="embeddings",
-                    shape=[self.max_position_embeddings, self.hidden_size],
-                    initializer=get_initializer(self.initializer_range),
-                )
-            else:
-                self.position_embeddings = None
-
-        super().build(input_shape)
-
-    def call(
-        self,
-        input_ids: tf.Tensor = None,
-        position_ids: tf.Tensor = None,
-        token_type_ids: tf.Tensor = None,
-        inputs_embeds: tf.Tensor = None,
-        mask: tf.Tensor = None,
-        training: bool = False,
-    ) -> tf.Tensor:
-        """
-        Applies embedding based on inputs tensor.
-
-        Returns:
-            final_embeddings (`tf.Tensor`): output embedding tensor.
-        """
-        if input_ids is None and inputs_embeds is None:
-            raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
-
-        if input_ids is not None:
-            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
-            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
-
-        input_shape = shape_list(inputs_embeds)[:-1]
-
-        if token_type_ids is None:
-            token_type_ids = tf.fill(dims=input_shape, value=0)
-
-        if position_ids is None:
-            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
-
-        final_embeddings = inputs_embeds
-        if self.position_biased_input:
-            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
-            final_embeddings += position_embeds
-        if self.config.type_vocab_size > 0:
-            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
-            final_embeddings += token_type_embeds
-
-        if self.embedding_size != self.hidden_size:
-            final_embeddings = self.embed_proj(final_embeddings)
-
-        final_embeddings = self.LayerNorm(final_embeddings)
-
-        if mask is not None:
-            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
-                if len(shape_list(mask)) == 4:
-                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
-                mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
-
-            final_embeddings = final_embeddings * mask
-
-        final_embeddings = self.dropout(final_embeddings, training=training)
-
-        return final_embeddings
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2
-class TFDebertaV2PredictionHeadTransform(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
-
-        self.dense = tf.keras.layers.Dense(
-            units=self.embedding_size,
-            kernel_initializer=get_initializer(config.initializer_range),
-            name="dense",
-        )
-
-        if isinstance(config.hidden_act, str):
-            self.transform_act_fn = get_tf_activation(config.hidden_act)
-        else:
-            self.transform_act_fn = config.hidden_act
-        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
-
-    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
-        hidden_states = self.dense(inputs=hidden_states)
-        hidden_states = self.transform_act_fn(hidden_states)
-        hidden_states = self.LayerNorm(hidden_states)
-
-        return hidden_states
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2
-class TFDebertaV2LMPredictionHead(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
-        super().__init__(**kwargs)
-
-        self.config = config
-        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
-
-        self.transform = TFDebertaV2PredictionHeadTransform(config, name="transform")
-
-        # The output weights are the same as the input embeddings, but there is
-        # an output-only bias for each token.
-        self.input_embeddings = input_embeddings
-
-    def build(self, input_shape: tf.TensorShape):
-        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
-
-        super().build(input_shape)
-
-    def get_output_embeddings(self) -> tf.keras.layers.Layer:
-        return self.input_embeddings
-
-    def set_output_embeddings(self, value: tf.Variable):
-        self.input_embeddings.weight = value
-        self.input_embeddings.vocab_size = shape_list(value)[0]
-
-    def get_bias(self) -> Dict[str, tf.Variable]:
-        return {"bias": self.bias}
-
-    def set_bias(self, value: tf.Variable):
-        self.bias = value["bias"]
-        self.config.vocab_size = shape_list(value["bias"])[0]
-
-    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
-        hidden_states = self.transform(hidden_states=hidden_states)
-        seq_length = shape_list(hidden_states)[1]
-        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
-        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
-        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
-        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
-
-        return hidden_states
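
The head ties its output projection to the input embedding table, so the logits are hidden_states times the transposed embedding matrix plus a per-token bias; a minimal standalone sketch with made-up sizes:

import tensorflow as tf

vocab, emb = 10, 8
weight = tf.random.normal((vocab, emb))   # shared embedding table
bias = tf.zeros((vocab,))
hidden = tf.random.normal((2, 5, emb))    # [batch, seq, embedding_size]
flat = tf.reshape(hidden, (-1, emb))
logits = tf.reshape(tf.matmul(flat, weight, transpose_b=True), (-1, 5, vocab))
logits = tf.nn.bias_add(logits, bias)
print(logits.shape)                       # (2, 5, 10)
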
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2
-class TFDebertaV2OnlyMLMHead(tf.keras.layers.Layer):
-    def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
-        super().__init__(**kwargs)
-        self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name="predictions")
-
-    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
-        prediction_scores = self.predictions(hidden_states=sequence_output)
-
-        return prediction_scores
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2
-class TFDebertaV2MainLayer(tf.keras.layers.Layer):
-    config_class = DebertaV2Config
-
-    def __init__(self, config: DebertaV2Config, **kwargs):
-        super().__init__(**kwargs)
-
-        self.config = config
-
-        self.embeddings = TFDebertaV2Embeddings(config, name="embeddings")
-        self.encoder = TFDebertaV2Encoder(config, name="encoder")
-
-    def get_input_embeddings(self) -> tf.keras.layers.Layer:
-        return self.embeddings
-
-    def set_input_embeddings(self, value: tf.Variable):
-        self.embeddings.weight = value
-        self.embeddings.vocab_size = shape_list(value)[0]
-
-    def _prune_heads(self, heads_to_prune):
-        """
-        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
-        class PreTrainedModel
-        """
-        raise NotImplementedError
-
-    @unpack_inputs
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        training: bool = False,
-    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
-        if input_ids is not None and inputs_embeds is not None:
-            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
-        elif input_ids is not None:
-            input_shape = shape_list(input_ids)
-        elif inputs_embeds is not None:
-            input_shape = shape_list(inputs_embeds)[:-1]
-        else:
-            raise ValueError("You have to specify either input_ids or inputs_embeds")
-
-        if attention_mask is None:
-            attention_mask = tf.fill(dims=input_shape, value=1)
-
-        if token_type_ids is None:
-            token_type_ids = tf.fill(dims=input_shape, value=0)
-
-        embedding_output = self.embeddings(
-            input_ids=input_ids,
-            position_ids=position_ids,
-            token_type_ids=token_type_ids,
-            inputs_embeds=inputs_embeds,
-            mask=attention_mask,
-            training=training,
-        )
-
-        encoder_outputs = self.encoder(
-            hidden_states=embedding_output,
-            attention_mask=attention_mask,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-
-        sequence_output = encoder_outputs[0]
-
-        if not return_dict:
-            return (sequence_output,) + encoder_outputs[1:]
-
-        return TFBaseModelOutput(
-            last_hidden_state=sequence_output,
-            hidden_states=encoder_outputs.hidden_states,
-            attentions=encoder_outputs.attentions,
-        )
-
-
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2
-class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
-    """
-    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
-    models.
-    """
-
-    config_class = DebertaV2Config
-    base_model_prefix = "deberta"
-
-
-DEBERTA_START_DOCSTRING = r"""
-    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
-    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao and Weizhu Chen. It's built
-    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
-    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
-
-    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
-    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
-    behavior.
-
-    <Tip>
-
-    TensorFlow models and layers in `transformers` accept two formats as input:
-
-    - having all inputs as keyword arguments (like PyTorch models), or
-    - having all inputs as a list, tuple or dict in the first positional argument.
-
-    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
-    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
-    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
-    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
-    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
-    positional argument:
-
-    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
-    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
-    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
-    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
-    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
-
-    Note that when creating models and layers with
-    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
-    about any of this, as you can just pass inputs like you would to any other Python function!
-
-    </Tip>
-
-    Parameters:
-        config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
-            Initializing with a config file does not load the weights associated with the model, only the
-            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-DEBERTA_INPUTS_DOCSTRING = r"""
-    Args:
-        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
-            Indices of input sequence tokens in the vocabulary.
-
-            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-            [`PreTrainedTokenizer.__call__`] for details.
-
-            [What are input IDs?](../glossary#input-ids)
-        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
-            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
-            - 1 for tokens that are **not masked**,
-            - 0 for tokens that are **masked**.
-
-            [What are attention masks?](../glossary#attention-mask)
-        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
-            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
-            1]`:
-
-            - 0 corresponds to a *sentence A* token,
-            - 1 corresponds to a *sentence B* token.
-
-            [What are token type IDs?](../glossary#token-type-ids)
-        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
-            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
-            config.max_position_embeddings - 1]`.
-
-            [What are position IDs?](../glossary#position-ids)
-        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
-            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
-            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
-            model's internal embedding lookup matrix.
-        output_attentions (`bool`, *optional*):
-            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-            tensors for more detail.
-        output_hidden_states (`bool`, *optional*):
-            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-            more detail.
-        return_dict (`bool`, *optional*):
-            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
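-# A minimal usage sketch of the two input formats described above. The checkpoint
-# name is an assumption (any DeBERTa-v2/v3 checkpoint works; pass `from_pt=True`
-# if it only publishes PyTorch weights):
-#
-#     from transformers import AutoTokenizer, TFDebertaV2Model
-#
-#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
-#     model = TFDebertaV2Model.from_pretrained("microsoft/deberta-v3-base")
-#     inputs = tokenizer("DeBERTa disentangles content and position.", return_tensors="tf")
-#     outputs = model(inputs)    # dict in the first positional argument
-#     outputs = model(**inputs)  # or everything as keyword arguments
-#     outputs.last_hidden_state  # shape (1, sequence_length, hidden_size)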
-
-@add_start_docstrings(
-    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
-    DEBERTA_START_DOCSTRING,
-)
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2
-class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFBaseModelOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
-        outputs = self.deberta(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-
-        return outputs
-
-
-@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2
-class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        if config.is_decoder:
-            logger.warning(
-                "If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for "
-                "bi-directional self-attention."
-            )
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-        self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
-
-    def get_lm_head(self) -> tf.keras.layers.Layer:
-        return self.mlm.predictions
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFMaskedLMOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        labels: np.ndarray | tf.Tensor | None = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
-        r"""
-        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
-            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
-            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
-            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-        """
-        outputs = self.deberta(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-        sequence_output = outputs[0]
-        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
-        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
-
-        if not return_dict:
-            output = (prediction_scores,) + outputs[2:]
-            return ((loss,) + output) if loss is not None else output
-
-        return TFMaskedLMOutput(
-            loss=loss,
-            logits=prediction_scores,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
-
-
-@add_start_docstrings(
-    """
-    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
-    pooled output) e.g. for GLUE tasks.
-    """,
-    DEBERTA_START_DOCSTRING,
-)
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2
-class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        self.num_labels = config.num_labels
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-        self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
-
-        drop_out = getattr(config, "cls_dropout", None)
-        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
-        self.dropout = TFDebertaV2StableDropout(drop_out, name="cls_dropout")
-        self.classifier = tf.keras.layers.Dense(
-            units=config.num_labels,
-            kernel_initializer=get_initializer(config.initializer_range),
-            name="classifier",
-        )
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFSequenceClassifierOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        labels: np.ndarray | tf.Tensor | None = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
-        r"""
-        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
-            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
-            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
-            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
-        """
-        outputs = self.deberta(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-        sequence_output = outputs[0]
-        pooled_output = self.pooler(sequence_output, training=training)
-        pooled_output = self.dropout(pooled_output, training=training)
-        logits = self.classifier(pooled_output)
-        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
-
-        if not return_dict:
-            output = (logits,) + outputs[1:]
-
-            return ((loss,) + output) if loss is not None else output
-
-        return TFSequenceClassifierOutput(
-            loss=loss,
-            logits=logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
-
-
-@add_start_docstrings(
-    """
-    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
-    Named-Entity-Recognition (NER) tasks.
-    """,
-    DEBERTA_START_DOCSTRING,
-)
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2
-class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        self.num_labels = config.num_labels
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
-        self.classifier = tf.keras.layers.Dense(
-            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
-        )
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFTokenClassifierOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        labels: np.ndarray | tf.Tensor | None = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
-        r"""
-        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
-            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
-        """
-        outputs = self.deberta(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-        sequence_output = outputs[0]
-        sequence_output = self.dropout(sequence_output, training=training)
-        logits = self.classifier(inputs=sequence_output)
-        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
-
-        if not return_dict:
-            output = (logits,) + outputs[1:]
-            return ((loss,) + output) if loss is not None else output
-
-        return TFTokenClassifierOutput(
-            loss=loss,
-            logits=logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
-
-
-@add_start_docstrings(
-    """
-    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
-    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
-    """,
-    DEBERTA_START_DOCSTRING,
-)
-# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2
-class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        self.num_labels = config.num_labels
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-        self.qa_outputs = tf.keras.layers.Dense(
-            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
-        )
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFQuestionAnsweringModelOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        start_positions: np.ndarray | tf.Tensor | None = None,
-        end_positions: np.ndarray | tf.Tensor | None = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
-        r"""
-        start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
-            Labels for position (index) of the start of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
-        end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
-            Labels for position (index) of the end of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
-        """
-        outputs = self.deberta(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-        sequence_output = outputs[0]
-        logits = self.qa_outputs(inputs=sequence_output)
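-        # qa_outputs emits two logits per token; split them along the last axis
-        # into span-start and span-end scores and drop the singleton dimension.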
-        start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
-        start_logits = tf.squeeze(input=start_logits, axis=-1)
-        end_logits = tf.squeeze(input=end_logits, axis=-1)
-        loss = None
-
-        if start_positions is not None and end_positions is not None:
-            labels = {"start_position": start_positions}
-            labels["end_position"] = end_positions
-            loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
-
-        if not return_dict:
-            output = (start_logits, end_logits) + outputs[2:]
-            return ((loss,) + output) if loss is not None else output
-
-        return TFQuestionAnsweringModelOutput(
-            loss=loss,
-            start_logits=start_logits,
-            end_logits=end_logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
-
-
-@add_start_docstrings(
-    """
-    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
-    softmax) e.g. for RocStories/SWAG tasks.
-    """,
-    DEBERTA_START_DOCSTRING,
-)
-class TFDebertaV2ForMultipleChoice(TFDebertaV2PreTrainedModel, TFMultipleChoiceLoss):
-    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
-    # _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
-    # _keys_to_ignore_on_load_missing = [r"dropout"]
-
-    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
-        super().__init__(config, *inputs, **kwargs)
-
-        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
-        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
-        self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
-        self.classifier = tf.keras.layers.Dense(
-            units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
-        )
-
-    @unpack_inputs
-    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
-    @add_code_sample_docstrings(
-        checkpoint=_CHECKPOINT_FOR_DOC,
-        output_type=TFMultipleChoiceModelOutput,
-        config_class=_CONFIG_FOR_DOC,
-    )
-    def call(
-        self,
-        input_ids: TFModelInputType | None = None,
-        attention_mask: np.ndarray | tf.Tensor | None = None,
-        token_type_ids: np.ndarray | tf.Tensor | None = None,
-        position_ids: np.ndarray | tf.Tensor | None = None,
-        inputs_embeds: np.ndarray | tf.Tensor | None = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-        labels: np.ndarray | tf.Tensor | None = None,
-        training: Optional[bool] = False,
-    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
-        r"""
-        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
-            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
-            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
-        """
-        if input_ids is not None:
-            num_choices = shape_list(input_ids)[1]
-            seq_length = shape_list(input_ids)[2]
-        else:
-            num_choices = shape_list(inputs_embeds)[1]
-            seq_length = shape_list(inputs_embeds)[2]
-
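-        # Flatten the choice dimension into the batch dimension so the encoder
-        # sees inputs of shape (batch_size * num_choices, seq_length).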
-        flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
-        flat_attention_mask = (
-            tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
-        )
-        flat_token_type_ids = (
-            tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
-        )
-        flat_position_ids = (
-            tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
-        )
-        flat_inputs_embeds = (
-            tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
-            if inputs_embeds is not None
-            else None
-        )
-        outputs = self.deberta(
-            input_ids=flat_input_ids,
-            attention_mask=flat_attention_mask,
-            token_type_ids=flat_token_type_ids,
-            position_ids=flat_position_ids,
-            inputs_embeds=flat_inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-            training=training,
-        )
-        sequence_output = outputs[0]
-        pooled_output = self.pooler(sequence_output, training=training)
-        pooled_output = self.dropout(pooled_output, training=training)
-        logits = self.classifier(pooled_output)
-        reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
-        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
-
-        if not return_dict:
-            output = (reshaped_logits,) + outputs[2:]
-            return ((loss,) + output) if loss is not None else output
-
-        return TFMultipleChoiceModelOutput(
-            loss=loss,
-            logits=reshaped_logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/attentions.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/attentions.py
deleted file mode 100644
index f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f..0000000000000000000000000000000000000000
--- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/attentions.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import modules.commons as commons
-import modules.modules as modules
-from modules.modules import LayerNorm
-
-
-class FFT(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
-               proximal_bias=False, proximal_init=True, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-
-    self.drop = nn.Dropout(p_dropout)
-    self.self_attn_layers = nn.ModuleList()
-    self.norm_layers_0 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.self_attn_layers.append(
-        MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
-                           proximal_init=proximal_init))
-      self.norm_layers_0.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(
-        FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask):
-    """
-    x: decoder input
-    x_mask: mask over the decoder input
-    """
-    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.self_attn_layers[i](x, x, self_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_0[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class Encoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-
-    self.drop = nn.Dropout(p_dropout)
-    self.attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask):
-    attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.attn_layers[i](x, x, attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class Decoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-
-    self.drop = nn.Dropout(p_dropout)
-    self.self_attn_layers = nn.ModuleList()
-    self.norm_layers_0 = nn.ModuleList()
-    self.encdec_attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
-      self.norm_layers_0.append(LayerNorm(hidden_channels))
-      self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask, h, h_mask):
-    """
-    x: decoder input
-    h: encoder output
-    """
-    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
-    encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.self_attn_layers[i](x, x, self_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_0[i](x + y)
-
-      y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-      
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class MultiHeadAttention(nn.Module):
-  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
-    super().__init__()
-    assert channels % n_heads == 0
-
-    self.channels = channels
-    self.out_channels = out_channels
-    self.n_heads = n_heads
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-    self.heads_share = heads_share
-    self.block_length = block_length
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-    self.attn = None
-
-    self.k_channels = channels // n_heads
-    self.conv_q = nn.Conv1d(channels, channels, 1)
-    self.conv_k = nn.Conv1d(channels, channels, 1)
-    self.conv_v = nn.Conv1d(channels, channels, 1)
-    self.conv_o = nn.Conv1d(channels, out_channels, 1)
-    self.drop = nn.Dropout(p_dropout)
-
-    if window_size is not None:
-      n_heads_rel = 1 if heads_share else n_heads
-      rel_stddev = self.k_channels**-0.5
-      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-    nn.init.xavier_uniform_(self.conv_q.weight)
-    nn.init.xavier_uniform_(self.conv_k.weight)
-    nn.init.xavier_uniform_(self.conv_v.weight)
-    if proximal_init:
-      with torch.no_grad():
-        self.conv_k.weight.copy_(self.conv_q.weight)
-        self.conv_k.bias.copy_(self.conv_q.bias)
-      
-  def forward(self, x, c, attn_mask=None):
-    q = self.conv_q(x)
-    k = self.conv_k(c)
-    v = self.conv_v(c)
-    
-    x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-    x = self.conv_o(x)
-    return x
-
-  def attention(self, query, key, value, mask=None):
-    # reshape [b, d, t] -> [b, n_h, t, d_k]
-    b, d, t_s, t_t = (*key.size(), query.size(2))
-    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-    if self.window_size is not None:
-      assert t_s == t_t, "Relative attention is only available for self-attention."
-      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
-      scores_local = self._relative_position_to_absolute_position(rel_logits)
-      scores = scores + scores_local
-    if self.proximal_bias:
-      assert t_s == t_t, "Proximal bias is only available for self-attention."
-      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-    if mask is not None:
-      scores = scores.masked_fill(mask == 0, -1e4)
-      if self.block_length is not None:
-        assert t_s == t_t, "Local attention is only available for self-attention."
-        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-        scores = scores.masked_fill(block_mask == 0, -1e4)
-    p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
-    p_attn = self.drop(p_attn)
-    output = torch.matmul(p_attn, value)
-    if self.window_size is not None:
-      relative_weights = self._absolute_position_to_relative_position(p_attn)
-      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
-      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
-    output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
-    return output, p_attn
-
-  def _matmul_with_relative_values(self, x, y):
-    """
-    x: [b, h, l, m]
-    y: [h or 1, m, d]
-    ret: [b, h, l, d]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0))
-    return ret
-
-  def _matmul_with_relative_keys(self, x, y):
-    """
-    x: [b, h, l, d]
-    y: [h or 1, m, d]
-    ret: [b, h, l, m]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-    return ret
-
-  def _get_relative_embeddings(self, relative_embeddings, length):
-    max_relative_position = 2 * self.window_size + 1
-    # Pad first before slice to avoid using cond ops.
-    pad_length = max(length - (self.window_size + 1), 0)
-    slice_start_position = max((self.window_size + 1) - length, 0)
-    slice_end_position = slice_start_position + 2 * length - 1
-    if pad_length > 0:
-      padded_relative_embeddings = F.pad(
-          relative_embeddings,
-          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-    else:
-      padded_relative_embeddings = relative_embeddings
-    used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
-    return used_relative_embeddings
-
-  def _relative_position_to_absolute_position(self, x):
-    """
-    x: [b, h, l, 2*l-1]
-    ret: [b, h, l, l]
-    """
-    batch, heads, length, _ = x.size()
-    # Concat columns of pad to shift from relative to absolute indexing.
-    x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
-    # Concat extra elements so as to add up to shape (len+1, 2*len-1).
-    x_flat = x.view([batch, heads, length * 2 * length])
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
-    # Reshape and slice out the padded elements.
-    x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
-    return x_final
-
-  def _absolute_position_to_relative_position(self, x):
-    """
-    x: [b, h, l, l]
-    ret: [b, h, l, 2*l-1]
-    """
-    batch, heads, length, _ = x.size()
-    # pad along column
-    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
-    x_flat = x.view([batch, heads, length**2 + length*(length -1)])
-    # add 0's at the beginning that will skew the elements after the reshape
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-    x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
-    return x_final
-
-  def _attention_bias_proximal(self, length):
-    """Bias for self-attention to encourage attention to close positions.
-    Args:
-      length: an integer scalar.
-    Returns:
-      a Tensor with shape [1, 1, length, length]
-    """
-    r = torch.arange(length, dtype=torch.float32)
-    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
-  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-    super().__init__()
-    self.in_channels = in_channels
-    self.out_channels = out_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.activation = activation
-    self.causal = causal
-
-    if causal:
-      self.padding = self._causal_padding
-    else:
-      self.padding = self._same_padding
-
-    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-    self.drop = nn.Dropout(p_dropout)
-
-  def forward(self, x, x_mask):
-    x = self.conv_1(self.padding(x * x_mask))
-    if self.activation == "gelu":
-      x = x * torch.sigmoid(1.702 * x)
-    else:
-      x = torch.relu(x)
-    x = self.drop(x)
-    x = self.conv_2(self.padding(x * x_mask))
-    return x * x_mask
-  
-  def _causal_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = self.kernel_size - 1
-    pad_r = 0
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
-
-  def _same_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = (self.kernel_size - 1) // 2
-    pad_r = self.kernel_size // 2
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
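-
-
-# A minimal usage sketch (hyper-parameter values are illustrative assumptions):
-#
-#     enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6)
-#     x = torch.randn(1, 192, 100)    # [batch, channels, time]
-#     x_mask = torch.ones(1, 1, 100)  # 1 where a position is valid, 0 where padded
-#     y = enc(x, x_mask)              # -> [1, 192, 100]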
diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py
deleted file mode 100644
index aac56c07da2be4e181e3e95de8cee1fc2858286d..0000000000000000000000000000000000000000
--- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import numpy as np
-import os
-import unittest
-import pycocotools.mask as mask_util
-
-from detectron2.data import MetadataCatalog, detection_utils
-from detectron2.data import transforms as T
-from detectron2.structures import BitMasks, BoxMode
-from detectron2.utils.file_io import PathManager
-
-
-class TestTransformAnnotations(unittest.TestCase):
-    def test_transform_simple_annotation(self):
-        transforms = T.TransformList([T.HFlipTransform(400)])
-        anno = {
-            "bbox": np.asarray([10, 10, 200, 300]),
-            "bbox_mode": BoxMode.XYXY_ABS,
-            "category_id": 3,
-            "segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],
-        }
-
-        output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))
-        self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
-        self.assertEqual(len(output["segmentation"]), len(anno["segmentation"]))
-        self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10]))
-
-        detection_utils.annotations_to_instances([output, output], (400, 400))
-
-    def test_transform_empty_annotation(self):
-        detection_utils.annotations_to_instances([], (400, 400))
-
-    def test_flip_keypoints(self):
-        transforms = T.TransformList([T.HFlipTransform(400)])
-        anno = {
-            "bbox": np.asarray([10, 10, 200, 300]),
-            "bbox_mode": BoxMode.XYXY_ABS,
-            "keypoints": np.random.rand(17, 3) * 50 + 15,
-        }
-
-        output = detection_utils.transform_instance_annotations(
-            copy.deepcopy(anno),
-            transforms,
-            (400, 400),
-            keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
-                ["keypoints_coco_2017_train"]
-            ),
-        )
-        # The first keypoint is nose
-        self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
-        # The last 16 keypoints are 8 left-right pairs
-        self.assertTrue(
-            np.allclose(
-                output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
-                400 - anno["keypoints"][1:, 0].reshape(-1, 2),
-            )
-        )
-        self.assertTrue(
-            np.allclose(
-                output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
-                anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
-            )
-        )
-
-    def test_crop(self):
-        transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])
-        keypoints = np.random.rand(17, 3) * 50 + 15
-        keypoints[:, 2] = 2
-        anno = {
-            "bbox": np.asarray([10, 10, 200, 400]),
-            "bbox_mode": BoxMode.XYXY_ABS,
-            "keypoints": keypoints,
-        }
-
-        output = detection_utils.transform_instance_annotations(
-            copy.deepcopy(anno), transforms, (10, 10)
-        )
-        # box is shifted and cropped
-        self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all())
-        # keypoints are no longer visible
-        self.assertTrue((output["keypoints"][:, 2] == 0).all())
-
-    def test_transform_RLE(self):
-        transforms = T.TransformList([T.HFlipTransform(400)])
-        mask = np.zeros((300, 400), order="F").astype("uint8")
-        mask[:, :200] = 1
-
-        anno = {
-            "bbox": np.asarray([10, 10, 200, 300]),
-            "bbox_mode": BoxMode.XYXY_ABS,
-            "segmentation": mask_util.encode(mask[:, :, None])[0],
-            "category_id": 3,
-        }
-        output = detection_utils.transform_instance_annotations(
-            copy.deepcopy(anno), transforms, (300, 400)
-        )
-        mask = output["segmentation"]
-        self.assertTrue((mask[:, 200:] == 1).all())
-        self.assertTrue((mask[:, :200] == 0).all())
-
-        inst = detection_utils.annotations_to_instances(
-            [output, output], (400, 400), mask_format="bitmask"
-        )
-        self.assertTrue(isinstance(inst.gt_masks, BitMasks))
-
-    def test_transform_RLE_resize(self):
-        transforms = T.TransformList(
-            [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
-        )
-        mask = np.zeros((300, 400), order="F").astype("uint8")
-        mask[:, :200] = 1
-
-        anno = {
-            "bbox": np.asarray([10, 10, 200, 300]),
-            "bbox_mode": BoxMode.XYXY_ABS,
-            "segmentation": mask_util.encode(mask[:, :, None])[0],
-            "category_id": 3,
-        }
-        output = detection_utils.transform_instance_annotations(
-            copy.deepcopy(anno), transforms, (400, 400)
-        )
-
-        inst = detection_utils.annotations_to_instances(
-            [output, output], (400, 400), mask_format="bitmask"
-        )
-        self.assertTrue(isinstance(inst.gt_masks, BitMasks))
-
-    def test_gen_crop(self):
-        instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
-        t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
-        # the box center must fall into the cropped region
-        self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
-
-    def test_gen_crop_outside_boxes(self):
-        instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
-        with self.assertRaises(AssertionError):
-            detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)
-
-    def test_read_sem_seg(self):
-        cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir
-        sem_seg_gt_path = os.path.join(
-            cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png"
-        )
-        if not PathManager.exists(sem_seg_gt_path):
-            raise unittest.SkipTest(
-                "Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path)
-            )
-        sem_seg = detection_utils.read_image(sem_seg_gt_path, "L")
-        self.assertEqual(sem_seg.ndim, 3)
-        self.assertEqual(sem_seg.shape[2], 1)
-        self.assertEqual(sem_seg.dtype, np.uint8)
-        self.assertEqual(sem_seg.max(), 32)
-        self.assertEqual(sem_seg.min(), 1)
-
-    def test_read_exif_orientation(self):
-        # https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg
-        URL = "detectron2://assets/Landscape_5.jpg"
-        img = detection_utils.read_image(URL, "RGB")
-        self.assertEqual(img.ndim, 3)
-        self.assertEqual(img.dtype, np.uint8)
-        self.assertEqual(img.shape, (1200, 1800, 3))  # check that shape is not transposed
-
-    def test_opencv_exif_orientation(self):
-        import cv2
-
-        URL = "detectron2://assets/Landscape_5.jpg"
-        with PathManager.open(URL, "rb") as f:
-            img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR)
-        self.assertEqual(img.dtype, np.uint8)
-        self.assertEqual(img.shape, (1200, 1800, 3))
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/spaces/youplala/StoreCopilot/src/callbacks/layout/__init__.py b/spaces/youplala/StoreCopilot/src/callbacks/layout/__init__.py
deleted file mode 100644
index 56bdb1be426e6c7a0c608a818abb42d98129317b..0000000000000000000000000000000000000000
--- a/spaces/youplala/StoreCopilot/src/callbacks/layout/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-import src.callbacks.layout.menu as menu
-import src.callbacks.layout.template as template
diff --git "a/spaces/yunfei0710/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/spaces/yunfei0710/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
deleted file mode 100644
index 554c485aa0891f74c57cacfcbe076febe7a11029..0000000000000000000000000000000000000000
--- "a/spaces/yunfei0710/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
+++ /dev/null
@@ -1,175 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-fast_debug = False
-
-class PaperFileGroup():
-    def __init__(self):
-        self.file_paths = []
-        self.file_contents = []
-        self.sp_file_contents = []
-        self.sp_file_index = []
-        self.sp_file_tag = []
-
-        # count_token
-        from request_llm.bridge_all import model_info
-        enc = model_info["gpt-3.5-turbo"]['tokenizer']
-        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-        self.get_token_num = get_token_num
-
-    def run_file_split(self, max_token_limit=1900):
-        """
-        Split long text into segments that stay under the token limit
-        """
-        for index, file_content in enumerate(self.file_contents):
-            if self.get_token_num(file_content) < max_token_limit:
-                self.sp_file_contents.append(file_content)
-                self.sp_file_index.append(index)
-                self.sp_file_tag.append(self.file_paths[index])
-            else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
-                for j, segment in enumerate(segments):
-                    self.sp_file_contents.append(segment)
-                    self.sp_file_index.append(index)
-                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-
-        print('Segmentation: done')
-
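-# A minimal usage sketch (the file name is an illustrative assumption):
-#
-#     pfg = PaperFileGroup()
-#     pfg.file_paths = ['main.tex']
-#     pfg.file_contents = [open('main.tex', encoding='utf-8').read()]
-#     pfg.run_file_split(max_token_limit=1024)
-#     print(len(pfg.sp_file_contents), 'segments ready for translation')
-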
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
-    import time, os, re
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-    #  <-------- Read the Latex files and remove all comments ----------> 
-    pfg = PaperFileGroup()
-
-    for index, fp in enumerate(file_manifest):
-        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-            file_content = f.read()
-            # Regex for Latex comments: an unescaped % through the end of the line
-            comment_pattern = r'(?<!\\)%.*'
-            # Find the comments with the regex and replace them with empty strings
-            clean_tex_content = re.sub(comment_pattern, '', file_content)
-            # Record the text with the comments removed
-            pfg.file_paths.append(fp)
-            pfg.file_contents.append(clean_tex_content)
-
-    #  <-------- Split latex files that are too long ----------> 
-    pfg.run_file_split(max_token_limit=1024)
-    n_split = len(pfg.sp_file_contents)
-
-    #  <-------- Extract the abstract ----------> 
-    # if language == 'en':
-    #     abs_extract_inputs = f"Please write an abstract for this paper"
-
-    # # Single thread: fetch the paper's meta information
-    # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
-    #     inputs=abs_extract_inputs,
-    #     inputs_show_user=f"正在抽取摘要信息。",
-    #     llm_kwargs=llm_kwargs,
-    #     chatbot=chatbot, history=[],
-    #     sys_prompt="Your job is to collect information from materials。",
-    # )
-
-    #  <-------- Start the multi-threaded translation ----------> 
-    if language == 'en->zh':
-        inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" + 
-                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
-        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
-        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-    elif language == 'zh->en':
-        inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" + 
-                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
-        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
-        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
-    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
-        inputs_array=inputs_array,
-        inputs_show_user_array=inputs_show_user_array,
-        llm_kwargs=llm_kwargs,
-        chatbot=chatbot,
-        history_array=[[""] for _ in range(n_split)],
-        sys_prompt_array=sys_prompt_array,
-        # max_workers=5,  # maximum parallel load allowed by OpenAI
-        scroller_max_len = 80
-    )
-
-    #  <-------- Collect the results and exit ----------> 
-    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
-    history = gpt_response_collection
-    chatbot.append((f"{fp}完成了吗?", res))
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-
-
-
-
-@CatchException
-def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic info: feature description and contributors
-    chatbot.append([
-        "函数插件功能?",
-        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
-    try:
-        import tiktoken
-    except:
-        report_execption(chatbot, history,
-                         a=f"解析项目: {txt}",
-                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    history = []    # clear the history to avoid input overflow
-    import glob, os
-    if os.path.exists(txt):
-        project_folder = txt
-    else:
-        if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
-    if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
-
-
-
-@CatchException
-def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic info: feature description and contributors
-    chatbot.append([
-        "函数插件功能?",
-        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
-    try:
-        import tiktoken
-    except:
-        report_execption(chatbot, history,
-                         a=f"解析项目: {txt}",
-                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-        return
-    history = []    # clear the history to avoid input overflow
-    import glob, os
-    if os.path.exists(txt):
-        project_folder = txt
-    else:
-        if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
-    if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
\ No newline at end of file
diff --git a/spaces/zeynepgulhan/whisper-medium-cv-tr-demo/app.py b/spaces/zeynepgulhan/whisper-medium-cv-tr-demo/app.py
deleted file mode 100644
index 3fb4dd0550e3fdba1a0a8153057b2b2c16090676..0000000000000000000000000000000000000000
--- a/spaces/zeynepgulhan/whisper-medium-cv-tr-demo/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-
-import gradio as gr
-import pytube as pt
-from transformers import pipeline
-from huggingface_hub import model_info
-
-MODEL_NAME = "zeynepgulhan/whisper-medium-mediaspeech-cv-tr" #this always needs to stay in line 8 :D sorry for the hackiness
-lang = "tr"
-
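-# Use the first CUDA GPU if one is available, otherwise fall back to CPU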
-device = 0 if torch.cuda.is_available() else "cpu"
-pipe = pipeline(
-    task="automatic-speech-recognition",
-    model=MODEL_NAME,
-    chunk_length_s=30,
-    device=device,
-)
-
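-# Pin the decoder prompt so Whisper always transcribes in Turkish instead of auto-detecting the language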
-pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
-
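-# Transcribe a microphone recording or an uploaded audio file; the microphone takes precedence if both are given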
-def transcribe(microphone, file_upload):
-    warn_output = ""
-    if (microphone is not None) and (file_upload is not None):
-        warn_output = (
-            "WARNING: You've uploaded an audio file and used the microphone. "
-            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
-        )
-
-    elif (microphone is None) and (file_upload is None):
-        return "ERROR: You have to either use the microphone or upload an audio file"
-
-    file = microphone if microphone is not None else file_upload
-
-    text = pipe(file)["text"]
-
-    return warn_output + text
-
-
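-# Build an HTML <iframe> embed for the given YouTube URL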
-def _return_yt_html_embed(yt_url):
-    video_id = yt_url.split("?v=")[-1]
-    HTML_str = (
-        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
-        " </center>"
-    )
-    return HTML_str
-
-
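-# Download a YouTube video's audio track with pytube and transcribe it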
-def yt_transcribe(yt_url):
-    yt = pt.YouTube(yt_url)
-    html_embed_str = _return_yt_html_embed(yt_url)
-    stream = yt.streams.filter(only_audio=True)[0]
-    stream.download(filename="audio.mp3")
-
-    text = pipe("audio.mp3")["text"]
-
-    return html_embed_str, text
-
-
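-# Assemble the Gradio UI: one tab for direct audio input, one for YouTube links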
-demo = gr.Blocks()
-
-mf_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
-        gr.inputs.Audio(source="upload", type="filepath", optional=True),
-    ],
-    outputs="text",
-    layout="horizontal",
-    theme="huggingface",
-    title="Whisper Turkish Demo: Transcribe Audio",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the the fine-tuned"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and {lang} Transformers to transcribe audio files"
-        " of arbitrary length."
-    ),
-    allow_flagging="never",
-)
-
-yt_transcribe = gr.Interface(
-    fn=yt_transcribe,
-    inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")],
-    outputs=["html", "text"],
-    layout="horizontal",
-    theme="huggingface",
-    title="Whisper Turkish Demo: Transcribe YouTube",
-    description=(
-        "Transcribe long-form YouTube videos with the click of a button! Demo uses the the fine-tuned checkpoint:"
-        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and {lang} Transformers to transcribe audio files of"
-        " arbitrary length."
-    ),
-    allow_flagging="never",
-)
-
-with demo:
-    gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"])
-
-demo.launch(enable_queue=True)
diff --git a/spaces/zhanghaohui/szu-gpt-academic/docs/waifu_plugin/jquery-ui.min.js b/spaces/zhanghaohui/szu-gpt-academic/docs/waifu_plugin/jquery-ui.min.js
deleted file mode 100644
index 25398a167415050ae8bfb0bfebac6aa3ab790909..0000000000000000000000000000000000000000
--- a/spaces/zhanghaohui/szu-gpt-academic/docs/waifu_plugin/jquery-ui.min.js
+++ /dev/null
@@ -1,13 +0,0 @@
-/*! jQuery UI - v1.12.1 - 2016-09-14
-* http://jqueryui.com
-* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js
-* Copyright jQuery Foundation and other contributors; Licensed MIT */
-
-(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t("<div id='"+this._mainDivId+"' class='ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all'></div>"))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var 
h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"<div>",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,i){i=t(i||this.defaultElement||this)[0],this.element=t(i),this.uuid=h++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},i!==this&&(t.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===i&&this.destroy()}}),this.document=t(i.style?i.ownerDocument:i.document||i),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var 
e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return 
setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t("<div style='display:block;position:absolute;width:50px;height:50px;overflow:hidden;'><div style='height:100px;width:auto;'></div></div>"),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.width<e.element[0].scrollWidth,o="scroll"===s||"auto"===s&&e.height<e.element[0].scrollHeight;return{width:o?t.position.scrollbarWidth():0,height:n?t.position.scrollbarWidth():0}},getWithinInfo:function(e){var i=t(e||window),s=t.isWindow(i[0]),n=!!i[0]&&9===i[0].nodeType,o=!s&&!n;return{element:i,isWindow:s,isDocument:n,offset:o?t(e).offset():{left:0,top:0},scrollLeft:i.scrollLeft(),scrollTop:i.scrollTop(),width:i.outerWidth(),height:i.outerHeight()}}},t.fn.position=function(n){if(!n||!n.of)return d.apply(this,arguments);n=t.extend({},n);var u,p,f,g,m,_,v=t(n.of),b=t.position.getWithinInfo(n.within),y=t.position.getScrollInfo(b),w=(n.collision||"flip").split(" "),k={};return _=s(v),v[0].preventDefault&&(n.at="left top"),p=_.width,f=_.height,g=_.offset,m=t.extend({},g),t.each(["my","at"],function(){var t,e,i=(n[this]||"").split(" 
");1===i.length&&(i=r.test(i[0])?i.concat(["center"]):h.test(i[0])?["center"].concat(i):["center","center"]),i[0]=r.test(i[0])?i[0]:"center",i[1]=h.test(i[1])?i[1]:"center",t=l.exec(i[0]),e=l.exec(i[1]),k[this]=[t?t[0]:0,e?e[0]:0],n[this]=[c.exec(i[0])[0],c.exec(i[1])[0]]}),1===w.length&&(w[1]=w[0]),"right"===n.at[0]?m.left+=p:"center"===n.at[0]&&(m.left+=p/2),"bottom"===n.at[1]?m.top+=f:"center"===n.at[1]&&(m.top+=f/2),u=e(k.at,p,f),m.left+=u[0],m.top+=u[1],this.each(function(){var s,r,h=t(this),l=h.outerWidth(),c=h.outerHeight(),d=i(this,"marginLeft"),_=i(this,"marginTop"),x=l+d+i(this,"marginRight")+y.width,C=c+_+i(this,"marginBottom")+y.height,D=t.extend({},m),I=e(k.my,h.outerWidth(),h.outerHeight());"right"===n.my[0]?D.left-=l:"center"===n.my[0]&&(D.left-=l/2),"bottom"===n.my[1]?D.top-=c:"center"===n.my[1]&&(D.top-=c/2),D.left+=I[0],D.top+=I[1],s={marginLeft:d,marginTop:_},t.each(["left","top"],function(e,i){t.ui.position[w[e]]&&t.ui.position[w[e]][i](D,{targetWidth:p,targetHeight:f,elemWidth:l,elemHeight:c,collisionPosition:s,collisionWidth:x,collisionHeight:C,offset:[u[0]+I[0],u[1]+I[1]],my:n.my,at:n.at,within:b,elem:h})}),n.using&&(r=function(t){var e=g.left-D.left,i=e+p-l,s=g.top-D.top,r=s+f-c,u={target:{element:v,left:g.left,top:g.top,width:p,height:f},element:{element:h,left:D.left,top:D.top,width:l,height:c},horizontal:0>i?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var 
i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,g=-2*e.offset[1];0>c?(s=t.top+p+f+g+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+g)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+g-h,(i>0||u>a(i))&&(t.top+=p+f+g))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}});var c="ui-effects-",u="ui-effects-style",d="ui-effects-animated",p=t;t.effects={effect:{}},function(t,e){function i(t,e,i){var s=u[e.type]||{};return null==t?i||!e.def?null:e.def:(t=s.floor?~~t:parseFloat(t),isNaN(t)?e.def:s.mod?(t+s.mod)%s.mod:0>t?0:t>s.max?s.max:t)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(t,o){var a,r=o.re.exec(i),h=r&&o.parse(r),l=o.space||"rgba";return h?(a=s[l](h),s[c[l].cache]=a[c[l].cache],n=s._rgba=a._rgba,!1):e}),n.length?("0,0,0,0"===n.join()&&t.extend(n,o.transparent),s):o[i]}function n(t,e,i){return i=(i+1)%1,1>6*i?t+6*(e-t)*i:1>2*i?e:2>3*i?t+6*(e-t)*(2/3-i):t}var o,a="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[t[1],t[2],t[3],t[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[2.55*t[1],2.55*t[2],2.55*t[3],t[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(t){return[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(t){return[parseInt(t[1]+t[1],16),parseInt(t[2]+t[2],16),parseInt(t[3]+t[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(t){return[t[1],t[2]/100,t[3]/100,t[4]]}}],l=t.Color=function(e,i,s,n){return new t.Color.fn.parse(e,i,s,n)},c={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},u={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=t("<p>")[0],f=t.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(c,function(t,e){e.cache="_"+t,e.props.alpha={idx:3,type:"percent",def:1}}),l.fn=t.extend(l.prototype,{parse:function(n,a,r,h){if(n===e)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=t(n).css(a),a=e);var u=this,d=t.type(n),p=this._rgba=[];return 
a!==e&&(n=[n,a,r,h],d="array"),"string"===d?this.parse(s(n)||o._default):"array"===d?(f(c.rgba.props,function(t,e){p[e.idx]=i(n[e.idx],e)}),this):"object"===d?(n instanceof l?f(c,function(t,e){n[e.cache]&&(u[e.cache]=n[e.cache].slice())}):f(c,function(e,s){var o=s.cache;f(s.props,function(t,e){if(!u[o]&&s.to){if("alpha"===t||null==n[t])return;u[o]=s.to(u._rgba)}u[o][e.idx]=i(n[t],e,!0)}),u[o]&&0>t.inArray(null,u[o].slice(0,3))&&(u[o][3]=1,s.from&&(u._rgba=s.from(u[o])))}),this):e},is:function(t){var i=l(t),s=!0,n=this;return f(c,function(t,o){var a,r=i[o.cache];return r&&(a=n[o.cache]||o.to&&o.to(n._rgba)||[],f(o.props,function(t,i){return null!=r[i.idx]?s=r[i.idx]===a[i.idx]:e})),s}),s},_space:function(){var t=[],e=this;return f(c,function(i,s){e[s.cache]&&t.push(i)}),t.pop()},transition:function(t,e){var s=l(t),n=s._space(),o=c[n],a=0===this.alpha()?l("transparent"):this,r=a[o.cache]||o.to(a._rgba),h=r.slice();return s=s[o.cache],f(o.props,function(t,n){var o=n.idx,a=r[o],l=s[o],c=u[n.type]||{};null!==l&&(null===a?h[o]=l:(c.mod&&(l-a>c.mod/2?a+=c.mod:a-l>c.mod/2&&(a-=c.mod)),h[o]=i((l-a)*e+a,n)))}),this[n](h)},blend:function(e){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(e)._rgba;return l(t.map(i,function(t,e){return(1-s)*n[e]+s*t}))},toRgbaString:function(){var e="rgba(",i=t.map(this._rgba,function(t,e){return null==t?e>2?1:0:t});return 1===i[3]&&(i.pop(),e="rgb("),e+i.join()+")"},toHslaString:function(){var e="hsla(",i=t.map(this.hsla(),function(t,e){return null==t&&(t=e>2?1:0),e&&3>e&&(t=Math.round(100*t)+"%"),t});return 1===i[3]&&(i.pop(),e="hsl("),e+i.join()+")"},toHexString:function(e){var i=this._rgba.slice(),s=i.pop();return e&&i.push(~~(255*s)),"#"+t.map(i,function(t){return t=(t||0).toString(16),1===t.length?"0"+t:t}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,c.hsla.to=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e,i,s=t[0]/255,n=t[1]/255,o=t[2]/255,a=t[3],r=Math.max(s,n,o),h=Math.min(s,n,o),l=r-h,c=r+h,u=.5*c;return e=h===r?0:s===r?60*(n-o)/l+360:n===r?60*(o-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=u?l/c:l/(2-c),[Math.round(e)%360,i,u,null==a?1:a]},c.hsla.from=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e=t[0]/360,i=t[1],s=t[2],o=t[3],a=.5>=s?s*(1+i):s+i-s*i,r=2*s-a;return[Math.round(255*n(r,a,e+1/3)),Math.round(255*n(r,a,e)),Math.round(255*n(r,a,e-1/3)),o]},f(c,function(s,n){var o=n.props,a=n.cache,h=n.to,c=n.from;l.fn[s]=function(s){if(h&&!this[a]&&(this[a]=h(this._rgba)),s===e)return this[a].slice();var n,r=t.type(s),u="array"===r||"object"===r?s:arguments,d=this[a].slice();return f(o,function(t,e){var s=u["object"===r?t:e.idx];null==s&&(s=d[e.idx]),d[e.idx]=i(s,e)}),c?(n=l(c(d)),n[a]=d,n):l(d)},f(o,function(e,i){l.fn[e]||(l.fn[e]=function(n){var o,a=t.type(n),h="alpha"===e?this._hsla?"hsla":"rgba":s,l=this[h](),c=l[i.idx];return"undefined"===a?c:("function"===a&&(n=n.call(this,c),a=t.type(n)),null==n&&i.empty?this:("string"===a&&(o=r.exec(n),o&&(n=c+parseFloat(o[2])*("+"===o[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(e){var i=e.split(" ");f(i,function(e,i){t.cssHooks[i]={set:function(e,n){var 
o,a,r="";if("transparent"!==n&&("string"!==t.type(n)||(o=s(n)))){if(n=l(o||n),!d.rgba&&1!==n._rgba[3]){for(a="backgroundColor"===i?e.parentNode:e;(""===r||"transparent"===r)&&a&&a.style;)try{r=t.css(a,"backgroundColor"),a=a.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{e.style[i]=n}catch(h){}}},t.fx.step[i]=function(e){e.colorInit||(e.start=l(e.elem,i),e.end=l(e.end),e.colorInit=!0),t.cssHooks[i].set(e.elem,e.start.transition(e.end,e.pos))}})},l.hook(a),t.cssHooks.borderColor={expand:function(t){var e={};return f(["Top","Right","Bottom","Left"],function(i,s){e["border"+s+"Color"]=t}),e}},o=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(p),function(){function e(e){var i,s,n=e.ownerDocument.defaultView?e.ownerDocument.defaultView.getComputedStyle(e,null):e.currentStyle,o={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(o[t.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(o[i]=n[i]);return o}function i(e,i){var s,o,a={};for(s in i)o=i[s],e[s]!==o&&(n[s]||(t.fx.step[s]||!isNaN(parseFloat(o)))&&(a[s]=o));return a}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};t.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(e,i){t.fx.step[i]=function(t){("none"!==t.end&&!t.setAttr||1===t.pos&&!t.setAttr)&&(p.style(t.elem,i,t.end),t.setAttr=!0)}}),t.fn.addBack||(t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.effects.animateClass=function(n,o,a,r){var h=t.speed(o,a,r);return this.queue(function(){var o,a=t(this),r=a.attr("class")||"",l=h.children?a.find("*").addBack():a;l=l.map(function(){var i=t(this);return{el:i,start:e(this)}}),o=function(){t.each(s,function(t,e){n[e]&&a[e+"Class"](n[e])})},o(),l=l.map(function(){return this.end=e(this.el[0]),this.diff=i(this.start,this.end),this}),a.attr("class",r),l=l.map(function(){var e=this,i=t.Deferred(),s=t.extend({},h,{queue:!1,complete:function(){i.resolve(e)}});return this.el.animate(this.diff,s),i.promise()}),t.when.apply(t,l.get()).done(function(){o(),t.each(arguments,function(){var e=this.el;t.each(this.diff,function(t){e.css(t,"")})}),h.complete.call(a[0])})})},t.fn.extend({addClass:function(e){return function(i,s,n,o){return s?t.effects.animateClass.call(this,{add:i},s,n,o):e.apply(this,arguments)}}(t.fn.addClass),removeClass:function(e){return function(i,s,n,o){return arguments.length>1?t.effects.animateClass.call(this,{remove:i},s,n,o):e.apply(this,arguments)}}(t.fn.removeClass),toggleClass:function(e){return function(i,s,n,o,a){return"boolean"==typeof s||void 0===s?n?t.effects.animateClass.call(this,s?{add:i}:{remove:i},n,o,a):e.apply(this,arguments):t.effects.animateClass.call(this,{toggle:i},s,n,o)}}(t.fn.toggleClass),switchClass:function(e,i,s,n,o){return t.effects.animateClass.call(this,{add:i,remove:e},s,n,o)}})}(),function(){function e(e,i,s,n){return t.isPlainObject(e)&&(i=e,e=e.effect),e={effect:e},null==i&&(i={}),t.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||t.fx.speeds[i])&&(n=s,s=i,i={}),t.isFunction(s)&&(n=s,s=null),i&&t.extend(e,i),s=s||i.duration,e.duration=t.fx.off?0:"number"==typeof s?s:s 
in t.fx.speeds?t.fx.speeds[s]:t.fx.speeds._default,e.complete=n||i.complete,e}function i(e){return!e||"number"==typeof e||t.fx.speeds[e]?!0:"string"!=typeof e||t.effects.effect[e]?t.isFunction(e)?!0:"object"!=typeof e||e.effect?!1:!0:!0}function s(t,e){var i=e.outerWidth(),s=e.outerHeight(),n=/^rect\((-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto)\)$/,o=n.exec(t)||["",0,i,s,0];return{top:parseFloat(o[1])||0,right:"auto"===o[2]?i:parseFloat(o[2]),bottom:"auto"===o[3]?s:parseFloat(o[3]),left:parseFloat(o[4])||0}}t.expr&&t.expr.filters&&t.expr.filters.animated&&(t.expr.filters.animated=function(e){return function(i){return!!t(i).data(d)||e(i)}}(t.expr.filters.animated)),t.uiBackCompat!==!1&&t.extend(t.effects,{save:function(t,e){for(var i=0,s=e.length;s>i;i++)null!==e[i]&&t.data(c+e[i],t[0].style[e[i]])},restore:function(t,e){for(var i,s=0,n=e.length;n>s;s++)null!==e[s]&&(i=t.data(c+e[s]),t.css(e[s],i))},setMode:function(t,e){return"toggle"===e&&(e=t.is(":hidden")?"show":"hide"),e},createWrapper:function(e){if(e.parent().is(".ui-effects-wrapper"))return e.parent();var i={width:e.outerWidth(!0),height:e.outerHeight(!0),"float":e.css("float")},s=t("<div></div>").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:e.width(),height:e.height()},o=document.activeElement;try{o.id}catch(a){o=document.body}return e.wrap(s),(e[0]===o||t.contains(e[0],o))&&t(o).trigger("focus"),s=e.parent(),"static"===e.css("position")?(s.css({position:"relative"}),e.css({position:"relative"})):(t.extend(i,{position:e.css("position"),zIndex:e.css("z-index")}),t.each(["top","left","bottom","right"],function(t,s){i[s]=e.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),e.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),e.css(n),s.css(i).show()},removeWrapper:function(e){var i=document.activeElement;return e.parent().is(".ui-effects-wrapper")&&(e.parent().replaceWith(e),(e[0]===i||t.contains(e[0],i))&&t(i).trigger("focus")),e}}),t.extend(t.effects,{version:"1.12.1",define:function(e,i,s){return s||(s=i,i="effect"),t.effects.effect[e]=s,t.effects.effect[e].mode=i,s},scaledDimensions:function(t,e,i){if(0===e)return{height:0,width:0,outerHeight:0,outerWidth:0};var s="horizontal"!==i?(e||100)/100:1,n="vertical"!==i?(e||100)/100:1;return{height:t.height()*n,width:t.width()*s,outerHeight:t.outerHeight()*n,outerWidth:t.outerWidth()*s}},clipToBox:function(t){return{width:t.clip.right-t.clip.left,height:t.clip.bottom-t.clip.top,left:t.clip.left,top:t.clip.top}},unshift:function(t,e,i){var s=t.queue();e>1&&s.splice.apply(s,[1,0].concat(s.splice(e,i))),t.dequeue()},saveStyle:function(t){t.data(u,t[0].style.cssText)},restoreStyle:function(t){t[0].style.cssText=t.data(u)||"",t.removeData(u)},mode:function(t,e){var i=t.is(":hidden");return"toggle"===e&&(e=i?"show":"hide"),(i?"hide"===e:"show"===e)&&(e="none"),e},getBaseline:function(t,e){var i,s;switch(t[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=t[0]/e.height}switch(t[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=t[1]/e.width}return{x:s,y:i}},createPlaceholder:function(e){var i,s=e.css("position"),n=e.position();return 
e.css({marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()),/^(static|relative)/.test(s)&&(s="absolute",i=t("<"+e[0].nodeName+">").insertAfter(e).css({display:/^(inline|ruby)/.test(e.css("display"))?"inline-block":"block",visibility:"hidden",marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight"),"float":e.css("float")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).addClass("ui-effects-placeholder"),e.data(c+"placeholder",i)),e.css({position:s,left:n.left,top:n.top}),i},removePlaceholder:function(t){var e=c+"placeholder",i=t.data(e);i&&(i.remove(),t.removeData(e))},cleanUp:function(e){t.effects.restoreStyle(e),t.effects.removePlaceholder(e)},setTransition:function(e,i,s,n){return n=n||{},t.each(i,function(t,i){var o=e.cssUnit(i);o[0]>0&&(n[i]=o[0]*s+o[1])}),n}}),t.fn.extend({effect:function(){function i(e){function i(){r.removeData(d),t.effects.cleanUp(r),"hide"===s.mode&&r.hide(),a()}function a(){t.isFunction(h)&&h.call(r[0]),t.isFunction(e)&&e()}var r=t(this);s.mode=c.shift(),t.uiBackCompat===!1||o?"none"===s.mode?(r[l](),a()):n.call(r[0],s,i):(r.is(":hidden")?"hide"===l:"show"===l)?(r[l](),a()):n.call(r[0],s,a)}var s=e.apply(this,arguments),n=t.effects.effect[s.effect],o=n.mode,a=s.queue,r=a||"fx",h=s.complete,l=s.mode,c=[],u=function(e){var i=t(this),s=t.effects.mode(i,l)||o;i.data(d,!0),c.push(s),o&&("show"===s||s===o&&"hide"===s)&&i.show(),o&&"none"===s||t.effects.saveStyle(i),t.isFunction(e)&&e()};return t.fx.off||!n?l?this[l](s.duration,h):this.each(function(){h&&h.call(this)}):a===!1?this.each(u).each(i):this.queue(r,u).queue(r,i)},show:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="show",this.effect.call(this,n)
-}}(t.fn.show),hide:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(t.fn.hide),toggle:function(t){return function(s){if(i(s)||"boolean"==typeof s)return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(t.fn.toggle),cssUnit:function(e){var i=this.css(e),s=[];return t.each(["em","px","%","pt"],function(t,e){i.indexOf(e)>0&&(s=[parseFloat(i),e])}),s},cssClip:function(t){return t?this.css("clip","rect("+t.top+"px "+t.right+"px "+t.bottom+"px "+t.left+"px)"):s(this.css("clip"),this)},transfer:function(e,i){var s=t(this),n=t(e.to),o="fixed"===n.css("position"),a=t("body"),r=o?a.scrollTop():0,h=o?a.scrollLeft():0,l=n.offset(),c={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},u=s.offset(),d=t("<div class='ui-effects-transfer'></div>").appendTo("body").addClass(e.className).css({top:u.top-r,left:u.left-h,height:s.innerHeight(),width:s.innerWidth(),position:o?"fixed":"absolute"}).animate(c,e.duration,e.easing,function(){d.remove(),t.isFunction(i)&&i()})}}),t.fx.step.clip=function(e){e.clipInit||(e.start=t(e.elem).cssClip(),"string"==typeof e.end&&(e.end=s(e.end,e.elem)),e.clipInit=!0),t(e.elem).cssClip({top:e.pos*(e.end.top-e.start.top)+e.start.top,right:e.pos*(e.end.right-e.start.right)+e.start.right,bottom:e.pos*(e.end.bottom-e.start.bottom)+e.start.bottom,left:e.pos*(e.end.left-e.start.left)+e.start.left})}}(),function(){var e={};t.each(["Quad","Cubic","Quart","Quint","Expo"],function(t,i){e[i]=function(e){return Math.pow(e,t+2)}}),t.extend(e,{Sine:function(t){return 1-Math.cos(t*Math.PI/2)},Circ:function(t){return 1-Math.sqrt(1-t*t)},Elastic:function(t){return 0===t||1===t?t:-Math.pow(2,8*(t-1))*Math.sin((80*(t-1)-7.5)*Math.PI/15)},Back:function(t){return t*t*(3*t-2)},Bounce:function(t){for(var e,i=4;((e=Math.pow(2,--i))-1)/11>t;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*e-2)/22-t,2)}}),t.each(e,function(e,i){t.easing["easeIn"+e]=i,t.easing["easeOut"+e]=function(t){return 1-i(1-t)},t.easing["easeInOut"+e]=function(t){return.5>t?i(2*t)/2:1-i(-2*t+2)/2}})}();var f=t.effects;t.effects.define("blind","hide",function(e,i){var s={up:["bottom","top"],vertical:["bottom","top"],down:["top","bottom"],left:["right","left"],horizontal:["right","left"],right:["left","right"]},n=t(this),o=e.direction||"up",a=n.cssClip(),r={clip:t.extend({},a)},h=t.effects.createPlaceholder(n);r.clip[s[o][0]]=r.clip[s[o][1]],"show"===e.mode&&(n.cssClip(r.clip),h&&h.css(t.effects.clipToBox(r)),r.clip=a),h&&h.animate(t.effects.clipToBox(r),e.duration,e.easing),n.animate(r,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("bounce",function(e,i){var s,n,o,a=t(this),r=e.mode,h="hide"===r,l="show"===r,c=e.direction||"up",u=e.distance,d=e.times||5,p=2*d+(l||h?1:0),f=e.duration/p,g=e.easing,m="up"===c||"down"===c?"top":"left",_="up"===c||"left"===c,v=0,b=a.queue().length;for(t.effects.createPlaceholder(a),o=a.css(m),u||(u=a["top"===m?"outerHeight":"outerWidth"]()/3),l&&(n={opacity:1},n[m]=o,a.css("opacity",0).css(m,_?2*-u:2*u).animate(n,f,g)),h&&(u/=Math.pow(2,d-1)),n={},n[m]=o;d>v;v++)s={},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g).animate(n,f,g),u=h?2*u:u/2;h&&(s={opacity:0},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g)),a.queue(i),t.effects.unshift(a,b,p+1)}),t.effects.define("clip","hide",function(e,i){var 
s,n={},o=t(this),a=e.direction||"vertical",r="both"===a,h=r||"horizontal"===a,l=r||"vertical"===a;s=o.cssClip(),n.clip={top:l?(s.bottom-s.top)/2:s.top,right:h?(s.right-s.left)/2:s.right,bottom:l?(s.bottom-s.top)/2:s.bottom,left:h?(s.right-s.left)/2:s.left},t.effects.createPlaceholder(o),"show"===e.mode&&(o.cssClip(n.clip),n.clip=s),o.animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("drop","hide",function(e,i){var s,n=t(this),o=e.mode,a="show"===o,r=e.direction||"left",h="up"===r||"down"===r?"top":"left",l="up"===r||"left"===r?"-=":"+=",c="+="===l?"-=":"+=",u={opacity:0};t.effects.createPlaceholder(n),s=e.distance||n["top"===h?"outerHeight":"outerWidth"](!0)/2,u[h]=l+s,a&&(n.css(u),u[h]=c+s,u.opacity=1),n.animate(u,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("explode","hide",function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),i()}var o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=e.mode,g="show"===f,m=p.show().css("visibility","hidden").offset(),_=Math.ceil(p.outerWidth()/d),v=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*v,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*_,l=a-(d-1)/2,p.clone().appendTo("body").wrap("<div></div>").css({position:"absolute",visibility:"visible",left:-a*_,top:-o*v}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:_,height:v,left:r+(g?l*_:0),top:h+(g?c*v:0),opacity:g?0:1}).animate({left:r+(g?0:l*_),top:h+(g?0:c*v),opacity:g?1:0},e.duration||500,e.easing,s)}),t.effects.define("fade","toggle",function(e,i){var s="show"===e.mode;t(this).css("opacity",s?0:1).animate({opacity:s?1:0},{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("fold","hide",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=e.size||15,h=/([0-9]+)%/.exec(r),l=!!e.horizFirst,c=l?["right","bottom"]:["bottom","right"],u=e.duration/2,d=t.effects.createPlaceholder(s),p=s.cssClip(),f={clip:t.extend({},p)},g={clip:t.extend({},p)},m=[p[c[0]],p[c[1]]],_=s.queue().length;h&&(r=parseInt(h[1],10)/100*m[a?0:1]),f.clip[c[0]]=r,g.clip[c[0]]=r,g.clip[c[1]]=0,o&&(s.cssClip(g.clip),d&&d.css(t.effects.clipToBox(g)),g.clip=p),s.queue(function(i){d&&d.animate(t.effects.clipToBox(f),u,e.easing).animate(t.effects.clipToBox(g),u,e.easing),i()}).animate(f,u,e.easing).animate(g,u,e.easing).queue(i),t.effects.unshift(s,_,4)}),t.effects.define("highlight","show",function(e,i){var s=t(this),n={backgroundColor:s.css("backgroundColor")};"hide"===e.mode&&(n.opacity=0),t.effects.saveStyle(s),s.css({backgroundImage:"none",backgroundColor:e.color||"#ffff99"}).animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("size",function(e,i){var 
s,n,o,a=t(this),r=["fontSize"],h=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],l=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],c=e.mode,u="effect"!==c,d=e.scale||"both",p=e.origin||["middle","center"],f=a.css("position"),g=a.position(),m=t.effects.scaledDimensions(a),_=e.from||m,v=e.to||t.effects.scaledDimensions(a,0);t.effects.createPlaceholder(a),"show"===c&&(o=_,_=v,v=o),n={from:{y:_.height/m.height,x:_.width/m.width},to:{y:v.height/m.height,x:v.width/m.width}},("box"===d||"both"===d)&&(n.from.y!==n.to.y&&(_=t.effects.setTransition(a,h,n.from.y,_),v=t.effects.setTransition(a,h,n.to.y,v)),n.from.x!==n.to.x&&(_=t.effects.setTransition(a,l,n.from.x,_),v=t.effects.setTransition(a,l,n.to.x,v))),("content"===d||"both"===d)&&n.from.y!==n.to.y&&(_=t.effects.setTransition(a,r,n.from.y,_),v=t.effects.setTransition(a,r,n.to.y,v)),p&&(s=t.effects.getBaseline(p,m),_.top=(m.outerHeight-_.outerHeight)*s.y+g.top,_.left=(m.outerWidth-_.outerWidth)*s.x+g.left,v.top=(m.outerHeight-v.outerHeight)*s.y+g.top,v.left=(m.outerWidth-v.outerWidth)*s.x+g.left),a.css(_),("content"===d||"both"===d)&&(h=h.concat(["marginTop","marginBottom"]).concat(r),l=l.concat(["marginLeft","marginRight"]),a.find("*[width]").each(function(){var i=t(this),s=t.effects.scaledDimensions(i),o={height:s.height*n.from.y,width:s.width*n.from.x,outerHeight:s.outerHeight*n.from.y,outerWidth:s.outerWidth*n.from.x},a={height:s.height*n.to.y,width:s.width*n.to.x,outerHeight:s.height*n.to.y,outerWidth:s.width*n.to.x};n.from.y!==n.to.y&&(o=t.effects.setTransition(i,h,n.from.y,o),a=t.effects.setTransition(i,h,n.to.y,a)),n.from.x!==n.to.x&&(o=t.effects.setTransition(i,l,n.from.x,o),a=t.effects.setTransition(i,l,n.to.x,a)),u&&t.effects.saveStyle(i),i.css(o),i.animate(a,e.duration,e.easing,function(){u&&t.effects.restoreStyle(i)})})),a.animate(v,{queue:!1,duration:e.duration,easing:e.easing,complete:function(){var e=a.offset();0===v.opacity&&a.css("opacity",_.opacity),u||(a.css("position","static"===f?"relative":f).offset(e),t.effects.saveStyle(a)),i()}})}),t.effects.define("scale",function(e,i){var s=t(this),n=e.mode,o=parseInt(e.percent,10)||(0===parseInt(e.percent,10)?0:"effect"!==n?0:100),a=t.extend(!0,{from:t.effects.scaledDimensions(s),to:t.effects.scaledDimensions(s,o,e.direction||"both"),origin:e.origin||["middle","center"]},e);e.fade&&(a.from.opacity=1,a.to.opacity=0),t.effects.effect.size.call(this,a,i)}),t.effects.define("puff","hide",function(e,i){var s=t.extend(!0,{},e,{fade:!0,percent:parseInt(e.percent,10)||150});t.effects.effect.scale.call(this,s,i)}),t.effects.define("pulsate","show",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=o||a,h=2*(e.times||5)+(r?1:0),l=e.duration/h,c=0,u=1,d=s.queue().length;for((o||!s.is(":visible"))&&(s.css("opacity",0).show(),c=1);h>u;u++)s.animate({opacity:c},l,e.easing),c=1-c;s.animate({opacity:c},l,e.easing),s.queue(i),t.effects.unshift(s,d,h+1)}),t.effects.define("shake",function(e,i){var s=1,n=t(this),o=e.direction||"left",a=e.distance||20,r=e.times||3,h=2*r+1,l=Math.round(e.duration/h),c="up"===o||"down"===o?"top":"left",u="up"===o||"left"===o,d={},p={},f={},g=n.queue().length;for(t.effects.createPlaceholder(n),d[c]=(u?"-=":"+=")+a,p[c]=(u?"+=":"-=")+2*a,f[c]=(u?"-=":"+=")+2*a,n.animate(d,l,e.easing);r>s;s++)n.animate(p,l,e.easing).animate(f,l,e.easing);n.animate(p,l,e.easing).animate(d,l/2,e.easing).queue(i),t.effects.unshift(n,g,h+1)}),t.effects.define("slide","show",function(e,i){var 
s,n,o=t(this),a={up:["bottom","top"],down:["top","bottom"],left:["right","left"],right:["left","right"]},r=e.mode,h=e.direction||"left",l="up"===h||"down"===h?"top":"left",c="up"===h||"left"===h,u=e.distance||o["top"===l?"outerHeight":"outerWidth"](!0),d={};t.effects.createPlaceholder(o),s=o.cssClip(),n=o.position()[l],d[l]=(c?-1:1)*u+n,d.clip=o.cssClip(),d.clip[a[h][1]]=d.clip[a[h][0]],"show"===r&&(o.cssClip(d.clip),o.css(l,d[l]),d.clip=s,d[l]=n),o.animate(d,{queue:!1,duration:e.duration,easing:e.easing,complete:i})});var f;t.uiBackCompat!==!1&&(f=t.effects.define("transfer",function(e,i){t(this).transfer(e,i)})),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,.\/:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return 
s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.widget("ui.accordion",{version:"1.12.1",options:{active:0,animate:{},classes:{"ui-accordion-header":"ui-corner-top","ui-accordion-header-collapsed":"ui-corner-all","ui-accordion-content":"ui-corner-bottom"},collapsible:!1,event:"click",header:"> li > :first-child, > :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var e=this.options;this.prevShow=this.prevHide=t(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),e.collapsible||e.active!==!1&&null!=e.active||(e.active=0),this._processPanels(),0>e.active&&(e.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():t()}},_createIcons:function(){var e,i,s=this.options.icons;s&&(e=t("<span>"),this._addClass(e,"ui-accordion-header-icon","ui-icon "+s.header),e.prependTo(this.headers),i=this.active.children(".ui-accordion-header-icon"),this._removeClass(i,s.header)._addClass(i,null,s.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||this.options.active!==!1||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons()),void 0)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(e){if(!e.altKey&&!e.ctrlKey){var i=t.ui.keyCode,s=this.headers.length,n=this.headers.index(e.target),o=!1;switch(e.keyCode){case i.RIGHT:case i.DOWN:o=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:o=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(e);break;case i.HOME:o=this.headers[0];break;case i.END:o=this.headers[s-1]}o&&(t(e.target).attr("tabIndex",-1),t(o).attr("tabIndex",0),t(o).trigger("focus"),e.preventDefault())}},_panelKeyDown:function(e){e.keyCode===t.ui.keyCode.UP&&e.ctrlKey&&t(e.currentTarget).prev().trigger("focus")},refresh:function(){var 
e=this.options;this._processPanels(),e.active===!1&&e.collapsible===!0||!this.headers.length?(e.active=!1,this.active=t()):e.active===!1?this._activate(0):this.active.length&&!t.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(e.active=!1,this.active=t()):this._activate(Math.max(0,e.active-1)):e.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var e,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var e=t(this),i=e.uniqueId().attr("id"),s=e.next(),n=s.uniqueId().attr("id");e.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(e=n.height(),this.element.siblings(":visible").each(function(){var i=t(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(e-=i.outerHeight(!0))}),this.headers.each(function(){e-=t(this).outerHeight(!0)}),this.headers.next().each(function(){t(this).height(Math.max(0,e-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===s&&(e=0,this.headers.next().each(function(){var i=t(this).is(":visible");i||t(this).show(),e=Math.max(e,t(this).css("height","").height()),i||t(this).hide()}).height(e))},_activate:function(e){var i=this._findActive(e)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return"number"==typeof e?this.headers.eq(e):t()},_setupEvents:function(e){var i={keydown:"_keydown"};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(e){var 
i,s,n=this.options,o=this.active,a=t(e.currentTarget),r=a[0]===o[0],h=r&&n.collapsible,l=h?t():a.next(),c=o.next(),u={oldHeader:o,oldPanel:c,newHeader:h?t():a,newPanel:l};e.preventDefault(),r&&!n.collapsible||this._trigger("beforeActivate",e,u)===!1||(n.active=h?!1:this.headers.index(a),this.active=r?t():a,this._toggle(u),this._removeClass(o,"ui-accordion-header-active","ui-state-active"),n.icons&&(i=o.children(".ui-accordion-header-icon"),this._removeClass(i,null,n.icons.activeHeader)._addClass(i,null,n.icons.header)),r||(this._removeClass(a,"ui-accordion-header-collapsed")._addClass(a,"ui-accordion-header-active","ui-state-active"),n.icons&&(s=a.children(".ui-accordion-header-icon"),this._removeClass(s,null,n.icons.header)._addClass(s,null,n.icons.activeHeader)),this._addClass(a.next(),"ui-accordion-content-active")))},_toggle:function(e){var i=e.newPanel,s=this.prevShow.length?this.prevShow:e.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,e):(s.hide(),i.show(),this._toggleComplete(e)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(t(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,e,i){var s,n,o,a=this,r=0,h=t.css("box-sizing"),l=t.length&&(!e.length||t.index()<e.index()),c=this.options.animate||{},u=l&&c.down||c,d=function(){a._toggleComplete(i)};return"number"==typeof u&&(o=u),"string"==typeof u&&(n=u),n=n||u.easing||c.easing,o=o||u.duration||c.duration,e.length?t.length?(s=t.show().outerHeight(),e.animate(this.hideProps,{duration:o,easing:n,step:function(t,e){e.now=Math.round(t)}}),t.hide().animate(this.showProps,{duration:o,easing:n,complete:d,step:function(t,i){i.now=Math.round(t),"height"!==i.prop?"content-box"===h&&(r+=i.now):"content"!==a.options.heightStyle&&(i.now=Math.round(s-e.outerHeight()-r),r=0)}}),void 0):e.animate(this.hideProps,o,n,d):t.animate(this.showProps,o,n,d)},_toggleComplete:function(t){var e=t.oldPanel,i=e.prev();this._removeClass(e,"ui-accordion-content-active"),this._removeClass(i,"ui-accordion-header-active")._addClass(i,"ui-accordion-header-collapsed"),e.length&&(e.parent()[0].className=e.parent()[0].className),this._trigger("activate",null,t)}}),t.ui.safeActiveElement=function(t){var e;try{e=t.activeElement}catch(i){e=t.body}return e||(e=t.body),e.nodeName||(e=t.body),e},t.widget("ui.menu",{version:"1.12.1",defaultElement:"<ul>",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault()},"click .ui-menu-item":function(e){var 
i=t(e.target),s=t(t.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(e),e.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(e):!this.element.is(":focus")&&s.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(e){if(!this.previousFilter){var i=t(e.target).closest(".ui-menu-item"),s=t(e.currentTarget);i[0]===s[0]&&(this._removeClass(s.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(e,s))}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this.element.find(this.options.items).eq(0);e||this.focus(t,i)},blur:function(e){this._delay(function(){var i=!t.contains(this.element[0],t.ui.safeActiveElement(this.document[0]));i&&this.collapseAll(e)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t),this.mouseHandled=!1}})},_destroy:function(){var e=this.element.find(".ui-menu-item").removeAttr("role aria-disabled"),i=e.children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),i.children().each(function(){var e=t(this);e.data("ui-menu-submenu-caret")&&e.remove()})},_keydown:function(e){var i,s,n,o,a=!0;switch(e.keyCode){case t.ui.keyCode.PAGE_UP:this.previousPage(e);break;case t.ui.keyCode.PAGE_DOWN:this.nextPage(e);break;case t.ui.keyCode.HOME:this._move("first","first",e);break;case t.ui.keyCode.END:this._move("last","last",e);break;case t.ui.keyCode.UP:this.previous(e);break;case t.ui.keyCode.DOWN:this.next(e);break;case t.ui.keyCode.LEFT:this.collapse(e);break;case t.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(e);break;case t.ui.keyCode.ENTER:case t.ui.keyCode.SPACE:this._activate(e);break;case t.ui.keyCode.ESCAPE:this.collapse(e);break;default:a=!1,s=this.previousFilter||"",o=!1,n=e.keyCode>=96&&105>=e.keyCode?""+(e.keyCode-96):String.fromCharCode(e.keyCode),clearTimeout(this.filterTimer),n===s?o=!0:n=s+n,i=this._filterMenuItems(n),i=o&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(e.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(e,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}a&&e.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var e,i,s,n,o,a=this,r=this.options.icons.submenu,h=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),s=h.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var e=t(this),i=e.prev(),s=t("<span>").data("ui-menu-submenu-caret",!0);a._addClass(s,"ui-menu-icon","ui-icon "+r),i.attr("aria-haspopup","true").prepend(s),e.attr("aria-labelledby",i.attr("id"))}),this._addClass(s,"ui-menu","ui-widget ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var 
e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu ").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return 
this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight()<this.element.prop("scrollHeight")},select:function(e){this.active=this.active||t(e.target).closest(".ui-menu-item");var i={item:this.active};this.active.has(".ui-menu").length||this.collapseAll(e,!0),this._trigger("select",e,i)},_filterMenuItems:function(e){var i=e.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&"),s=RegExp("^"+i,"i");return this.activeMenu.find(this.options.items).filter(".ui-menu-item").filter(function(){return s.test(t.trim(t(this).children(".ui-menu-item-wrapper").text()))})}}),t.widget("ui.autocomplete",{version:"1.12.1",defaultElement:"<input>",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n;
-this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t("<ul>").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(e){e.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,this.element[0]!==t.ui.safeActiveElement(this.document[0])&&this.element.trigger("focus")})},menufocus:function(e,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,e.originalEvent&&/^mouse/.test(e.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){t(e.target).trigger(e.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",e,{item:n})&&e.originalEvent&&/^key/.test(e.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&t.trim(s).length&&(this.liveRegion.children().hide(),t("<div>").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,i){var s=i.item.data("ui-autocomplete-item"),n=this.previous;this.element[0]!==t.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=n,this._delay(function(){this.previous=n,this.selectedItem=s})),!1!==this._trigger("select",e,{item:s})&&this._value(s.value),this.term=this._value(),this.close(e),this.selectedItem=s}}),this.liveRegion=t("<div>",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(e){var i=this.menu.element[0];return 
e.target===this.element[0]||e.target===i||t.contains(i,e.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var e=this.options.appendTo;return e&&(e=e.jquery||e.nodeType?t(e):this.document.find(e).eq(0)),e&&e[0]||(e=this.element.closest(".ui-front, dialog")),e.length||(e=this.document[0].body),e},_initSource:function(){var e,i,s=this;t.isArray(this.options.source)?(e=this.options.source,this.source=function(i,s){s(t.ui.autocomplete.filter(e,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(e,n){s.xhr&&s.xhr.abort(),s.xhr=t.ajax({url:i,data:e,dataType:"json",success:function(t){n(t)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(t){clearTimeout(this.searching),this.searching=this._delay(function(){var e=this.term===this._value(),i=this.menu.element.is(":visible"),s=t.altKey||t.ctrlKey||t.metaKey||t.shiftKey;(!e||e&&!i&&!s)&&(this.selectedItem=null,this.search(null,t))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length<this.options.minLength?this.close(e):this._trigger("search",e)!==!1?this._search(t):void 0},_search:function(t){this.pending++,this._addClass("ui-autocomplete-loading"),this.cancelSearch=!1,this.source({term:t},this._response())},_response:function(){var e=++this.requestIndex;return t.proxy(function(t){e===this.requestIndex&&this.__response(t),this.pending--,this.pending||this._removeClass("ui-autocomplete-loading")},this)},__response:function(t){t&&(t=this._normalize(t)),this._trigger("response",null,{content:t}),!this.options.disabled&&t&&t.length&&!this.cancelSearch?(this._suggest(t),this._trigger("open")):this._close()},close:function(t){this.cancelSearch=!0,this._close(t)},_close:function(t){this._off(this.document,"mousedown"),this.menu.element.is(":visible")&&(this.menu.element.hide(),this.menu.blur(),this.isNewMenu=!0,this._trigger("close",t))},_change:function(t){this.previous!==this._value()&&this._trigger("change",t,{item:this.selectedItem})},_normalize:function(e){return e.length&&e[0].label&&e[0].value?e:t.map(e,function(e){return"string"==typeof e?{label:e,value:e}:t.extend({},e,{label:e.label||e.value,value:e.value||e.label})})},_suggest:function(e){var i=this.menu.element.empty();this._renderMenu(i,e),this.isNewMenu=!0,this.menu.refresh(),i.show(),this._resizeMenu(),i.position(t.extend({of:this.element},this.options.position)),this.options.autoFocus&&this.menu.next(),this._on(this.document,{mousedown:"_closeOnClickOutside"})},_resizeMenu:function(){var t=this.menu.element;t.outerWidth(Math.max(t.width("").outerWidth()+1,this.element.outerWidth()))},_renderMenu:function(e,i){var s=this;t.each(i,function(t,i){s._renderItemData(e,i)})},_renderItemData:function(t,e){return this._renderItem(t,e).data("ui-autocomplete-item",e)},_renderItem:function(e,i){return t("<li>").append(t("<div>").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var 
e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("<div>").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete;var g=/ui-corner-([a-z]){2,6}/g;t.widget("ui.controlgroup",{version:"1.12.1",defaultElement:"<div>",options:{direction:"horizontal",disabled:null,onlyVisible:!0,items:{button:"input[type=button], input[type=submit], input[type=reset], button, a",controlgroupLabel:".ui-controlgroup-label",checkboxradio:"input[type='checkbox'], input[type='radio']",selectmenu:"select",spinner:".ui-spinner-input"}},_create:function(){this._enhance()},_enhance:function(){this.element.attr("role","toolbar"),this.refresh()},_destroy:function(){this._callChildMethod("destroy"),this.childWidgets.removeData("ui-controlgroup-data"),this.element.removeAttr("role"),this.options.items.controlgroupLabel&&this.element.find(this.options.items.controlgroupLabel).find(".ui-controlgroup-label-contents").contents().unwrap()},_initWidgets:function(){var e=this,i=[];t.each(this.options.items,function(s,n){var o,a={};return n?"controlgroupLabel"===s?(o=e.element.find(n),o.each(function(){var e=t(this);e.children(".ui-controlgroup-label-contents").length||e.contents().wrapAll("<span class='ui-controlgroup-label-contents'></span>")}),e._addClass(o,null,"ui-widget ui-widget-content ui-state-default"),i=i.concat(o.get()),void 0):(t.fn[s]&&(a=e["_"+s+"Options"]?e["_"+s+"Options"]("middle"):{classes:{}},e.element.find(n).each(function(){var n=t(this),o=n[s]("instance"),r=t.widget.extend({},a);if("button"!==s||!n.parent(".ui-spinner").length){o||(o=n[s]()[s]("instance")),o&&(r.classes=e._resolveClassesValues(r.classes,o)),n[s](r);var h=n[s]("widget");t.data(h[0],"ui-controlgroup-data",o?o:n[s]("instance")),i.push(h[0])}})),void 0):void 0}),this.childWidgets=t(t.unique(i)),this._addClass(this.childWidgets,"ui-controlgroup-item")},_callChildMethod:function(e){this.childWidgets.each(function(){var i=t(this),s=i.data("ui-controlgroup-data");s&&s[e]&&s[e]()})},_updateCornerClass:function(t,e){var i="ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all",s=this._buildSimpleOptions(e,"label").classes.label;this._removeClass(t,null,i),this._addClass(t,null,s)},_buildSimpleOptions:function(t,e){var i="vertical"===this.options.direction,s={classes:{}};return s.classes[e]={middle:"",first:"ui-corner-"+(i?"top":"left"),last:"ui-corner-"+(i?"bottom":"right"),only:"ui-corner-all"}[t],s},_spinnerOptions:function(t){var e=this._buildSimpleOptions(t,"ui-spinner");return e.classes["ui-spinner-up"]="",e.classes["ui-spinner-down"]="",e},_buttonOptions:function(t){return this._buildSimpleOptions(t,"ui-button")},_checkboxradioOptions:function(t){return this._buildSimpleOptions(t,"ui-checkboxradio-label")},_selectmenuOptions:function(t){var 
e="vertical"===this.options.direction;return{width:e?"auto":!1,classes:{middle:{"ui-selectmenu-button-open":"","ui-selectmenu-button-closed":""},first:{"ui-selectmenu-button-open":"ui-corner-"+(e?"top":"tl"),"ui-selectmenu-button-closed":"ui-corner-"+(e?"top":"left")},last:{"ui-selectmenu-button-open":e?"":"ui-corner-tr","ui-selectmenu-button-closed":"ui-corner-"+(e?"bottom":"right")},only:{"ui-selectmenu-button-open":"ui-corner-top","ui-selectmenu-button-closed":"ui-corner-all"}}[t]}},_resolveClassesValues:function(e,i){var s={};return t.each(e,function(n){var o=i.options.classes[n]||"";o=t.trim(o.replace(g,"")),s[n]=(o+" "+e[n]).replace(/\s+/g," ")}),s},_setOption:function(t,e){return"direction"===t&&this._removeClass("ui-controlgroup-"+this.options.direction),this._super(t,e),"disabled"===t?(this._callChildMethod(e?"disable":"enable"),void 0):(this.refresh(),void 0)},refresh:function(){var e,i=this;this._addClass("ui-controlgroup ui-controlgroup-"+this.options.direction),"horizontal"===this.options.direction&&this._addClass(null,"ui-helper-clearfix"),this._initWidgets(),e=this.childWidgets,this.options.onlyVisible&&(e=e.filter(":visible")),e.length&&(t.each(["first","last"],function(t,s){var n=e[s]().data("ui-controlgroup-data");if(n&&i["_"+n.widgetName+"Options"]){var o=i["_"+n.widgetName+"Options"](1===e.length?"only":s);o.classes=i._resolveClassesValues(o.classes,n),n.element[n.widgetName](o)}else i._updateCornerClass(e[s](),s)}),this._callChildMethod("refresh"))}}),t.widget("ui.checkboxradio",[t.ui.formResetMixin,{version:"1.12.1",options:{disabled:null,label:null,icon:!0,classes:{"ui-checkboxradio-label":"ui-corner-all","ui-checkboxradio-icon":"ui-corner-all"}},_getCreateOptions:function(){var e,i,s=this,n=this._super()||{};return this._readType(),i=this.element.labels(),this.label=t(i[i.length-1]),this.label.length||t.error("No label found for checkboxradio widget"),this.originalLabel="",this.label.contents().not(this.element[0]).each(function(){s.originalLabel+=3===this.nodeType?t(this).text():this.outerHTML}),this.originalLabel&&(n.label=this.originalLabel),e=this.element[0].disabled,null!=e&&(n.disabled=e),n},_create:function(){var t=this.element[0].checked;this._bindFormResetHandler(),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled),this._setOption("disabled",this.options.disabled),this._addClass("ui-checkboxradio","ui-helper-hidden-accessible"),this._addClass(this.label,"ui-checkboxradio-label","ui-button ui-widget"),"radio"===this.type&&this._addClass(this.label,"ui-checkboxradio-radio-label"),this.options.label&&this.options.label!==this.originalLabel?this._updateLabel():this.originalLabel&&(this.options.label=this.originalLabel),this._enhance(),t&&(this._addClass(this.label,"ui-checkboxradio-checked","ui-state-active"),this.icon&&this._addClass(this.icon,null,"ui-state-hover")),this._on({change:"_toggleClasses",focus:function(){this._addClass(this.label,null,"ui-state-focus ui-visual-focus")},blur:function(){this._removeClass(this.label,null,"ui-state-focus ui-visual-focus")}})},_readType:function(){var e=this.element[0].nodeName.toLowerCase();this.type=this.element[0].type,"input"===e&&/radio|checkbox/.test(this.type)||t.error("Can't create checkboxradio on element.nodeName="+e+" and element.type="+this.type)},_enhance:function(){this._updateIcon(this.element[0].checked)},widget:function(){return this.label},_getRadioGroup:function(){var e,i=this.element[0].name,s="input[name='"+t.ui.escapeSelector(i)+"']";return 
i?(e=this.form.length?t(this.form[0].elements).filter(s):t(s).filter(function(){return 0===t(this).form().length}),e.not(this.element)):t([])},_toggleClasses:function(){var e=this.element[0].checked;this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",e),this.options.icon&&"checkbox"===this.type&&this._toggleClass(this.icon,null,"ui-icon-check ui-state-checked",e)._toggleClass(this.icon,null,"ui-icon-blank",!e),"radio"===this.type&&this._getRadioGroup().each(function(){var e=t(this).checkboxradio("instance");e&&e._removeClass(e.label,"ui-checkboxradio-checked","ui-state-active")})},_destroy:function(){this._unbindFormResetHandler(),this.icon&&(this.icon.remove(),this.iconSpace.remove())},_setOption:function(t,e){return"label"!==t||e?(this._super(t,e),"disabled"===t?(this._toggleClass(this.label,null,"ui-state-disabled",e),this.element[0].disabled=e,void 0):(this.refresh(),void 0)):void 0},_updateIcon:function(e){var i="ui-icon ui-icon-background ";this.options.icon?(this.icon||(this.icon=t("<span>"),this.iconSpace=t("<span> </span>"),this._addClass(this.iconSpace,"ui-checkboxradio-icon-space")),"checkbox"===this.type?(i+=e?"ui-icon-check ui-state-checked":"ui-icon-blank",this._removeClass(this.icon,null,e?"ui-icon-blank":"ui-icon-check")):i+="ui-icon-blank",this._addClass(this.icon,"ui-checkboxradio-icon",i),e||this._removeClass(this.icon,null,"ui-icon-check ui-state-checked"),this.icon.prependTo(this.label).after(this.iconSpace)):void 0!==this.icon&&(this.icon.remove(),this.iconSpace.remove(),delete this.icon)},_updateLabel:function(){var t=this.label.contents().not(this.element[0]);this.icon&&(t=t.not(this.icon[0])),this.iconSpace&&(t=t.not(this.iconSpace[0])),t.remove(),this.label.append(this.options.label)},refresh:function(){var t=this.element[0].checked,e=this.element[0].disabled;this._updateIcon(t),this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",t),null!==this.options.label&&this._updateLabel(),e!==this.options.disabled&&this._setOptions({disabled:e})}}]),t.ui.checkboxradio,t.widget("ui.button",{version:"1.12.1",defaultElement:"<button>",options:{classes:{"ui-button":"ui-corner-all"},disabled:null,icon:null,iconPosition:"beginning",label:null,showLabel:!0},_getCreateOptions:function(){var t,e=this._super()||{};return this.isInput=this.element.is("input"),t=this.element[0].disabled,null!=t&&(e.disabled=t),this.originalLabel=this.isInput?this.element.val():this.element.html(),this.originalLabel&&(e.label=this.originalLabel),e},_create:function(){!this.options.showLabel&&!this.options.icon&&(this.options.showLabel=!0),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled||!1),this.hasTitle=!!this.element.attr("title"),this.options.label&&this.options.label!==this.originalLabel&&(this.isInput?this.element.val(this.options.label):this.element.html(this.options.label)),this._addClass("ui-button","ui-widget"),this._setOption("disabled",this.options.disabled),this._enhance(),this.element.is("a")&&this._on({keyup:function(e){e.keyCode===t.ui.keyCode.SPACE&&(e.preventDefault(),this.element[0].click?this.element[0].click():this.element.trigger("click"))}})},_enhance:function(){this.element.is("button")||this.element.attr("role","button"),this.options.icon&&(this._updateIcon("icon",this.options.icon),this._updateTooltip())},_updateTooltip:function(){this.title=this.element.attr("title"),this.options.showLabel||this.title||this.element.attr("title",this.options.label)},_updateIcon:function(e,i){var 
s="iconPosition"!==e,n=s?this.options.iconPosition:i,o="top"===n||"bottom"===n;this.icon?s&&this._removeClass(this.icon,null,this.options.icon):(this.icon=t("<span>"),this._addClass(this.icon,"ui-button-icon","ui-icon"),this.options.showLabel||this._addClass("ui-button-icon-only")),s&&this._addClass(this.icon,null,i),this._attachIcon(n),o?(this._addClass(this.icon,null,"ui-widget-icon-block"),this.iconSpace&&this.iconSpace.remove()):(this.iconSpace||(this.iconSpace=t("<span> </span>"),this._addClass(this.iconSpace,"ui-button-icon-space")),this._removeClass(this.icon,null,"ui-widget-icon-block"),this._attachIconSpace(n))},_destroy:function(){this.element.removeAttr("role"),this.icon&&this.icon.remove(),this.iconSpace&&this.iconSpace.remove(),this.hasTitle||this.element.removeAttr("title")},_attachIconSpace:function(t){this.icon[/^(?:end|bottom)/.test(t)?"before":"after"](this.iconSpace)},_attachIcon:function(t){this.element[/^(?:end|bottom)/.test(t)?"append":"prepend"](this.icon)},_setOptions:function(t){var e=void 0===t.showLabel?this.options.showLabel:t.showLabel,i=void 0===t.icon?this.options.icon:t.icon;e||i||(t.showLabel=!0),this._super(t)},_setOption:function(t,e){"icon"===t&&(e?this._updateIcon(t,e):this.icon&&(this.icon.remove(),this.iconSpace&&this.iconSpace.remove())),"iconPosition"===t&&this._updateIcon(t,e),"showLabel"===t&&(this._toggleClass("ui-button-icon-only",null,!e),this._updateTooltip()),"label"===t&&(this.isInput?this.element.val(e):(this.element.html(e),this.icon&&(this._attachIcon(this.options.iconPosition),this._attachIconSpace(this.options.iconPosition)))),this._super(t,e),"disabled"===t&&(this._toggleClass(null,"ui-state-disabled",e),this.element[0].disabled=e,e&&this.element.blur())},refresh:function(){var t=this.element.is("input, button")?this.element[0].disabled:this.element.hasClass("ui-button-disabled");t!==this.options.disabled&&this._setOptions({disabled:t}),this._updateTooltip()}}),t.uiBackCompat!==!1&&(t.widget("ui.button",t.ui.button,{options:{text:!0,icons:{primary:null,secondary:null}},_create:function(){this.options.showLabel&&!this.options.text&&(this.options.showLabel=this.options.text),!this.options.showLabel&&this.options.text&&(this.options.text=this.options.showLabel),this.options.icon||!this.options.icons.primary&&!this.options.icons.secondary?this.options.icon&&(this.options.icons.primary=this.options.icon):this.options.icons.primary?this.options.icon=this.options.icons.primary:(this.options.icon=this.options.icons.secondary,this.options.iconPosition="end"),this._super()},_setOption:function(t,e){return"text"===t?(this._super("showLabel",e),void 0):("showLabel"===t&&(this.options.text=e),"icon"===t&&(this.options.icons.primary=e),"icons"===t&&(e.primary?(this._super("icon",e.primary),this._super("iconPosition","beginning")):e.secondary&&(this._super("icon",e.secondary),this._super("iconPosition","end"))),this._superApply(arguments),void 0)}}),t.fn.button=function(e){return function(){return!this.length||this.length&&"INPUT"!==this[0].tagName||this.length&&"INPUT"===this[0].tagName&&"checkbox"!==this.attr("type")&&"radio"!==this.attr("type")?e.apply(this,arguments):(t.ui.checkboxradio||t.error("Checkboxradio widget missing"),0===arguments.length?this.checkboxradio({icon:!1}):this.checkboxradio.apply(this,arguments))}}(t.fn.button),t.fn.buttonset=function(){return t.ui.controlgroup||t.error("Controlgroup widget 
missing"),"option"===arguments[0]&&"items"===arguments[1]&&arguments[2]?this.controlgroup.apply(this,[arguments[0],"items.button",arguments[2]]):"option"===arguments[0]&&"items"===arguments[1]?this.controlgroup.apply(this,[arguments[0],"items.button"]):("object"==typeof arguments[0]&&arguments[0].items&&(arguments[0].items={button:arguments[0].items}),this.controlgroup.apply(this,arguments))}),t.ui.button,t.extend(t.ui,{datepicker:{version:"1.12.1"}});var m;t.extend(s.prototype,{markerClassName:"hasDatepicker",maxRows:4,_widgetDatepicker:function(){return this.dpDiv},setDefaults:function(t){return a(this._defaults,t||{}),this},_attachDatepicker:function(e,i){var s,n,o;s=e.nodeName.toLowerCase(),n="div"===s||"span"===s,e.id||(this.uuid+=1,e.id="dp"+this.uuid),o=this._newInst(t(e),n),o.settings=t.extend({},i||{}),"input"===s?this._connectDatepicker(e,o):n&&this._inlineDatepicker(e,o)},_newInst:function(e,i){var s=e[0].id.replace(/([^A-Za-z0-9_\-])/g,"\\\\$1");return{id:s,input:e,selectedDay:0,selectedMonth:0,selectedYear:0,drawMonth:0,drawYear:0,inline:i,dpDiv:i?n(t("<div class='"+this._inlineClass+" ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all'></div>")):this.dpDiv}},_connectDatepicker:function(e,i){var s=t(e);i.append=t([]),i.trigger=t([]),s.hasClass(this.markerClassName)||(this._attachments(s,i),s.addClass(this.markerClassName).on("keydown",this._doKeyDown).on("keypress",this._doKeyPress).on("keyup",this._doKeyUp),this._autoSize(i),t.data(e,"datepicker",i),i.settings.disabled&&this._disableDatepicker(e))},_attachments:function(e,i){var s,n,o,a=this._get(i,"appendText"),r=this._get(i,"isRTL");i.append&&i.append.remove(),a&&(i.append=t("<span class='"+this._appendClass+"'>"+a+"</span>"),e[r?"before":"after"](i.append)),e.off("focus",this._showDatepicker),i.trigger&&i.trigger.remove(),s=this._get(i,"showOn"),("focus"===s||"both"===s)&&e.on("focus",this._showDatepicker),("button"===s||"both"===s)&&(n=this._get(i,"buttonText"),o=this._get(i,"buttonImage"),i.trigger=t(this._get(i,"buttonImageOnly")?t("<img/>").addClass(this._triggerClass).attr({src:o,alt:n,title:n}):t("<button type='button'></button>").addClass(this._triggerClass).html(o?t("<img/>").attr({src:o,alt:n,title:n}):n)),e[r?"before":"after"](i.trigger),i.trigger.on("click",function(){return t.datepicker._datepickerShowing&&t.datepicker._lastInput===e[0]?t.datepicker._hideDatepicker():t.datepicker._datepickerShowing&&t.datepicker._lastInput!==e[0]?(t.datepicker._hideDatepicker(),t.datepicker._showDatepicker(e[0])):t.datepicker._showDatepicker(e[0]),!1}))},_autoSize:function(t){if(this._get(t,"autoSize")&&!t.inline){var e,i,s,n,o=new Date(2009,11,20),a=this._get(t,"dateFormat");a.match(/[DM]/)&&(e=function(t){for(i=0,s=0,n=0;t.length>n;n++)t[n].length>i&&(i=t[n].length,s=n);return s},o.setMonth(e(this._get(t,a.match(/MM/)?"monthNames":"monthNamesShort"))),o.setDate(e(this._get(t,a.match(/DD/)?"dayNames":"dayNamesShort"))+20-o.getDay())),t.input.attr("size",this._formatDate(t,o).length)}},_inlineDatepicker:function(e,i){var s=t(e);s.hasClass(this.markerClassName)||(s.addClass(this.markerClassName).append(i.dpDiv),t.data(e,"datepicker",i),this._setDate(i,this._getDefaultDate(i),!0),this._updateDatepicker(i),this._updateAlternate(i),i.settings.disabled&&this._disableDatepicker(e),i.dpDiv.css("display","block"))},_dialogDatepicker:function(e,i,s,n,o){var r,h,l,c,u,d=this._dialogInst;return d||(this.uuid+=1,r="dp"+this.uuid,this._dialogInput=t("<input type='text' id='"+r+"' style='position: absolute; top: 
-100px; width: 0px;'/>"),this._dialogInput.on("keydown",this._doKeyDown),t("body").append(this._dialogInput),d=this._dialogInst=this._newInst(this._dialogInput,!1),d.settings={},t.data(this._dialogInput[0],"datepicker",d)),a(d.settings,n||{}),i=i&&i.constructor===Date?this._formatDate(d,i):i,this._dialogInput.val(i),this._pos=o?o.length?o:[o.pageX,o.pageY]:null,this._pos||(h=document.documentElement.clientWidth,l=document.documentElement.clientHeight,c=document.documentElement.scrollLeft||document.body.scrollLeft,u=document.documentElement.scrollTop||document.body.scrollTop,this._pos=[h/2-100+c,l/2-150+u]),this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px"),d.settings.onSelect=s,this._inDialog=!0,this.dpDiv.addClass(this._dialogClass),this._showDatepicker(this._dialogInput[0]),t.blockUI&&t.blockUI(this.dpDiv),t.data(this._dialogInput[0],"datepicker",d),this},_destroyDatepicker:function(e){var i,s=t(e),n=t.data(e,"datepicker");s.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),t.removeData(e,"datepicker"),"input"===i?(n.append.remove(),n.trigger.remove(),s.removeClass(this.markerClassName).off("focus",this._showDatepicker).off("keydown",this._doKeyDown).off("keypress",this._doKeyPress).off("keyup",this._doKeyUp)):("div"===i||"span"===i)&&s.removeClass(this.markerClassName).empty(),m===n&&(m=null))},_enableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!1,o.trigger.filter("button").each(function(){this.disabled=!1}).end().filter("img").css({opacity:"1.0",cursor:""})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().removeClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!1)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}))},_disableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!0,o.trigger.filter("button").each(function(){this.disabled=!0}).end().filter("img").css({opacity:"0.5",cursor:"default"})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().addClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!0)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}),this._disabledInputs[this._disabledInputs.length]=e)},_isDisabledDatepicker:function(t){if(!t)return!1;for(var e=0;this._disabledInputs.length>e;e++)if(this._disabledInputs[e]===t)return!0;return!1},_getInst:function(e){try{return t.data(e,"datepicker")}catch(i){throw"Missing instance data for this datepicker"}},_optionDatepicker:function(e,i,s){var n,o,r,h,l=this._getInst(e);return 2===arguments.length&&"string"==typeof i?"defaults"===i?t.extend({},t.datepicker._defaults):l?"all"===i?t.extend({},l.settings):this._get(l,i):null:(n=i||{},"string"==typeof i&&(n={},n[i]=s),l&&(this._curInst===l&&this._hideDatepicker(),o=this._getDateDatepicker(e,!0),r=this._getMinMaxDate(l,"min"),h=this._getMinMaxDate(l,"max"),a(l.settings,n),null!==r&&void 0!==n.dateFormat&&void 0===n.minDate&&(l.settings.minDate=this._formatDate(l,r)),null!==h&&void 0!==n.dateFormat&&void 0===n.maxDate&&(l.settings.maxDate=this._formatDate(l,h)),"disabled"in 
n&&(n.disabled?this._disableDatepicker(e):this._enableDatepicker(e)),this._attachments(t(e),l),this._autoSize(l),this._setDate(l,o),this._updateAlternate(l),this._updateDatepicker(l)),void 0)},_changeDatepicker:function(t,e,i){this._optionDatepicker(t,e,i)},_refreshDatepicker:function(t){var e=this._getInst(t);e&&this._updateDatepicker(e)},_setDateDatepicker:function(t,e){var i=this._getInst(t);i&&(this._setDate(i,e),this._updateDatepicker(i),this._updateAlternate(i))},_getDateDatepicker:function(t,e){var i=this._getInst(t);return i&&!i.inline&&this._setDateFromField(i,e),i?this._getDate(i):null},_doKeyDown:function(e){var i,s,n,o=t.datepicker._getInst(e.target),a=!0,r=o.dpDiv.is(".ui-datepicker-rtl");if(o._keyEvent=!0,t.datepicker._datepickerShowing)switch(e.keyCode){case 9:t.datepicker._hideDatepicker(),a=!1;break;case 13:return n=t("td."+t.datepicker._dayOverClass+":not(."+t.datepicker._currentClass+")",o.dpDiv),n[0]&&t.datepicker._selectDay(e.target,o.selectedMonth,o.selectedYear,n[0]),i=t.datepicker._get(o,"onSelect"),i?(s=t.datepicker._formatDate(o),i.apply(o.input?o.input[0]:null,[s,o])):t.datepicker._hideDatepicker(),!1;case 27:t.datepicker._hideDatepicker();break;case 33:t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 34:t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 35:(e.ctrlKey||e.metaKey)&&t.datepicker._clearDate(e.target),a=e.ctrlKey||e.metaKey;break;case 36:(e.ctrlKey||e.metaKey)&&t.datepicker._gotoToday(e.target),a=e.ctrlKey||e.metaKey;break;case 37:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?1:-1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 38:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,-7,"D"),a=e.ctrlKey||e.metaKey;break;case 39:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?-1:1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 40:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,7,"D"),a=e.ctrlKey||e.metaKey;break;default:a=!1}else 36===e.keyCode&&e.ctrlKey?t.datepicker._showDatepicker(this):a=!1;a&&(e.preventDefault(),e.stopPropagation())},_doKeyPress:function(e){var i,s,n=t.datepicker._getInst(e.target);return t.datepicker._get(n,"constrainInput")?(i=t.datepicker._possibleChars(t.datepicker._get(n,"dateFormat")),s=String.fromCharCode(null==e.charCode?e.keyCode:e.charCode),e.ctrlKey||e.metaKey||" ">s||!i||i.indexOf(s)>-1):void 0},_doKeyUp:function(e){var i,s=t.datepicker._getInst(e.target);if(s.input.val()!==s.lastVal)try{i=t.datepicker.parseDate(t.datepicker._get(s,"dateFormat"),s.input?s.input.val():null,t.datepicker._getFormatConfig(s)),i&&(t.datepicker._setDateFromField(s),t.datepicker._updateAlternate(s),t.datepicker._updateDatepicker(s))}catch(n){}return!0},_showDatepicker:function(e){if(e=e.target||e,"input"!==e.nodeName.toLowerCase()&&(e=t("input",e.parentNode)[0]),!t.datepicker._isDisabledDatepicker(e)&&t.datepicker._lastInput!==e){var 
s,n,o,r,h,l,c;s=t.datepicker._getInst(e),t.datepicker._curInst&&t.datepicker._curInst!==s&&(t.datepicker._curInst.dpDiv.stop(!0,!0),s&&t.datepicker._datepickerShowing&&t.datepicker._hideDatepicker(t.datepicker._curInst.input[0])),n=t.datepicker._get(s,"beforeShow"),o=n?n.apply(e,[e,s]):{},o!==!1&&(a(s.settings,o),s.lastVal=null,t.datepicker._lastInput=e,t.datepicker._setDateFromField(s),t.datepicker._inDialog&&(e.value=""),t.datepicker._pos||(t.datepicker._pos=t.datepicker._findPos(e),t.datepicker._pos[1]+=e.offsetHeight),r=!1,t(e).parents().each(function(){return r|="fixed"===t(this).css("position"),!r}),h={left:t.datepicker._pos[0],top:t.datepicker._pos[1]},t.datepicker._pos=null,s.dpDiv.empty(),s.dpDiv.css({position:"absolute",display:"block",top:"-1000px"}),t.datepicker._updateDatepicker(s),h=t.datepicker._checkOffset(s,h,r),s.dpDiv.css({position:t.datepicker._inDialog&&t.blockUI?"static":r?"fixed":"absolute",display:"none",left:h.left+"px",top:h.top+"px"}),s.inline||(l=t.datepicker._get(s,"showAnim"),c=t.datepicker._get(s,"duration"),s.dpDiv.css("z-index",i(t(e))+1),t.datepicker._datepickerShowing=!0,t.effects&&t.effects.effect[l]?s.dpDiv.show(l,t.datepicker._get(s,"showOptions"),c):s.dpDiv[l||"show"](l?c:null),t.datepicker._shouldFocusInput(s)&&s.input.trigger("focus"),t.datepicker._curInst=s))
-}},_updateDatepicker:function(e){this.maxRows=4,m=e,e.dpDiv.empty().append(this._generateHTML(e)),this._attachHandlers(e);var i,s=this._getNumberOfMonths(e),n=s[1],a=17,r=e.dpDiv.find("."+this._dayOverClass+" a");r.length>0&&o.apply(r.get(0)),e.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""),n>1&&e.dpDiv.addClass("ui-datepicker-multi-"+n).css("width",a*n+"em"),e.dpDiv[(1!==s[0]||1!==s[1]?"add":"remove")+"Class"]("ui-datepicker-multi"),e.dpDiv[(this._get(e,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"),e===t.datepicker._curInst&&t.datepicker._datepickerShowing&&t.datepicker._shouldFocusInput(e)&&e.input.trigger("focus"),e.yearshtml&&(i=e.yearshtml,setTimeout(function(){i===e.yearshtml&&e.yearshtml&&e.dpDiv.find("select.ui-datepicker-year:first").replaceWith(e.yearshtml),i=e.yearshtml=null},0))},_shouldFocusInput:function(t){return t.input&&t.input.is(":visible")&&!t.input.is(":disabled")&&!t.input.is(":focus")},_checkOffset:function(e,i,s){var n=e.dpDiv.outerWidth(),o=e.dpDiv.outerHeight(),a=e.input?e.input.outerWidth():0,r=e.input?e.input.outerHeight():0,h=document.documentElement.clientWidth+(s?0:t(document).scrollLeft()),l=document.documentElement.clientHeight+(s?0:t(document).scrollTop());return i.left-=this._get(e,"isRTL")?n-a:0,i.left-=s&&i.left===e.input.offset().left?t(document).scrollLeft():0,i.top-=s&&i.top===e.input.offset().top+r?t(document).scrollTop():0,i.left-=Math.min(i.left,i.left+n>h&&h>n?Math.abs(i.left+n-h):0),i.top-=Math.min(i.top,i.top+o>l&&l>o?Math.abs(o+r):0),i},_findPos:function(e){for(var i,s=this._getInst(e),n=this._get(s,"isRTL");e&&("hidden"===e.type||1!==e.nodeType||t.expr.filters.hidden(e));)e=e[n?"previousSibling":"nextSibling"];return i=t(e).offset(),[i.left,i.top]},_hideDatepicker:function(e){var i,s,n,o,a=this._curInst;!a||e&&a!==t.data(e,"datepicker")||this._datepickerShowing&&(i=this._get(a,"showAnim"),s=this._get(a,"duration"),n=function(){t.datepicker._tidyDialog(a)},t.effects&&(t.effects.effect[i]||t.effects[i])?a.dpDiv.hide(i,t.datepicker._get(a,"showOptions"),s,n):a.dpDiv["slideDown"===i?"slideUp":"fadeIn"===i?"fadeOut":"hide"](i?s:null,n),i||n(),this._datepickerShowing=!1,o=this._get(a,"onClose"),o&&o.apply(a.input?a.input[0]:null,[a.input?a.input.val():"",a]),this._lastInput=null,this._inDialog&&(this._dialogInput.css({position:"absolute",left:"0",top:"-100px"}),t.blockUI&&(t.unblockUI(),t("body").append(this.dpDiv))),this._inDialog=!1)},_tidyDialog:function(t){t.dpDiv.removeClass(this._dialogClass).off(".ui-datepicker-calendar")},_checkExternalClick:function(e){if(t.datepicker._curInst){var i=t(e.target),s=t.datepicker._getInst(i[0]);(i[0].id!==t.datepicker._mainDivId&&0===i.parents("#"+t.datepicker._mainDivId).length&&!i.hasClass(t.datepicker.markerClassName)&&!i.closest("."+t.datepicker._triggerClass).length&&t.datepicker._datepickerShowing&&(!t.datepicker._inDialog||!t.blockUI)||i.hasClass(t.datepicker.markerClassName)&&t.datepicker._curInst!==s)&&t.datepicker._hideDatepicker()}},_adjustDate:function(e,i,s){var n=t(e),o=this._getInst(n[0]);this._isDisabledDatepicker(n[0])||(this._adjustInstDate(o,i+("M"===s?this._get(o,"showCurrentAtPos"):0),s),this._updateDatepicker(o))},_gotoToday:function(e){var i,s=t(e),n=this._getInst(s[0]);this._get(n,"gotoCurrent")&&n.currentDay?(n.selectedDay=n.currentDay,n.drawMonth=n.selectedMonth=n.currentMonth,n.drawYear=n.selectedYear=n.currentYear):(i=new 
Date,n.selectedDay=i.getDate(),n.drawMonth=n.selectedMonth=i.getMonth(),n.drawYear=n.selectedYear=i.getFullYear()),this._notifyChange(n),this._adjustDate(s)},_selectMonthYear:function(e,i,s){var n=t(e),o=this._getInst(n[0]);o["selected"+("M"===s?"Month":"Year")]=o["draw"+("M"===s?"Month":"Year")]=parseInt(i.options[i.selectedIndex].value,10),this._notifyChange(o),this._adjustDate(n)},_selectDay:function(e,i,s,n){var o,a=t(e);t(n).hasClass(this._unselectableClass)||this._isDisabledDatepicker(a[0])||(o=this._getInst(a[0]),o.selectedDay=o.currentDay=t("a",n).html(),o.selectedMonth=o.currentMonth=i,o.selectedYear=o.currentYear=s,this._selectDate(e,this._formatDate(o,o.currentDay,o.currentMonth,o.currentYear)))},_clearDate:function(e){var i=t(e);this._selectDate(i,"")},_selectDate:function(e,i){var s,n=t(e),o=this._getInst(n[0]);i=null!=i?i:this._formatDate(o),o.input&&o.input.val(i),this._updateAlternate(o),s=this._get(o,"onSelect"),s?s.apply(o.input?o.input[0]:null,[i,o]):o.input&&o.input.trigger("change"),o.inline?this._updateDatepicker(o):(this._hideDatepicker(),this._lastInput=o.input[0],"object"!=typeof o.input[0]&&o.input.trigger("focus"),this._lastInput=null)},_updateAlternate:function(e){var i,s,n,o=this._get(e,"altField");o&&(i=this._get(e,"altFormat")||this._get(e,"dateFormat"),s=this._getDate(e),n=this.formatDate(i,s,this._getFormatConfig(e)),t(o).val(n))},noWeekends:function(t){var e=t.getDay();return[e>0&&6>e,""]},iso8601Week:function(t){var e,i=new Date(t.getTime());return i.setDate(i.getDate()+4-(i.getDay()||7)),e=i.getTime(),i.setMonth(0),i.setDate(1),Math.floor(Math.round((e-i)/864e5)/7)+1},parseDate:function(e,i,s){if(null==e||null==i)throw"Invalid arguments";if(i="object"==typeof i?""+i:i+"",""===i)return null;var n,o,a,r,h=0,l=(s?s.shortYearCutoff:null)||this._defaults.shortYearCutoff,c="string"!=typeof l?l:(new Date).getFullYear()%100+parseInt(l,10),u=(s?s.dayNamesShort:null)||this._defaults.dayNamesShort,d=(s?s.dayNames:null)||this._defaults.dayNames,p=(s?s.monthNamesShort:null)||this._defaults.monthNamesShort,f=(s?s.monthNames:null)||this._defaults.monthNames,g=-1,m=-1,_=-1,v=-1,b=!1,y=function(t){var i=e.length>n+1&&e.charAt(n+1)===t;return i&&n++,i},w=function(t){var e=y(t),s="@"===t?14:"!"===t?20:"y"===t&&e?4:"o"===t?3:2,n="y"===t?s:1,o=RegExp("^\\d{"+n+","+s+"}"),a=i.substring(h).match(o);if(!a)throw"Missing number at position "+h;return h+=a[0].length,parseInt(a[0],10)},k=function(e,s,n){var o=-1,a=t.map(y(e)?n:s,function(t,e){return[[e,t]]}).sort(function(t,e){return-(t[1].length-e[1].length)});if(t.each(a,function(t,e){var s=e[1];return i.substr(h,s.length).toLowerCase()===s.toLowerCase()?(o=e[0],h+=s.length,!1):void 0}),-1!==o)return o+1;throw"Unknown name at position "+h},x=function(){if(i.charAt(h)!==e.charAt(n))throw"Unexpected literal at position "+h;h++};for(n=0;e.length>n;n++)if(b)"'"!==e.charAt(n)||y("'")?x():b=!1;else switch(e.charAt(n)){case"d":_=w("d");break;case"D":k("D",u,d);break;case"o":v=w("o");break;case"m":m=w("m");break;case"M":m=k("M",p,f);break;case"y":g=w("y");break;case"@":r=new Date(w("@")),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"!":r=new Date((w("!")-this._ticksTo1970)/1e4),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"'":y("'")?x():b=!0;break;default:x()}if(i.length>h&&(a=i.substr(h),!/^\s+/.test(a)))throw"Extra/unparsed characters found in date: "+a;if(-1===g?g=(new Date).getFullYear():100>g&&(g+=(new Date).getFullYear()-(new 
Date).getFullYear()%100+(c>=g?0:-100)),v>-1)for(m=1,_=v;;){if(o=this._getDaysInMonth(g,m-1),o>=_)break;m++,_-=o}if(r=this._daylightSavingAdjust(new Date(g,m-1,_)),r.getFullYear()!==g||r.getMonth()+1!==m||r.getDate()!==_)throw"Invalid date";return r},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:1e7*60*60*24*(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925)),formatDate:function(t,e,i){if(!e)return"";var s,n=(i?i.dayNamesShort:null)||this._defaults.dayNamesShort,o=(i?i.dayNames:null)||this._defaults.dayNames,a=(i?i.monthNamesShort:null)||this._defaults.monthNamesShort,r=(i?i.monthNames:null)||this._defaults.monthNames,h=function(e){var i=t.length>s+1&&t.charAt(s+1)===e;return i&&s++,i},l=function(t,e,i){var s=""+e;if(h(t))for(;i>s.length;)s="0"+s;return s},c=function(t,e,i,s){return h(t)?s[e]:i[e]},u="",d=!1;if(e)for(s=0;t.length>s;s++)if(d)"'"!==t.charAt(s)||h("'")?u+=t.charAt(s):d=!1;else switch(t.charAt(s)){case"d":u+=l("d",e.getDate(),2);break;case"D":u+=c("D",e.getDay(),n,o);break;case"o":u+=l("o",Math.round((new Date(e.getFullYear(),e.getMonth(),e.getDate()).getTime()-new Date(e.getFullYear(),0,0).getTime())/864e5),3);break;case"m":u+=l("m",e.getMonth()+1,2);break;case"M":u+=c("M",e.getMonth(),a,r);break;case"y":u+=h("y")?e.getFullYear():(10>e.getFullYear()%100?"0":"")+e.getFullYear()%100;break;case"@":u+=e.getTime();break;case"!":u+=1e4*e.getTime()+this._ticksTo1970;break;case"'":h("'")?u+="'":d=!0;break;default:u+=t.charAt(s)}return u},_possibleChars:function(t){var e,i="",s=!1,n=function(i){var s=t.length>e+1&&t.charAt(e+1)===i;return s&&e++,s};for(e=0;t.length>e;e++)if(s)"'"!==t.charAt(e)||n("'")?i+=t.charAt(e):s=!1;else switch(t.charAt(e)){case"d":case"m":case"y":case"@":i+="0123456789";break;case"D":case"M":return null;case"'":n("'")?i+="'":s=!0;break;default:i+=t.charAt(e)}return i},_get:function(t,e){return void 0!==t.settings[e]?t.settings[e]:this._defaults[e]},_setDateFromField:function(t,e){if(t.input.val()!==t.lastVal){var i=this._get(t,"dateFormat"),s=t.lastVal=t.input?t.input.val():null,n=this._getDefaultDate(t),o=n,a=this._getFormatConfig(t);try{o=this.parseDate(i,s,a)||n}catch(r){s=e?"":s}t.selectedDay=o.getDate(),t.drawMonth=t.selectedMonth=o.getMonth(),t.drawYear=t.selectedYear=o.getFullYear(),t.currentDay=s?o.getDate():0,t.currentMonth=s?o.getMonth():0,t.currentYear=s?o.getFullYear():0,this._adjustInstDate(t)}},_getDefaultDate:function(t){return this._restrictMinMax(t,this._determineDate(t,this._get(t,"defaultDate"),new Date))},_determineDate:function(e,i,s){var n=function(t){var e=new Date;return e.setDate(e.getDate()+t),e},o=function(i){try{return t.datepicker.parseDate(t.datepicker._get(e,"dateFormat"),i,t.datepicker._getFormatConfig(e))}catch(s){}for(var n=(i.toLowerCase().match(/^c/)?t.datepicker._getDate(e):null)||new Date,o=n.getFullYear(),a=n.getMonth(),r=n.getDate(),h=/([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,l=h.exec(i);l;){switch(l[2]||"d"){case"d":case"D":r+=parseInt(l[1],10);break;case"w":case"W":r+=7*parseInt(l[1],10);break;case"m":case"M":a+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a));break;case"y":case"Y":o+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a))}l=h.exec(i)}return new Date(o,a,r)},a=null==i||""===i?s:"string"==typeof i?o(i):"number"==typeof i?isNaN(i)?s:n(i):new Date(i.getTime());return a=a&&"Invalid 
Date"==""+a?s:a,a&&(a.setHours(0),a.setMinutes(0),a.setSeconds(0),a.setMilliseconds(0)),this._daylightSavingAdjust(a)},_daylightSavingAdjust:function(t){return t?(t.setHours(t.getHours()>12?t.getHours()+2:0),t):null},_setDate:function(t,e,i){var s=!e,n=t.selectedMonth,o=t.selectedYear,a=this._restrictMinMax(t,this._determineDate(t,e,new Date));t.selectedDay=t.currentDay=a.getDate(),t.drawMonth=t.selectedMonth=t.currentMonth=a.getMonth(),t.drawYear=t.selectedYear=t.currentYear=a.getFullYear(),n===t.selectedMonth&&o===t.selectedYear||i||this._notifyChange(t),this._adjustInstDate(t),t.input&&t.input.val(s?"":this._formatDate(t))},_getDate:function(t){var e=!t.currentYear||t.input&&""===t.input.val()?null:this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return e},_attachHandlers:function(e){var i=this._get(e,"stepMonths"),s="#"+e.id.replace(/\\\\/g,"\\");e.dpDiv.find("[data-handler]").map(function(){var e={prev:function(){t.datepicker._adjustDate(s,-i,"M")},next:function(){t.datepicker._adjustDate(s,+i,"M")},hide:function(){t.datepicker._hideDatepicker()},today:function(){t.datepicker._gotoToday(s)},selectDay:function(){return t.datepicker._selectDay(s,+this.getAttribute("data-month"),+this.getAttribute("data-year"),this),!1},selectMonth:function(){return t.datepicker._selectMonthYear(s,this,"M"),!1},selectYear:function(){return t.datepicker._selectMonthYear(s,this,"Y"),!1}};t(this).on(this.getAttribute("data-event"),e[this.getAttribute("data-handler")])})},_generateHTML:function(t){var e,i,s,n,o,a,r,h,l,c,u,d,p,f,g,m,_,v,b,y,w,k,x,C,D,I,T,P,M,S,H,z,O,A,N,W,E,F,L,R=new Date,B=this._daylightSavingAdjust(new Date(R.getFullYear(),R.getMonth(),R.getDate())),Y=this._get(t,"isRTL"),j=this._get(t,"showButtonPanel"),q=this._get(t,"hideIfNoPrevNext"),K=this._get(t,"navigationAsDateFormat"),U=this._getNumberOfMonths(t),V=this._get(t,"showCurrentAtPos"),$=this._get(t,"stepMonths"),X=1!==U[0]||1!==U[1],G=this._daylightSavingAdjust(t.currentDay?new Date(t.currentYear,t.currentMonth,t.currentDay):new Date(9999,9,9)),Q=this._getMinMaxDate(t,"min"),J=this._getMinMaxDate(t,"max"),Z=t.drawMonth-V,te=t.drawYear;if(0>Z&&(Z+=12,te--),J)for(e=this._daylightSavingAdjust(new Date(J.getFullYear(),J.getMonth()-U[0]*U[1]+1,J.getDate())),e=Q&&Q>e?Q:e;this._daylightSavingAdjust(new Date(te,Z,1))>e;)Z--,0>Z&&(Z=11,te--);for(t.drawMonth=Z,t.drawYear=te,i=this._get(t,"prevText"),i=K?this.formatDate(i,this._daylightSavingAdjust(new Date(te,Z-$,1)),this._getFormatConfig(t)):i,s=this._canAdjustMonth(t,-1,te,Z)?"<a class='ui-datepicker-prev ui-corner-all' data-handler='prev' data-event='click' title='"+i+"'><span class='ui-icon ui-icon-circle-triangle-"+(Y?"e":"w")+"'>"+i+"</span></a>":q?"":"<a class='ui-datepicker-prev ui-corner-all ui-state-disabled' title='"+i+"'><span class='ui-icon ui-icon-circle-triangle-"+(Y?"e":"w")+"'>"+i+"</span></a>",n=this._get(t,"nextText"),n=K?this.formatDate(n,this._daylightSavingAdjust(new Date(te,Z+$,1)),this._getFormatConfig(t)):n,o=this._canAdjustMonth(t,1,te,Z)?"<a class='ui-datepicker-next ui-corner-all' data-handler='next' data-event='click' title='"+n+"'><span class='ui-icon ui-icon-circle-triangle-"+(Y?"w":"e")+"'>"+n+"</span></a>":q?"":"<a class='ui-datepicker-next ui-corner-all ui-state-disabled' title='"+n+"'><span class='ui-icon ui-icon-circle-triangle-"+(Y?"w":"e")+"'>"+n+"</span></a>",a=this._get(t,"currentText"),r=this._get(t,"gotoCurrent")&&t.currentDay?G:B,a=K?this.formatDate(a,r,this._getFormatConfig(t)):a,h=t.inline?"":"<button type='button' 
class='ui-datepicker-close ui-state-default ui-priority-primary ui-corner-all' data-handler='hide' data-event='click'>"+this._get(t,"closeText")+"</button>",l=j?"<div class='ui-datepicker-buttonpane ui-widget-content'>"+(Y?h:"")+(this._isInRange(t,r)?"<button type='button' class='ui-datepicker-current ui-state-default ui-priority-secondary ui-corner-all' data-handler='today' data-event='click'>"+a+"</button>":"")+(Y?"":h)+"</div>":"",c=parseInt(this._get(t,"firstDay"),10),c=isNaN(c)?0:c,u=this._get(t,"showWeek"),d=this._get(t,"dayNames"),p=this._get(t,"dayNamesMin"),f=this._get(t,"monthNames"),g=this._get(t,"monthNamesShort"),m=this._get(t,"beforeShowDay"),_=this._get(t,"showOtherMonths"),v=this._get(t,"selectOtherMonths"),b=this._getDefaultDate(t),y="",k=0;U[0]>k;k++){for(x="",this.maxRows=4,C=0;U[1]>C;C++){if(D=this._daylightSavingAdjust(new Date(te,Z,t.selectedDay)),I=" ui-corner-all",T="",X){if(T+="<div class='ui-datepicker-group",U[1]>1)switch(C){case 0:T+=" ui-datepicker-group-first",I=" ui-corner-"+(Y?"right":"left");break;case U[1]-1:T+=" ui-datepicker-group-last",I=" ui-corner-"+(Y?"left":"right");break;default:T+=" ui-datepicker-group-middle",I=""}T+="'>"}for(T+="<div class='ui-datepicker-header ui-widget-header ui-helper-clearfix"+I+"'>"+(/all|left/.test(I)&&0===k?Y?o:s:"")+(/all|right/.test(I)&&0===k?Y?s:o:"")+this._generateMonthYearHeader(t,Z,te,Q,J,k>0||C>0,f,g)+"</div><table class='ui-datepicker-calendar'><thead>"+"<tr>",P=u?"<th class='ui-datepicker-week-col'>"+this._get(t,"weekHeader")+"</th>":"",w=0;7>w;w++)M=(w+c)%7,P+="<th scope='col'"+((w+c+6)%7>=5?" class='ui-datepicker-week-end'":"")+">"+"<span title='"+d[M]+"'>"+p[M]+"</span></th>";for(T+=P+"</tr></thead><tbody>",S=this._getDaysInMonth(te,Z),te===t.selectedYear&&Z===t.selectedMonth&&(t.selectedDay=Math.min(t.selectedDay,S)),H=(this._getFirstDayOfMonth(te,Z)-c+7)%7,z=Math.ceil((H+S)/7),O=X?this.maxRows>z?this.maxRows:z:z,this.maxRows=O,A=this._daylightSavingAdjust(new Date(te,Z,1-H)),N=0;O>N;N++){for(T+="<tr>",W=u?"<td class='ui-datepicker-week-col'>"+this._get(t,"calculateWeek")(A)+"</td>":"",w=0;7>w;w++)E=m?m.apply(t.input?t.input[0]:null,[A]):[!0,""],F=A.getMonth()!==Z,L=F&&!v||!E[0]||Q&&Q>A||J&&A>J,W+="<td class='"+((w+c+6)%7>=5?" ui-datepicker-week-end":"")+(F?" ui-datepicker-other-month":"")+(A.getTime()===D.getTime()&&Z===t.selectedMonth&&t._keyEvent||b.getTime()===A.getTime()&&b.getTime()===D.getTime()?" "+this._dayOverClass:"")+(L?" "+this._unselectableClass+" ui-state-disabled":"")+(F&&!_?"":" "+E[1]+(A.getTime()===G.getTime()?" "+this._currentClass:"")+(A.getTime()===B.getTime()?" ui-datepicker-today":""))+"'"+(F&&!_||!E[2]?"":" title='"+E[2].replace(/'/g,"&#39;")+"'")+(L?"":" data-handler='selectDay' data-event='click' data-month='"+A.getMonth()+"' data-year='"+A.getFullYear()+"'")+">"+(F&&!_?"&#xa0;":L?"<span class='ui-state-default'>"+A.getDate()+"</span>":"<a class='ui-state-default"+(A.getTime()===B.getTime()?" ui-state-highlight":"")+(A.getTime()===G.getTime()?" ui-state-active":"")+(F?" 
ui-priority-secondary":"")+"' href='#'>"+A.getDate()+"</a>")+"</td>",A.setDate(A.getDate()+1),A=this._daylightSavingAdjust(A);T+=W+"</tr>"}Z++,Z>11&&(Z=0,te++),T+="</tbody></table>"+(X?"</div>"+(U[0]>0&&C===U[1]-1?"<div class='ui-datepicker-row-break'></div>":""):""),x+=T}y+=x}return y+=l,t._keyEvent=!1,y},_generateMonthYearHeader:function(t,e,i,s,n,o,a,r){var h,l,c,u,d,p,f,g,m=this._get(t,"changeMonth"),_=this._get(t,"changeYear"),v=this._get(t,"showMonthAfterYear"),b="<div class='ui-datepicker-title'>",y="";if(o||!m)y+="<span class='ui-datepicker-month'>"+a[e]+"</span>";else{for(h=s&&s.getFullYear()===i,l=n&&n.getFullYear()===i,y+="<select class='ui-datepicker-month' data-handler='selectMonth' data-event='change'>",c=0;12>c;c++)(!h||c>=s.getMonth())&&(!l||n.getMonth()>=c)&&(y+="<option value='"+c+"'"+(c===e?" selected='selected'":"")+">"+r[c]+"</option>");y+="</select>"}if(v||(b+=y+(!o&&m&&_?"":"&#xa0;")),!t.yearshtml)if(t.yearshtml="",o||!_)b+="<span class='ui-datepicker-year'>"+i+"</span>";else{for(u=this._get(t,"yearRange").split(":"),d=(new Date).getFullYear(),p=function(t){var e=t.match(/c[+\-].*/)?i+parseInt(t.substring(1),10):t.match(/[+\-].*/)?d+parseInt(t,10):parseInt(t,10);return isNaN(e)?d:e},f=p(u[0]),g=Math.max(f,p(u[1]||"")),f=s?Math.max(f,s.getFullYear()):f,g=n?Math.min(g,n.getFullYear()):g,t.yearshtml+="<select class='ui-datepicker-year' data-handler='selectYear' data-event='change'>";g>=f;f++)t.yearshtml+="<option value='"+f+"'"+(f===i?" selected='selected'":"")+">"+f+"</option>";t.yearshtml+="</select>",b+=t.yearshtml,t.yearshtml=null}return b+=this._get(t,"yearSuffix"),v&&(b+=(!o&&m&&_?"":"&#xa0;")+y),b+="</div>"},_adjustInstDate:function(t,e,i){var s=t.selectedYear+("Y"===i?e:0),n=t.selectedMonth+("M"===i?e:0),o=Math.min(t.selectedDay,this._getDaysInMonth(s,n))+("D"===i?e:0),a=this._restrictMinMax(t,this._daylightSavingAdjust(new Date(s,n,o)));t.selectedDay=a.getDate(),t.drawMonth=t.selectedMonth=a.getMonth(),t.drawYear=t.selectedYear=a.getFullYear(),("M"===i||"Y"===i)&&this._notifyChange(t)},_restrictMinMax:function(t,e){var i=this._getMinMaxDate(t,"min"),s=this._getMinMaxDate(t,"max"),n=i&&i>e?i:e;return s&&n>s?s:n},_notifyChange:function(t){var e=this._get(t,"onChangeMonthYear");e&&e.apply(t.input?t.input[0]:null,[t.selectedYear,t.selectedMonth+1,t])},_getNumberOfMonths:function(t){var e=this._get(t,"numberOfMonths");return null==e?[1,1]:"number"==typeof e?[1,e]:e},_getMinMaxDate:function(t,e){return this._determineDate(t,this._get(t,e+"Date"),null)},_getDaysInMonth:function(t,e){return 32-this._daylightSavingAdjust(new Date(t,e,32)).getDate()},_getFirstDayOfMonth:function(t,e){return new Date(t,e,1).getDay()},_canAdjustMonth:function(t,e,i,s){var n=this._getNumberOfMonths(t),o=this._daylightSavingAdjust(new Date(i,s+(0>e?e:n[0]*n[1]),1));return 0>e&&o.setDate(this._getDaysInMonth(o.getFullYear(),o.getMonth())),this._isInRange(t,o)},_isInRange:function(t,e){var i,s,n=this._getMinMaxDate(t,"min"),o=this._getMinMaxDate(t,"max"),a=null,r=null,h=this._get(t,"yearRange");return h&&(i=h.split(":"),s=(new Date).getFullYear(),a=parseInt(i[0],10),r=parseInt(i[1],10),i[0].match(/[+\-].*/)&&(a+=s),i[1].match(/[+\-].*/)&&(r+=s)),(!n||e.getTime()>=n.getTime())&&(!o||e.getTime()<=o.getTime())&&(!a||e.getFullYear()>=a)&&(!r||r>=e.getFullYear())},_getFormatConfig:function(t){var e=this._get(t,"shortYearCutoff");return e="string"!=typeof e?e:(new 
Date).getFullYear()%100+parseInt(e,10),{shortYearCutoff:e,dayNamesShort:this._get(t,"dayNamesShort"),dayNames:this._get(t,"dayNames"),monthNamesShort:this._get(t,"monthNamesShort"),monthNames:this._get(t,"monthNames")}},_formatDate:function(t,e,i,s){e||(t.currentDay=t.selectedDay,t.currentMonth=t.selectedMonth,t.currentYear=t.selectedYear);var n=e?"object"==typeof e?e:this._daylightSavingAdjust(new Date(s,i,e)):this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return this.formatDate(this._get(t,"dateFormat"),n,this._getFormatConfig(t))}}),t.fn.datepicker=function(e){if(!this.length)return this;t.datepicker.initialized||(t(document).on("mousedown",t.datepicker._checkExternalClick),t.datepicker.initialized=!0),0===t("#"+t.datepicker._mainDivId).length&&t("body").append(t.datepicker.dpDiv);var i=Array.prototype.slice.call(arguments,1);return"string"!=typeof e||"isDisabled"!==e&&"getDate"!==e&&"widget"!==e?"option"===e&&2===arguments.length&&"string"==typeof arguments[1]?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i)):this.each(function(){"string"==typeof e?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this].concat(i)):t.datepicker._attachDatepicker(this,e)}):t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i))},t.datepicker=new s,t.datepicker.initialized=!1,t.datepicker.uuid=(new Date).getTime(),t.datepicker.version="1.12.1",t.datepicker,t.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase());var _=!1;t(document).on("mouseup",function(){_=!1}),t.widget("ui.mouse",{version:"1.12.1",options:{cancel:"input, textarea, button, select, option",distance:1,delay:0},_mouseInit:function(){var e=this;this.element.on("mousedown."+this.widgetName,function(t){return e._mouseDown(t)}).on("click."+this.widgetName,function(i){return!0===t.data(i.target,e.widgetName+".preventClickEvent")?(t.removeData(i.target,e.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 0}),this.started=!1},_mouseDestroy:function(){this.element.off("."+this.widgetName),this._mouseMoveDelegate&&this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(e){if(!_){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(e),this._mouseDownEvent=e;var i=this,s=1===e.which,n="string"==typeof this.options.cancel&&e.target.nodeName?t(e.target).closest(this.options.cancel).length:!1;return s&&!n&&this._mouseCapture(e)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(e)!==!1,!this._mouseStarted)?(e.preventDefault(),!0):(!0===t.data(e.target,this.widgetName+".preventClickEvent")&&t.removeData(e.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(t){return i._mouseMove(t)},this._mouseUpDelegate=function(t){return i._mouseUp(t)},this.document.on("mousemove."+this.widgetName,this._mouseMoveDelegate).on("mouseup."+this.widgetName,this._mouseUpDelegate),e.preventDefault(),_=!0,!0)):!0}},_mouseMove:function(e){if(this._mouseMoved){if(t.ui.ie&&(!document.documentMode||9>document.documentMode)&&!e.button)return this._mouseUp(e);if(!e.which)if(e.originalEvent.altKey||e.originalEvent.ctrlKey||e.originalEvent.metaKey||e.originalEvent.shiftKey)this.ignoreMissingWhich=!0;else if(!this.ignoreMissingWhich)return 
this._mouseUp(e)}return(e.which||e.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(e),e.preventDefault()):(this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,e)!==!1,this._mouseStarted?this._mouseDrag(e):this._mouseUp(e)),!this._mouseStarted)},_mouseUp:function(e){this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,e.target===this._mouseDownEvent.target&&t.data(e.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(e)),this._mouseDelayTimer&&(clearTimeout(this._mouseDelayTimer),delete this._mouseDelayTimer),this.ignoreMissingWhich=!1,_=!1,e.preventDefault()},_mouseDistanceMet:function(t){return Math.max(Math.abs(this._mouseDownEvent.pageX-t.pageX),Math.abs(this._mouseDownEvent.pageY-t.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),t.ui.plugin={add:function(e,i,s){var n,o=t.ui[e].prototype;for(n in s)o.plugins[n]=o.plugins[n]||[],o.plugins[n].push([i,s[n]])},call:function(t,e,i,s){var n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;o.length>n;n++)t.options[o[n][0]]&&o[n][1].apply(t.element,i)}},t.ui.safeBlur=function(e){e&&"body"!==e.nodeName.toLowerCase()&&t(e).trigger("blur")},t.widget("ui.draggable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"drag",options:{addClasses:!0,appendTo:"parent",axis:!1,connectToSortable:!1,containment:!1,cursor:"auto",cursorAt:!1,grid:!1,handle:!1,helper:"original",iframeFix:!1,opacity:!1,refreshPositions:!1,revert:!1,revertDuration:500,scope:"default",scroll:!0,scrollSensitivity:20,scrollSpeed:20,snap:!1,snapMode:"both",snapTolerance:20,stack:!1,zIndex:!1,drag:null,start:null,stop:null},_create:function(){"original"===this.options.helper&&this._setPositionRelative(),this.options.addClasses&&this._addClass("ui-draggable"),this._setHandleClassName(),this._mouseInit()},_setOption:function(t,e){this._super(t,e),"handle"===t&&(this._removeHandleClassName(),this._setHandleClassName())},_destroy:function(){return(this.helper||this.element).is(".ui-draggable-dragging")?(this.destroyOnClear=!0,void 0):(this._removeHandleClassName(),this._mouseDestroy(),void 0)},_mouseCapture:function(e){var i=this.options;return this.helper||i.disabled||t(e.target).closest(".ui-resizable-handle").length>0?!1:(this.handle=this._getHandle(e),this.handle?(this._blurActiveElement(e),this._blockFrames(i.iframeFix===!0?"iframe":i.iframeFix),!0):!1)},_blockFrames:function(e){this.iframeBlocks=this.document.find(e).map(function(){var e=t(this);return t("<div>").css("position","absolute").appendTo(e.parent()).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_blurActiveElement:function(e){var i=t.ui.safeActiveElement(this.document[0]),s=t(e.target);s.closest(i).length||t.ui.safeBlur(i)},_mouseStart:function(e){var i=this.options;return 
this.helper=this._createHelper(e),this._addClass(this.helper,"ui-draggable-dragging"),this._cacheHelperProportions(),t.ui.ddmanager&&(t.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(!0),this.offsetParent=this.helper.offsetParent(),this.hasFixedAncestor=this.helper.parents().filter(function(){return"fixed"===t(this).css("position")}).length>0,this.positionAbs=this.element.offset(),this._refreshOffsets(e),this.originalPosition=this.position=this._generatePosition(e,!1),this.originalPageX=e.pageX,this.originalPageY=e.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this._setContainment(),this._trigger("start",e)===!1?(this._clear(),!1):(this._cacheHelperProportions(),t.ui.ddmanager&&!i.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e),this._mouseDrag(e,!0),t.ui.ddmanager&&t.ui.ddmanager.dragStart(this,e),!0)},_refreshOffsets:function(t){this.offset={top:this.positionAbs.top-this.margins.top,left:this.positionAbs.left-this.margins.left,scroll:!1,parent:this._getParentOffset(),relative:this._getRelativeOffset()},this.offset.click={left:t.pageX-this.offset.left,top:t.pageY-this.offset.top}},_mouseDrag:function(e,i){if(this.hasFixedAncestor&&(this.offset.parent=this._getParentOffset()),this.position=this._generatePosition(e,!0),this.positionAbs=this._convertPositionTo("absolute"),!i){var s=this._uiHash();if(this._trigger("drag",e,s)===!1)return this._mouseUp(new t.Event("mouseup",e)),!1;this.position=s.position}return this.helper[0].style.left=this.position.left+"px",this.helper[0].style.top=this.position.top+"px",t.ui.ddmanager&&t.ui.ddmanager.drag(this,e),!1},_mouseStop:function(e){var i=this,s=!1;return t.ui.ddmanager&&!this.options.dropBehaviour&&(s=t.ui.ddmanager.drop(this,e)),this.dropped&&(s=this.dropped,this.dropped=!1),"invalid"===this.options.revert&&!s||"valid"===this.options.revert&&s||this.options.revert===!0||t.isFunction(this.options.revert)&&this.options.revert.call(this.element,s)?t(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){i._trigger("stop",e)!==!1&&i._clear()}):this._trigger("stop",e)!==!1&&this._clear(),!1},_mouseUp:function(e){return this._unblockFrames(),t.ui.ddmanager&&t.ui.ddmanager.dragStop(this,e),this.handleElement.is(e.target)&&this.element.trigger("focus"),t.ui.mouse.prototype._mouseUp.call(this,e)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp(new t.Event("mouseup",{target:this.element[0]})):this._clear(),this},_getHandle:function(e){return this.options.handle?!!t(e.target).closest(this.element.find(this.options.handle)).length:!0},_setHandleClassName:function(){this.handleElement=this.options.handle?this.element.find(this.options.handle):this.element,this._addClass(this.handleElement,"ui-draggable-handle")},_removeHandleClassName:function(){this._removeClass(this.handleElement,"ui-draggable-handle")},_createHelper:function(e){var i=this.options,s=t.isFunction(i.helper),n=s?t(i.helper.apply(this.element[0],[e])):"clone"===i.helper?this.element.clone().removeAttr("id"):this.element;return 
n.parents("body").length||n.appendTo("parent"===i.appendTo?this.element[0].parentNode:i.appendTo),s&&n[0]===this.element[0]&&this._setPositionRelative(),n[0]===this.element[0]||/(fixed|absolute)/.test(n.css("position"))||n.css("position","absolute"),n},_setPositionRelative:function(){/^(?:r|a|f)/.test(this.element.css("position"))||(this.element[0].style.position="relative")},_adjustOffsetFromHelper:function(e){"string"==typeof e&&(e=e.split(" ")),t.isArray(e)&&(e={left:+e[0],top:+e[1]||0}),"left"in e&&(this.offset.click.left=e.left+this.margins.left),"right"in e&&(this.offset.click.left=this.helperProportions.width-e.right+this.margins.left),"top"in e&&(this.offset.click.top=e.top+this.margins.top),"bottom"in e&&(this.offset.click.top=this.helperProportions.height-e.bottom+this.margins.top)},_isRootNode:function(t){return/(html|body)/i.test(t.tagName)||t===this.document[0]},_getParentOffset:function(){var e=this.offsetParent.offset(),i=this.document[0];return"absolute"===this.cssPosition&&this.scrollParent[0]!==i&&t.contains(this.scrollParent[0],this.offsetParent[0])&&(e.left+=this.scrollParent.scrollLeft(),e.top+=this.scrollParent.scrollTop()),this._isRootNode(this.offsetParent[0])&&(e={top:0,left:0}),{top:e.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:e.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"!==this.cssPosition)return{top:0,left:0};var t=this.element.position(),e=this._isRootNode(this.scrollParent[0]);return{top:t.top-(parseInt(this.helper.css("top"),10)||0)+(e?0:this.scrollParent.scrollTop()),left:t.left-(parseInt(this.helper.css("left"),10)||0)+(e?0:this.scrollParent.scrollLeft())}
-},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var e,i,s,n=this.options,o=this.document[0];return this.relativeContainer=null,n.containment?"window"===n.containment?(this.containment=[t(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,t(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,t(window).scrollLeft()+t(window).width()-this.helperProportions.width-this.margins.left,t(window).scrollTop()+(t(window).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):"document"===n.containment?(this.containment=[0,0,t(o).width()-this.helperProportions.width-this.margins.left,(t(o).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):n.containment.constructor===Array?(this.containment=n.containment,void 0):("parent"===n.containment&&(n.containment=this.helper[0].parentNode),i=t(n.containment),s=i[0],s&&(e=/(scroll|auto)/.test(i.css("overflow")),this.containment=[(parseInt(i.css("borderLeftWidth"),10)||0)+(parseInt(i.css("paddingLeft"),10)||0),(parseInt(i.css("borderTopWidth"),10)||0)+(parseInt(i.css("paddingTop"),10)||0),(e?Math.max(s.scrollWidth,s.offsetWidth):s.offsetWidth)-(parseInt(i.css("borderRightWidth"),10)||0)-(parseInt(i.css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(e?Math.max(s.scrollHeight,s.offsetHeight):s.offsetHeight)-(parseInt(i.css("borderBottomWidth"),10)||0)-(parseInt(i.css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relativeContainer=i),void 0):(this.containment=null,void 0)},_convertPositionTo:function(t,e){e||(e=this.position);var i="absolute"===t?1:-1,s=this._isRootNode(this.scrollParent[0]);return{top:e.top+this.offset.relative.top*i+this.offset.parent.top*i-("fixed"===this.cssPosition?-this.offset.scroll.top:s?0:this.offset.scroll.top)*i,left:e.left+this.offset.relative.left*i+this.offset.parent.left*i-("fixed"===this.cssPosition?-this.offset.scroll.left:s?0:this.offset.scroll.left)*i}},_generatePosition:function(t,e){var i,s,n,o,a=this.options,r=this._isRootNode(this.scrollParent[0]),h=t.pageX,l=t.pageY;return 
r&&this.offset.scroll||(this.offset.scroll={top:this.scrollParent.scrollTop(),left:this.scrollParent.scrollLeft()}),e&&(this.containment&&(this.relativeContainer?(s=this.relativeContainer.offset(),i=[this.containment[0]+s.left,this.containment[1]+s.top,this.containment[2]+s.left,this.containment[3]+s.top]):i=this.containment,t.pageX-this.offset.click.left<i[0]&&(h=i[0]+this.offset.click.left),t.pageY-this.offset.click.top<i[1]&&(l=i[1]+this.offset.click.top),t.pageX-this.offset.click.left>i[2]&&(h=i[2]+this.offset.click.left),t.pageY-this.offset.click.top>i[3]&&(l=i[3]+this.offset.click.top)),a.grid&&(n=a.grid[1]?this.originalPageY+Math.round((l-this.originalPageY)/a.grid[1])*a.grid[1]:this.originalPageY,l=i?n-this.offset.click.top>=i[1]||n-this.offset.click.top>i[3]?n:n-this.offset.click.top>=i[1]?n-a.grid[1]:n+a.grid[1]:n,o=a.grid[0]?this.originalPageX+Math.round((h-this.originalPageX)/a.grid[0])*a.grid[0]:this.originalPageX,h=i?o-this.offset.click.left>=i[0]||o-this.offset.click.left>i[2]?o:o-this.offset.click.left>=i[0]?o-a.grid[0]:o+a.grid[0]:o),"y"===a.axis&&(h=this.originalPageX),"x"===a.axis&&(l=this.originalPageY)),{top:l-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.offset.scroll.top:r?0:this.offset.scroll.top),left:h-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.offset.scroll.left:r?0:this.offset.scroll.left)}},_clear:function(){this._removeClass(this.helper,"ui-draggable-dragging"),this.helper[0]===this.element[0]||this.cancelHelperRemoval||this.helper.remove(),this.helper=null,this.cancelHelperRemoval=!1,this.destroyOnClear&&this.destroy()},_trigger:function(e,i,s){return s=s||this._uiHash(),t.ui.plugin.call(this,e,[i,s,this],!0),/^(drag|start|stop)/.test(e)&&(this.positionAbs=this._convertPositionTo("absolute"),s.offset=this.positionAbs),t.Widget.prototype._trigger.call(this,e,i,s)},plugins:{},_uiHash:function(){return{helper:this.helper,position:this.position,originalPosition:this.originalPosition,offset:this.positionAbs}}}),t.ui.plugin.add("draggable","connectToSortable",{start:function(e,i,s){var n=t.extend({},i,{item:s.element});s.sortables=[],t(s.options.connectToSortable).each(function(){var i=t(this).sortable("instance");i&&!i.options.disabled&&(s.sortables.push(i),i.refreshPositions(),i._trigger("activate",e,n))})},stop:function(e,i,s){var n=t.extend({},i,{item:s.element});s.cancelHelperRemoval=!1,t.each(s.sortables,function(){var t=this;t.isOver?(t.isOver=0,s.cancelHelperRemoval=!0,t.cancelHelperRemoval=!1,t._storedCSS={position:t.placeholder.css("position"),top:t.placeholder.css("top"),left:t.placeholder.css("left")},t._mouseStop(e),t.options.helper=t.options._helper):(t.cancelHelperRemoval=!0,t._trigger("deactivate",e,n))})},drag:function(e,i,s){t.each(s.sortables,function(){var n=!1,o=this;o.positionAbs=s.positionAbs,o.helperProportions=s.helperProportions,o.offset.click=s.offset.click,o._intersectsWith(o.containerCache)&&(n=!0,t.each(s.sortables,function(){return this.positionAbs=s.positionAbs,this.helperProportions=s.helperProportions,this.offset.click=s.offset.click,this!==o&&this._intersectsWith(this.containerCache)&&t.contains(o.element[0],this.element[0])&&(n=!1),n})),n?(o.isOver||(o.isOver=1,s._parent=i.helper.parent(),o.currentItem=i.helper.appendTo(o.element).data("ui-sortable-item",!0),o.options._helper=o.options.helper,o.options.helper=function(){return 
i.helper[0]},e.target=o.currentItem[0],o._mouseCapture(e,!0),o._mouseStart(e,!0,!0),o.offset.click.top=s.offset.click.top,o.offset.click.left=s.offset.click.left,o.offset.parent.left-=s.offset.parent.left-o.offset.parent.left,o.offset.parent.top-=s.offset.parent.top-o.offset.parent.top,s._trigger("toSortable",e),s.dropped=o.element,t.each(s.sortables,function(){this.refreshPositions()}),s.currentItem=s.element,o.fromOutside=s),o.currentItem&&(o._mouseDrag(e),i.position=o.position)):o.isOver&&(o.isOver=0,o.cancelHelperRemoval=!0,o.options._revert=o.options.revert,o.options.revert=!1,o._trigger("out",e,o._uiHash(o)),o._mouseStop(e,!0),o.options.revert=o.options._revert,o.options.helper=o.options._helper,o.placeholder&&o.placeholder.remove(),i.helper.appendTo(s._parent),s._refreshOffsets(e),i.position=s._generatePosition(e,!0),s._trigger("fromSortable",e),s.dropped=!1,t.each(s.sortables,function(){this.refreshPositions()}))})}}),t.ui.plugin.add("draggable","cursor",{start:function(e,i,s){var n=t("body"),o=s.options;n.css("cursor")&&(o._cursor=n.css("cursor")),n.css("cursor",o.cursor)},stop:function(e,i,s){var n=s.options;n._cursor&&t("body").css("cursor",n._cursor)}}),t.ui.plugin.add("draggable","opacity",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("opacity")&&(o._opacity=n.css("opacity")),n.css("opacity",o.opacity)},stop:function(e,i,s){var n=s.options;n._opacity&&t(i.helper).css("opacity",n._opacity)}}),t.ui.plugin.add("draggable","scroll",{start:function(t,e,i){i.scrollParentNotHidden||(i.scrollParentNotHidden=i.helper.scrollParent(!1)),i.scrollParentNotHidden[0]!==i.document[0]&&"HTML"!==i.scrollParentNotHidden[0].tagName&&(i.overflowOffset=i.scrollParentNotHidden.offset())},drag:function(e,i,s){var n=s.options,o=!1,a=s.scrollParentNotHidden[0],r=s.document[0];a!==r&&"HTML"!==a.tagName?(n.axis&&"x"===n.axis||(s.overflowOffset.top+a.offsetHeight-e.pageY<n.scrollSensitivity?a.scrollTop=o=a.scrollTop+n.scrollSpeed:e.pageY-s.overflowOffset.top<n.scrollSensitivity&&(a.scrollTop=o=a.scrollTop-n.scrollSpeed)),n.axis&&"y"===n.axis||(s.overflowOffset.left+a.offsetWidth-e.pageX<n.scrollSensitivity?a.scrollLeft=o=a.scrollLeft+n.scrollSpeed:e.pageX-s.overflowOffset.left<n.scrollSensitivity&&(a.scrollLeft=o=a.scrollLeft-n.scrollSpeed))):(n.axis&&"x"===n.axis||(e.pageY-t(r).scrollTop()<n.scrollSensitivity?o=t(r).scrollTop(t(r).scrollTop()-n.scrollSpeed):t(window).height()-(e.pageY-t(r).scrollTop())<n.scrollSensitivity&&(o=t(r).scrollTop(t(r).scrollTop()+n.scrollSpeed))),n.axis&&"y"===n.axis||(e.pageX-t(r).scrollLeft()<n.scrollSensitivity?o=t(r).scrollLeft(t(r).scrollLeft()-n.scrollSpeed):t(window).width()-(e.pageX-t(r).scrollLeft())<n.scrollSensitivity&&(o=t(r).scrollLeft(t(r).scrollLeft()+n.scrollSpeed)))),o!==!1&&t.ui.ddmanager&&!n.dropBehaviour&&t.ui.ddmanager.prepareOffsets(s,e)}}),t.ui.plugin.add("draggable","snap",{start:function(e,i,s){var n=s.options;s.snapElements=[],t(n.snap.constructor!==String?n.snap.items||":data(ui-draggable)":n.snap).each(function(){var e=t(this),i=e.offset();this!==s.element[0]&&s.snapElements.push({item:this,width:e.outerWidth(),height:e.outerHeight(),top:i.top,left:i.left})})},drag:function(e,i,s){var 
n,o,a,r,h,l,c,u,d,p,f=s.options,g=f.snapTolerance,m=i.offset.left,_=m+s.helperProportions.width,v=i.offset.top,b=v+s.helperProportions.height;for(d=s.snapElements.length-1;d>=0;d--)h=s.snapElements[d].left-s.margins.left,l=h+s.snapElements[d].width,c=s.snapElements[d].top-s.margins.top,u=c+s.snapElements[d].height,h-g>_||m>l+g||c-g>b||v>u+g||!t.contains(s.snapElements[d].item.ownerDocument,s.snapElements[d].item)?(s.snapElements[d].snapping&&s.options.snap.release&&s.options.snap.release.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=!1):("inner"!==f.snapMode&&(n=g>=Math.abs(c-b),o=g>=Math.abs(u-v),a=g>=Math.abs(h-_),r=g>=Math.abs(l-m),n&&(i.position.top=s._convertPositionTo("relative",{top:c-s.helperProportions.height,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h-s.helperProportions.width}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l}).left)),p=n||o||a||r,"outer"!==f.snapMode&&(n=g>=Math.abs(c-v),o=g>=Math.abs(u-b),a=g>=Math.abs(h-m),r=g>=Math.abs(l-_),n&&(i.position.top=s._convertPositionTo("relative",{top:c,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u-s.helperProportions.height,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l-s.helperProportions.width}).left)),!s.snapElements[d].snapping&&(n||o||a||r||p)&&s.options.snap.snap&&s.options.snap.snap.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=n||o||a||r||p)}}),t.ui.plugin.add("draggable","stack",{start:function(e,i,s){var n,o=s.options,a=t.makeArray(t(o.stack)).sort(function(e,i){return(parseInt(t(e).css("zIndex"),10)||0)-(parseInt(t(i).css("zIndex"),10)||0)});a.length&&(n=parseInt(t(a[0]).css("zIndex"),10)||0,t(a).each(function(e){t(this).css("zIndex",n+e)}),this.css("zIndex",n+a.length))}}),t.ui.plugin.add("draggable","zIndex",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("zIndex")&&(o._zIndex=n.css("zIndex")),n.css("zIndex",o.zIndex)},stop:function(e,i,s){var n=s.options;n._zIndex&&t(i.helper).css("zIndex",n._zIndex)}}),t.ui.draggable,t.widget("ui.resizable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,classes:{"ui-resizable-se":"ui-icon ui-icon-gripsmall-diagonal-se"},containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(t){return parseFloat(t)||0},_isNumber:function(t){return!isNaN(parseFloat(t))},_hasScroll:function(e,i){if("hidden"===t(e).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return e[s]>0?!0:(e[s]=1,n=e[s]>0,e[s]=0,n)},_create:function(){var e,i=this.options,s=this;this._addClass("ui-resizable"),t.extend(this,{_aspectRatio:!!i.aspectRatio,aspectRatio:i.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:i.helper||i.ghost||i.animate?i.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(t("<div class='ui-wrapper' style='overflow: 
hidden;'></div>").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,e={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(e),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(e),this._proportionallyResize()),this._setupHandles(),i.autoHide&&t(this.element).on("mouseenter",function(){i.disabled||(s._removeClass("ui-resizable-autohide"),s._handles.show())}).on("mouseleave",function(){i.disabled||s.resizing||(s._addClass("ui-resizable-autohide"),s._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy();var e,i=function(e){t(e).removeData("resizable").removeData("ui-resizable").off(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;default:}},_setupHandles:function(){var e,i,s,n,o,a=this.options,r=this;if(this.handles=a.handles||(t(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=t(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),s=this.handles.split(","),this.handles={},i=0;s.length>i;i++)e=t.trim(s[i]),n="ui-resizable-"+e,o=t("<div>"),this._addClass(o,"ui-resizable-handle "+n),o.css({zIndex:a.zIndex}),this.handles[e]=".ui-resizable-"+e,this.element.append(o);this._renderAxis=function(e){var i,s,n,o;e=e||this.element;for(i in 
this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=t(this.handles[i]),this._on(this.handles[i],{mousedown:r._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=t(this.handles[i],this.element),o=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),e.css(n,o),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){r.resizing||(this.className&&(o=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),r.axis=o&&o[1]?o[1]:"se")}),a.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._handles.remove()},_mouseCapture:function(e){var i,s,n=!1;for(i in this.handles)s=t(this.handles[i])[0],(s===e.target||t.contains(s,e.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(e){var i,s,n,o=this.options,a=this.element;return this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),o.containment&&(i+=t(o.containment).scrollLeft()||0,s+=t(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:a.width(),height:a.height()},this.originalSize=this._helper?{width:a.outerWidth(),height:a.outerHeight()}:{width:a.width(),height:a.height()},this.sizeDiff={width:a.outerWidth()-a.width(),height:a.outerHeight()-a.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:e.pageX,top:e.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=t(".ui-resizable-"+this.axis).css("cursor"),t("body").css("cursor","auto"===n?this.axis+"-resize":n),this._addClass("ui-resizable-resizing"),this._propagate("start",e),!0},_mouseDrag:function(e){var i,s,n=this.originalMousePosition,o=this.axis,a=e.pageX-n.left||0,r=e.pageY-n.top||0,h=this._change[o];return this._updatePrevProperties(),h?(i=h.apply(this,[e,a,r]),this._updateVirtualBoundaries(e.shiftKey),(this._aspectRatio||e.shiftKey)&&(i=this._updateRatio(i,e)),i=this._respectSize(i,e),this._updateCache(i),this._propagate("resize",e),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),t.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",e,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(e){this.resizing=!1;var i,s,n,o,a,r,h,l=this.options,c=this;return 
this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:c.sizeDiff.height,o=s?0:c.sizeDiff.width,a={width:c.helper.width()-o,height:c.helper.height()-n},r=parseFloat(c.element.css("left"))+(c.position.left-c.originalPosition.left)||null,h=parseFloat(c.element.css("top"))+(c.position.top-c.originalPosition.top)||null,l.animate||this.element.css(t.extend(a,{top:h,left:r})),c.helper.height(c.size.height),c.helper.width(c.size.width),this._helper&&!l.animate&&this._proportionallyResize()),t("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",e),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var e,i,s,n,o,a=this.options;o={minWidth:this._isNumber(a.minWidth)?a.minWidth:0,maxWidth:this._isNumber(a.maxWidth)?a.maxWidth:1/0,minHeight:this._isNumber(a.minHeight)?a.minHeight:0,maxHeight:this._isNumber(a.maxHeight)?a.maxHeight:1/0},(this._aspectRatio||t)&&(e=o.minHeight*this.aspectRatio,s=o.minWidth/this.aspectRatio,i=o.maxHeight*this.aspectRatio,n=o.maxWidth/this.aspectRatio,e>o.minWidth&&(o.minWidth=e),s>o.minHeight&&(o.minHeight=s),o.maxWidth>i&&(o.maxWidth=i),o.maxHeight>n&&(o.maxHeight=n)),this._vBoundaries=o},_updateCache:function(t){this.offset=this.helper.offset(),this._isNumber(t.left)&&(this.position.left=t.left),this._isNumber(t.top)&&(this.position.top=t.top),this._isNumber(t.height)&&(this.size.height=t.height),this._isNumber(t.width)&&(this.size.width=t.width)},_updateRatio:function(t){var e=this.position,i=this.size,s=this.axis;return this._isNumber(t.height)?t.width=t.height*this.aspectRatio:this._isNumber(t.width)&&(t.height=t.width/this.aspectRatio),"sw"===s&&(t.left=e.left+(i.width-t.width),t.top=null),"nw"===s&&(t.top=e.top+(i.height-t.height),t.left=e.left+(i.width-t.width)),t},_respectSize:function(t){var e=this._vBoundaries,i=this.axis,s=this._isNumber(t.width)&&e.maxWidth&&e.maxWidth<t.width,n=this._isNumber(t.height)&&e.maxHeight&&e.maxHeight<t.height,o=this._isNumber(t.width)&&e.minWidth&&e.minWidth>t.width,a=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,r=this.originalPosition.left+this.originalSize.width,h=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),c=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),a&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=r-e.minWidth),s&&l&&(t.left=r-e.maxWidth),a&&c&&(t.top=h-e.minHeight),n&&c&&(t.top=h-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var 
e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];4>e;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var t,e=0,i=this.helper||this.element;this._proportionallyResizeElements.length>e;e++)t=this._proportionallyResizeElements[e],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(t)),t.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var e=this.element,i=this.options;this.elementOffset=e.offset(),this._helper?(this.helper=this.helper||t("<div style='overflow:hidden;'></div>"),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize,s=this.originalPosition;return{left:s.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},sw:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[e,i,s]))},ne:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},nw:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[e,i,s]))}},_propagate:function(e,i){t.ui.plugin.call(this,e,[i,this.ui()]),"resize"!==e&&this._trigger(e,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),t.ui.plugin.add("resizable","animate",{stop:function(e){var i=t(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,o=n.length&&/textarea/i.test(n[0].nodeName),a=o&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=o?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-a},l=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,c=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(t.extend(h,c&&l?{top:c,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};n&&n.length&&t(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",e)}})}}),t.ui.plugin.add("resizable","containment",{start:function(){var e,i,s,n,o,a,r,h=t(this).resizable("instance"),l=h.options,c=h.element,u=l.containment,d=u instanceof 
t?u.get(0):/parent/.test(u)?c.parent().get(0):u;d&&(h.containerElement=t(d),/document/.test(u)||u===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:t(document),left:0,top:0,width:t(document).width(),height:t(document).height()||document.body.parentNode.scrollHeight}):(e=t(d),i=[],t(["Top","Right","Left","Bottom"]).each(function(t,s){i[t]=h._num(e.css("padding"+s))}),h.containerOffset=e.offset(),h.containerPosition=e.position(),h.containerSize={height:e.innerHeight()-i[3],width:e.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,o=h.containerSize.width,a=h._hasScroll(d,"left")?d.scrollWidth:o,r=h._hasScroll(d)?d.scrollHeight:n,h.parentData={element:d,left:s.left,top:s.top,width:a,height:r}))},resize:function(e){var i,s,n,o,a=t(this).resizable("instance"),r=a.options,h=a.containerOffset,l=a.position,c=a._aspectRatio||e.shiftKey,u={top:0,left:0},d=a.containerElement,p=!0;d[0]!==document&&/static/.test(d.css("position"))&&(u=h),l.left<(a._helper?h.left:0)&&(a.size.width=a.size.width+(a._helper?a.position.left-h.left:a.position.left-u.left),c&&(a.size.height=a.size.width/a.aspectRatio,p=!1),a.position.left=r.helper?h.left:0),l.top<(a._helper?h.top:0)&&(a.size.height=a.size.height+(a._helper?a.position.top-h.top:a.position.top),c&&(a.size.width=a.size.height*a.aspectRatio,p=!1),a.position.top=a._helper?h.top:0),n=a.containerElement.get(0)===a.element.parent().get(0),o=/relative|absolute/.test(a.containerElement.css("position")),n&&o?(a.offset.left=a.parentData.left+a.position.left,a.offset.top=a.parentData.top+a.position.top):(a.offset.left=a.element.offset().left,a.offset.top=a.element.offset().top),i=Math.abs(a.sizeDiff.width+(a._helper?a.offset.left-u.left:a.offset.left-h.left)),s=Math.abs(a.sizeDiff.height+(a._helper?a.offset.top-u.top:a.offset.top-h.top)),i+a.size.width>=a.parentData.width&&(a.size.width=a.parentData.width-i,c&&(a.size.height=a.size.width/a.aspectRatio,p=!1)),s+a.size.height>=a.parentData.height&&(a.size.height=a.parentData.height-s,c&&(a.size.width=a.size.height*a.aspectRatio,p=!1)),p||(a.position.left=a.prevPosition.left,a.position.top=a.prevPosition.top,a.size.width=a.prevSize.width,a.size.height=a.prevSize.height)},stop:function(){var e=t(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.containerPosition,o=e.containerElement,a=t(e.helper),r=a.offset(),h=a.outerWidth()-e.sizeDiff.width,l=a.outerHeight()-e.sizeDiff.height;e._helper&&!i.animate&&/relative/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l}),e._helper&&!i.animate&&/static/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),t.ui.plugin.add("resizable","alsoResize",{start:function(){var e=t(this).resizable("instance"),i=e.options;t(i.alsoResize).each(function(){var e=t(this);e.data("ui-resizable-alsoresize",{width:parseFloat(e.width()),height:parseFloat(e.height()),left:parseFloat(e.css("left")),top:parseFloat(e.css("top"))})})},resize:function(e,i){var s=t(this).resizable("instance"),n=s.options,o=s.originalSize,a=s.originalPosition,r={height:s.size.height-o.height||0,width:s.size.width-o.width||0,top:s.position.top-a.top||0,left:s.position.left-a.left||0};t(n.alsoResize).each(function(){var e=t(this),s=t(this).data("ui-resizable-alsoresize"),n={},o=e.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];t.each(o,function(t,e){var 
i=(s[e]||0)+(r[e]||0);i&&i>=0&&(n[e]=i||null)}),e.css(n)})},stop:function(){t(this).removeData("ui-resizable-alsoresize")}}),t.ui.plugin.add("resizable","ghost",{start:function(){var e=t(this).resizable("instance"),i=e.size;e.ghost=e.originalElement.clone(),e.ghost.css({opacity:.25,display:"block",position:"relative",height:i.height,width:i.width,margin:0,left:0,top:0}),e._addClass(e.ghost,"ui-resizable-ghost"),t.uiBackCompat!==!1&&"string"==typeof e.options.ghost&&e.ghost.addClass(this.options.ghost),e.ghost.appendTo(e.helper)},resize:function(){var e=t(this).resizable("instance");e.ghost&&e.ghost.css({position:"relative",height:e.size.height,width:e.size.width})},stop:function(){var e=t(this).resizable("instance");e.ghost&&e.helper&&e.helper.get(0).removeChild(e.ghost.get(0))}}),t.ui.plugin.add("resizable","grid",{resize:function(){var e,i=t(this).resizable("instance"),s=i.options,n=i.size,o=i.originalSize,a=i.originalPosition,r=i.axis,h="number"==typeof s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,c=h[1]||1,u=Math.round((n.width-o.width)/l)*l,d=Math.round((n.height-o.height)/c)*c,p=o.width+u,f=o.height+d,g=s.maxWidth&&p>s.maxWidth,m=s.maxHeight&&f>s.maxHeight,_=s.minWidth&&s.minWidth>p,v=s.minHeight&&s.minHeight>f;s.grid=h,_&&(p+=l),v&&(f+=c),g&&(p-=l),m&&(f-=c),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=a.top-d):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=a.left-u):((0>=f-c||0>=p-l)&&(e=i._getPaddingPlusBorderDimensions(this)),f-c>0?(i.size.height=f,i.position.top=a.top-d):(f=c-e.height,i.size.height=f,i.position.top=a.top+o.height-f),p-l>0?(i.size.width=p,i.position.left=a.left-u):(p=l-e.width,i.size.width=p,i.position.left=a.left+o.width-p))}}),t.ui.resizable,t.widget("ui.dialog",{version:"1.12.1",options:{appendTo:"body",autoOpen:!0,buttons:[],classes:{"ui-dialog":"ui-corner-all","ui-dialog-titlebar":"ui-corner-all"},closeOnEscape:!0,closeText:"Close",draggable:!0,hide:null,height:"auto",maxHeight:null,maxWidth:null,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(e){var i=t(this).css(e).offset().top;0>i&&t(this).css("top",e.top-i)}},resizable:!0,show:null,title:null,width:300,beforeClose:null,close:null,drag:null,dragStart:null,dragStop:null,focus:null,open:null,resize:null,resizeStart:null,resizeStop:null},sizeRelatedOptions:{buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},resizableRelatedOptions:{maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0},_create:function(){this.originalCss={display:this.element[0].style.display,width:this.element[0].style.width,minHeight:this.element[0].style.minHeight,maxHeight:this.element[0].style.maxHeight,height:this.element[0].style.height},this.originalPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.originalTitle=this.element.attr("title"),null==this.options.title&&null!=this.originalTitle&&(this.options.title=this.originalTitle),this.options.disabled&&(this.options.disabled=!1),this._createWrapper(),this.element.show().removeAttr("title").appendTo(this.uiDialog),this._addClass("ui-dialog-content","ui-widget-content"),this._createTitlebar(),this._createButtonPane(),this.options.draggable&&t.fn.draggable&&this._makeDraggable(),this.options.resizable&&t.fn.resizable&&this._makeResizable(),this._isOpen=!1,this._trackFocus()},_init:function(){this.options.autoOpen&&this.open()},_appendTo:function(){var 
e=this.options.appendTo;return e&&(e.jquery||e.nodeType)?t(e):this.document.find(e||"body").eq(0)},_destroy:function(){var t,e=this.originalPosition;this._untrackInstance(),this._destroyOverlay(),this.element.removeUniqueId().css(this.originalCss).detach(),this.uiDialog.remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),t=e.parent.children().eq(e.index),t.length&&t[0]!==this.element[0]?t.before(this.element):e.parent.append(this.element)},widget:function(){return this.uiDialog
-},disable:t.noop,enable:t.noop,close:function(e){var i=this;this._isOpen&&this._trigger("beforeClose",e)!==!1&&(this._isOpen=!1,this._focusedElement=null,this._destroyOverlay(),this._untrackInstance(),this.opener.filter(":focusable").trigger("focus").length||t.ui.safeBlur(t.ui.safeActiveElement(this.document[0])),this._hide(this.uiDialog,this.options.hide,function(){i._trigger("close",e)}))},isOpen:function(){return this._isOpen},moveToTop:function(){this._moveToTop()},_moveToTop:function(e,i){var s=!1,n=this.uiDialog.siblings(".ui-front:visible").map(function(){return+t(this).css("z-index")}).get(),o=Math.max.apply(null,n);return o>=+this.uiDialog.css("z-index")&&(this.uiDialog.css("z-index",o+1),s=!0),s&&!i&&this._trigger("focus",e),s},open:function(){var e=this;return this._isOpen?(this._moveToTop()&&this._focusTabbable(),void 0):(this._isOpen=!0,this.opener=t(t.ui.safeActiveElement(this.document[0])),this._size(),this._position(),this._createOverlay(),this._moveToTop(null,!0),this.overlay&&this.overlay.css("z-index",this.uiDialog.css("z-index")-1),this._show(this.uiDialog,this.options.show,function(){e._focusTabbable(),e._trigger("focus")}),this._makeFocusTarget(),this._trigger("open"),void 0)},_focusTabbable:function(){var t=this._focusedElement;t||(t=this.element.find("[autofocus]")),t.length||(t=this.element.find(":tabbable")),t.length||(t=this.uiDialogButtonPane.find(":tabbable")),t.length||(t=this.uiDialogTitlebarClose.filter(":tabbable")),t.length||(t=this.uiDialog),t.eq(0).trigger("focus")},_keepFocus:function(e){function i(){var e=t.ui.safeActiveElement(this.document[0]),i=this.uiDialog[0]===e||t.contains(this.uiDialog[0],e);i||this._focusTabbable()}e.preventDefault(),i.call(this),this._delay(i)},_createWrapper:function(){this.uiDialog=t("<div>").hide().attr({tabIndex:-1,role:"dialog"}).appendTo(this._appendTo()),this._addClass(this.uiDialog,"ui-dialog","ui-widget ui-widget-content ui-front"),this._on(this.uiDialog,{keydown:function(e){if(this.options.closeOnEscape&&!e.isDefaultPrevented()&&e.keyCode&&e.keyCode===t.ui.keyCode.ESCAPE)return e.preventDefault(),this.close(e),void 0;if(e.keyCode===t.ui.keyCode.TAB&&!e.isDefaultPrevented()){var i=this.uiDialog.find(":tabbable"),s=i.filter(":first"),n=i.filter(":last");e.target!==n[0]&&e.target!==this.uiDialog[0]||e.shiftKey?e.target!==s[0]&&e.target!==this.uiDialog[0]||!e.shiftKey||(this._delay(function(){n.trigger("focus")}),e.preventDefault()):(this._delay(function(){s.trigger("focus")}),e.preventDefault())}},mousedown:function(t){this._moveToTop(t)&&this._focusTabbable()}}),this.element.find("[aria-describedby]").length||this.uiDialog.attr({"aria-describedby":this.element.uniqueId().attr("id")})},_createTitlebar:function(){var e;this.uiDialogTitlebar=t("<div>"),this._addClass(this.uiDialogTitlebar,"ui-dialog-titlebar","ui-widget-header ui-helper-clearfix"),this._on(this.uiDialogTitlebar,{mousedown:function(e){t(e.target).closest(".ui-dialog-titlebar-close")||this.uiDialog.trigger("focus")}}),this.uiDialogTitlebarClose=t("<button 
type='button'></button>").button({label:t("<a>").text(this.options.closeText).html(),icon:"ui-icon-closethick",showLabel:!1}).appendTo(this.uiDialogTitlebar),this._addClass(this.uiDialogTitlebarClose,"ui-dialog-titlebar-close"),this._on(this.uiDialogTitlebarClose,{click:function(t){t.preventDefault(),this.close(t)}}),e=t("<span>").uniqueId().prependTo(this.uiDialogTitlebar),this._addClass(e,"ui-dialog-title"),this._title(e),this.uiDialogTitlebar.prependTo(this.uiDialog),this.uiDialog.attr({"aria-labelledby":e.attr("id")})},_title:function(t){this.options.title?t.text(this.options.title):t.html("&#160;")},_createButtonPane:function(){this.uiDialogButtonPane=t("<div>"),this._addClass(this.uiDialogButtonPane,"ui-dialog-buttonpane","ui-widget-content ui-helper-clearfix"),this.uiButtonSet=t("<div>").appendTo(this.uiDialogButtonPane),this._addClass(this.uiButtonSet,"ui-dialog-buttonset"),this._createButtons()},_createButtons:function(){var e=this,i=this.options.buttons;return this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),t.isEmptyObject(i)||t.isArray(i)&&!i.length?(this._removeClass(this.uiDialog,"ui-dialog-buttons"),void 0):(t.each(i,function(i,s){var n,o;s=t.isFunction(s)?{click:s,text:i}:s,s=t.extend({type:"button"},s),n=s.click,o={icon:s.icon,iconPosition:s.iconPosition,showLabel:s.showLabel,icons:s.icons,text:s.text},delete s.click,delete s.icon,delete s.iconPosition,delete s.showLabel,delete s.icons,"boolean"==typeof s.text&&delete s.text,t("<button></button>",s).button(o).appendTo(e.uiButtonSet).on("click",function(){n.apply(e.element[0],arguments)})}),this._addClass(this.uiDialog,"ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog),void 0)},_makeDraggable:function(){function e(t){return{position:t.position,offset:t.offset}}var i=this,s=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(s,n){i._addClass(t(this),"ui-dialog-dragging"),i._blockFrames(),i._trigger("dragStart",s,e(n))},drag:function(t,s){i._trigger("drag",t,e(s))},stop:function(n,o){var a=o.offset.left-i.document.scrollLeft(),r=o.offset.top-i.document.scrollTop();s.position={my:"left top",at:"left"+(a>=0?"+":"")+a+" "+"top"+(r>=0?"+":"")+r,of:i.window},i._removeClass(t(this),"ui-dialog-dragging"),i._unblockFrames(),i._trigger("dragStop",n,e(o))}})},_makeResizable:function(){function e(t){return{originalPosition:t.originalPosition,originalSize:t.originalSize,position:t.position,size:t.size}}var i=this,s=this.options,n=s.resizable,o=this.uiDialog.css("position"),a="string"==typeof n?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:s.maxWidth,maxHeight:s.maxHeight,minWidth:s.minWidth,minHeight:this._minHeight(),handles:a,start:function(s,n){i._addClass(t(this),"ui-dialog-resizing"),i._blockFrames(),i._trigger("resizeStart",s,e(n))},resize:function(t,s){i._trigger("resize",t,e(s))},stop:function(n,o){var a=i.uiDialog.offset(),r=a.left-i.document.scrollLeft(),h=a.top-i.document.scrollTop();s.height=i.uiDialog.height(),s.width=i.uiDialog.width(),s.position={my:"left top",at:"left"+(r>=0?"+":"")+r+" 
"+"top"+(h>=0?"+":"")+h,of:i.window},i._removeClass(t(this),"ui-dialog-resizing"),i._unblockFrames(),i._trigger("resizeStop",n,e(o))}}).css("position",o)},_trackFocus:function(){this._on(this.widget(),{focusin:function(e){this._makeFocusTarget(),this._focusedElement=t(e.target)}})},_makeFocusTarget:function(){this._untrackInstance(),this._trackingInstances().unshift(this)},_untrackInstance:function(){var e=this._trackingInstances(),i=t.inArray(this,e);-1!==i&&e.splice(i,1)},_trackingInstances:function(){var t=this.document.data("ui-dialog-instances");return t||(t=[],this.document.data("ui-dialog-instances",t)),t},_minHeight:function(){var t=this.options;return"auto"===t.height?t.minHeight:Math.min(t.minHeight,t.height)},_position:function(){var t=this.uiDialog.is(":visible");t||this.uiDialog.show(),this.uiDialog.position(this.options.position),t||this.uiDialog.hide()},_setOptions:function(e){var i=this,s=!1,n={};t.each(e,function(t,e){i._setOption(t,e),t in i.sizeRelatedOptions&&(s=!0),t in i.resizableRelatedOptions&&(n[t]=e)}),s&&(this._size(),this._position()),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option",n)},_setOption:function(e,i){var s,n,o=this.uiDialog;"disabled"!==e&&(this._super(e,i),"appendTo"===e&&this.uiDialog.appendTo(this._appendTo()),"buttons"===e&&this._createButtons(),"closeText"===e&&this.uiDialogTitlebarClose.button({label:t("<a>").text(""+this.options.closeText).html()}),"draggable"===e&&(s=o.is(":data(ui-draggable)"),s&&!i&&o.draggable("destroy"),!s&&i&&this._makeDraggable()),"position"===e&&this._position(),"resizable"===e&&(n=o.is(":data(ui-resizable)"),n&&!i&&o.resizable("destroy"),n&&"string"==typeof i&&o.resizable("option","handles",i),n||i===!1||this._makeResizable()),"title"===e&&this._title(this.uiDialogTitlebar.find(".ui-dialog-title")))},_size:function(){var t,e,i,s=this.options;this.element.show().css({width:"auto",minHeight:0,maxHeight:"none",height:0}),s.minWidth>s.width&&(s.width=s.minWidth),t=this.uiDialog.css({height:"auto",width:s.width}).outerHeight(),e=Math.max(0,s.minHeight-t),i="number"==typeof s.maxHeight?Math.max(0,s.maxHeight-t):"none","auto"===s.height?this.element.css({minHeight:e,maxHeight:i,height:"auto"}):this.element.height(Math.max(0,s.height-t)),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())},_blockFrames:function(){this.iframeBlocks=this.document.find("iframe").map(function(){var e=t(this);return t("<div>").css({position:"absolute",width:e.outerWidth(),height:e.outerHeight()}).appendTo(e.parent()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_allowInteraction:function(e){return t(e.target).closest(".ui-dialog").length?!0:!!t(e.target).closest(".ui-datepicker").length},_createOverlay:function(){if(this.options.modal){var e=!0;this._delay(function(){e=!1}),this.document.data("ui-dialog-overlays")||this._on(this.document,{focusin:function(t){e||this._allowInteraction(t)||(t.preventDefault(),this._trackingInstances()[0]._focusTabbable())}}),this.overlay=t("<div>").appendTo(this._appendTo()),this._addClass(this.overlay,null,"ui-widget-overlay ui-front"),this._on(this.overlay,{mousedown:"_keepFocus"}),this.document.data("ui-dialog-overlays",(this.document.data("ui-dialog-overlays")||0)+1)}},_destroyOverlay:function(){if(this.options.modal&&this.overlay){var 
-[minified jquery-ui.min.js (jQuery UI v1.12.1) elided: verbatim vendored source for the dialog overlay plus the droppable, progressbar, selectable, selectmenu, slider, sortable, spinner, and tabs widgets]
i.index(t)}),this._processTabs(),e.active!==!1&&this.anchors.length?this.active.length&&!t.contains(this.tablist[0],this.active[0])?this.tabs.length===e.disabled.length?(e.active=!1,this.active=t()):this._activate(this._findNextTab(Math.max(0,e.active-1),!1)):e.active=this.tabs.index(this.active):(e.active=!1,this.active=t()),this._refresh()},_refresh:function(){this._setOptionDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-hidden":"true"}),this.active.length?(this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}),this._addClass(this.active,"ui-tabs-active","ui-state-active"),this._getPanelForTab(this.active).show().attr({"aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var e=this,i=this.tabs,s=this.anchors,n=this.panels;this.tablist=this._getList().attr("role","tablist"),this._addClass(this.tablist,"ui-tabs-nav","ui-helper-reset ui-helper-clearfix ui-widget-header"),this.tablist.on("mousedown"+this.eventNamespace,"> li",function(e){t(this).is(".ui-state-disabled")&&e.preventDefault()}).on("focus"+this.eventNamespace,".ui-tabs-anchor",function(){t(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this.tabs=this.tablist.find("> li:has(a[href])").attr({role:"tab",tabIndex:-1}),this._addClass(this.tabs,"ui-tabs-tab","ui-state-default"),this.anchors=this.tabs.map(function(){return t("a",this)[0]}).attr({role:"presentation",tabIndex:-1}),this._addClass(this.anchors,"ui-tabs-anchor"),this.panels=t(),this.anchors.each(function(i,s){var n,o,a,r=t(s).uniqueId().attr("id"),h=t(s).closest("li"),l=h.attr("aria-controls");e._isLocal(s)?(n=s.hash,a=n.substring(1),o=e.element.find(e._sanitizeSelector(n))):(a=h.attr("aria-controls")||t({}).uniqueId()[0].id,n="#"+a,o=e.element.find(n),o.length||(o=e._createPanel(a),o.insertAfter(e.panels[i-1]||e.tablist)),o.attr("aria-live","polite")),o.length&&(e.panels=e.panels.add(o)),l&&h.data("ui-tabs-aria-controls",l),h.attr({"aria-controls":a,"aria-labelledby":r}),o.attr("aria-labelledby",r)}),this.panels.attr("role","tabpanel"),this._addClass(this.panels,"ui-tabs-panel","ui-widget-content"),i&&(this._off(i.not(this.tabs)),this._off(s.not(this.anchors)),this._off(n.not(this.panels)))},_getList:function(){return this.tablist||this.element.find("ol, ul").eq(0)},_createPanel:function(e){return t("<div>").attr("id",e).data("ui-tabs-destroy",!0)},_setOptionDisabled:function(e){var i,s,n;for(t.isArray(e)&&(e.length?e.length===this.anchors.length&&(e=!0):e=!1),n=0;s=this.tabs[n];n++)i=t(s),e===!0||-1!==t.inArray(n,e)?(i.attr("aria-disabled","true"),this._addClass(i,null,"ui-state-disabled")):(i.removeAttr("aria-disabled"),this._removeClass(i,null,"ui-state-disabled"));this.options.disabled=e,this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,e===!0)},_setupEvents:function(e){var i={};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(!0,this.anchors,{click:function(t){t.preventDefault()}}),this._on(this.anchors,i),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(e){var 
i,s=this.element.parent();"fill"===e?(i=s.height(),i-=this.element.outerHeight()-this.element.height(),this.element.siblings(":visible").each(function(){var e=t(this),s=e.css("position");"absolute"!==s&&"fixed"!==s&&(i-=e.outerHeight(!0))}),this.element.children().not(this.panels).each(function(){i-=t(this).outerHeight(!0)}),this.panels.each(function(){t(this).height(Math.max(0,i-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===e&&(i=0,this.panels.each(function(){i=Math.max(i,t(this).height("").height())}).height(i))},_eventHandler:function(e){var i=this.options,s=this.active,n=t(e.currentTarget),o=n.closest("li"),a=o[0]===s[0],r=a&&i.collapsible,h=r?t():this._getPanelForTab(o),l=s.length?this._getPanelForTab(s):t(),c={oldTab:s,oldPanel:l,newTab:r?t():o,newPanel:h};e.preventDefault(),o.hasClass("ui-state-disabled")||o.hasClass("ui-tabs-loading")||this.running||a&&!i.collapsible||this._trigger("beforeActivate",e,c)===!1||(i.active=r?!1:this.tabs.index(o),this.active=a?t():o,this.xhr&&this.xhr.abort(),l.length||h.length||t.error("jQuery UI Tabs: Mismatching fragment identifier."),h.length&&this.load(this.tabs.index(o),e),this._toggle(e,c))},_toggle:function(e,i){function s(){o.running=!1,o._trigger("activate",e,i)}function n(){o._addClass(i.newTab.closest("li"),"ui-tabs-active","ui-state-active"),a.length&&o.options.show?o._show(a,o.options.show,s):(a.show(),s())}var o=this,a=i.newPanel,r=i.oldPanel;this.running=!0,r.length&&this.options.hide?this._hide(r,this.options.hide,function(){o._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),n()}):(this._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),r.hide(),n()),r.attr("aria-hidden","true"),i.oldTab.attr({"aria-selected":"false","aria-expanded":"false"}),a.length&&r.length?i.oldTab.attr("tabIndex",-1):a.length&&this.tabs.filter(function(){return 0===t(this).attr("tabIndex")}).attr("tabIndex",-1),a.attr("aria-hidden","false"),i.newTab.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_activate:function(e){var i,s=this._findActive(e);s[0]!==this.active[0]&&(s.length||(s=this.active),i=s.find(".ui-tabs-anchor")[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return e===!1?t():this.tabs.eq(e)},_getIndex:function(e){return"string"==typeof e&&(e=this.anchors.index(this.anchors.filter("[href$='"+t.ui.escapeSelector(e)+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.tablist.removeAttr("role").off(this.eventNamespace),this.anchors.removeAttr("role tabIndex").removeUniqueId(),this.tabs.add(this.panels).each(function(){t.data(this,"ui-tabs-destroy")?t(this).remove():t(this).removeAttr("role tabIndex aria-live aria-busy aria-selected aria-labelledby aria-hidden aria-expanded")}),this.tabs.each(function(){var e=t(this),i=e.data("ui-tabs-aria-controls");i?e.attr("aria-controls",i).removeData("ui-tabs-aria-controls"):e.removeAttr("aria-controls")}),this.panels.show(),"content"!==this.options.heightStyle&&this.panels.css("height","")},enable:function(e){var i=this.options.disabled;i!==!1&&(void 0===e?i=!1:(e=this._getIndex(e),i=t.isArray(i)?t.map(i,function(t){return t!==e?t:null}):t.map(this.tabs,function(t,i){return i!==e?i:null})),this._setOptionDisabled(i))},disable:function(e){var i=this.options.disabled;if(i!==!0){if(void 0===e)i=!0;else{if(e=this._getIndex(e),-1!==t.inArray(e,i))return;i=t.isArray(i)?t.merge([e],i).sort():[e]}this._setOptionDisabled(i)}},load:function(e,i){e=this._getIndex(e);var 
s=this,n=this.tabs.eq(e),o=n.find(".ui-tabs-anchor"),a=this._getPanelForTab(n),r={tab:n,panel:a},h=function(t,e){"abort"===e&&s.panels.stop(!1,!0),s._removeClass(n,"ui-tabs-loading"),a.removeAttr("aria-busy"),t===s.xhr&&delete s.xhr};this._isLocal(o[0])||(this.xhr=t.ajax(this._ajaxSettings(o,i,r)),this.xhr&&"canceled"!==this.xhr.statusText&&(this._addClass(n,"ui-tabs-loading"),a.attr("aria-busy","true"),this.xhr.done(function(t,e,n){setTimeout(function(){a.html(t),s._trigger("load",i,r),h(n,e)},1)}).fail(function(t,e){setTimeout(function(){h(t,e)},1)})))},_ajaxSettings:function(e,i,s){var n=this;return{url:e.attr("href").replace(/#.*$/,""),beforeSend:function(e,o){return n._trigger("beforeLoad",i,t.extend({jqXHR:e,ajaxSettings:o},s))}}},_getPanelForTab:function(e){var i=t(e).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+i))}}),t.uiBackCompat!==!1&&t.widget("ui.tabs",t.ui.tabs,{_processTabs:function(){this._superApply(arguments),this._addClass(this.tabs,"ui-tab")}}),t.ui.tabs,t.widget("ui.tooltip",{version:"1.12.1",options:{classes:{"ui-tooltip":"ui-corner-all ui-widget-shadow"},content:function(){var e=t(this).attr("title")||"";return t("<a>").text(e).html()},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left bottom",collision:"flipfit flip"},show:!0,track:!1,close:null,open:null},_addDescribedBy:function(e,i){var s=(e.attr("aria-describedby")||"").split(/\s+/);s.push(i),e.data("ui-tooltip-id",i).attr("aria-describedby",t.trim(s.join(" ")))},_removeDescribedBy:function(e){var i=e.data("ui-tooltip-id"),s=(e.attr("aria-describedby")||"").split(/\s+/),n=t.inArray(i,s);-1!==n&&s.splice(n,1),e.removeData("ui-tooltip-id"),s=t.trim(s.join(" ")),s?e.attr("aria-describedby",s):e.removeAttr("aria-describedby")},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.liveRegion=t("<div>").attr({role:"log","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this.disabledTitles=t([])},_setOption:function(e,i){var s=this;this._super(e,i),"content"===e&&t.each(this.tooltips,function(t,e){s._updateContent(e.element)})},_setOptionDisabled:function(t){this[t?"_disable":"_enable"]()},_disable:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur");n.target=n.currentTarget=s.element[0],e.close(n,!0)}),this.disabledTitles=this.disabledTitles.add(this.element.find(this.options.items).addBack().filter(function(){var e=t(this);return e.is("[title]")?e.data("ui-tooltip-title",e.attr("title")).removeAttr("title"):void 0}))},_enable:function(){this.disabledTitles.each(function(){var e=t(this);e.data("ui-tooltip-title")&&e.attr("title",e.data("ui-tooltip-title"))}),this.disabledTitles=t([])},open:function(e){var i=this,s=t(e?e.target:this.element).closest(this.options.items);s.length&&!s.data("ui-tooltip-id")&&(s.attr("title")&&s.data("ui-tooltip-title",s.attr("title")),s.data("ui-tooltip-open",!0),e&&"mouseover"===e.type&&s.parents().each(function(){var e,s=t(this);s.data("ui-tooltip-open")&&(e=t.Event("blur"),e.target=e.currentTarget=this,i.close(e,!0)),s.attr("title")&&(s.uniqueId(),i.parents[this.id]={element:this,title:s.attr("title")},s.attr("title",""))}),this._registerCloseHandlers(e,s),this._updateContent(s,e))},_updateContent:function(t,e){var i,s=this.options.content,n=this,o=e?e.type:null;return"string"==typeof 
s||s.nodeType||s.jquery?this._open(e,t,s):(i=s.call(t[0],function(i){n._delay(function(){t.data("ui-tooltip-open")&&(e&&(e.type=o),this._open(e,t,i))})}),i&&this._open(e,t,i),void 0)},_open:function(e,i,s){function n(t){l.of=t,a.is(":hidden")||a.position(l)}var o,a,r,h,l=t.extend({},this.options.position);if(s){if(o=this._find(i))return o.tooltip.find(".ui-tooltip-content").html(s),void 0;i.is("[title]")&&(e&&"mouseover"===e.type?i.attr("title",""):i.removeAttr("title")),o=this._tooltip(i),a=o.tooltip,this._addDescribedBy(i,a.attr("id")),a.find(".ui-tooltip-content").html(s),this.liveRegion.children().hide(),h=t("<div>").html(a.find(".ui-tooltip-content").html()),h.removeAttr("name").find("[name]").removeAttr("name"),h.removeAttr("id").find("[id]").removeAttr("id"),h.appendTo(this.liveRegion),this.options.track&&e&&/^mouse/.test(e.type)?(this._on(this.document,{mousemove:n}),n(e)):a.position(t.extend({of:i},this.options.position)),a.hide(),this._show(a,this.options.show),this.options.track&&this.options.show&&this.options.show.delay&&(r=this.delayedShow=setInterval(function(){a.is(":visible")&&(n(l.of),clearInterval(r))},t.fx.interval)),this._trigger("open",e,{tooltip:a})}},_registerCloseHandlers:function(e,i){var s={keyup:function(e){if(e.keyCode===t.ui.keyCode.ESCAPE){var s=t.Event(e);s.currentTarget=i[0],this.close(s,!0)}}};i[0]!==this.element[0]&&(s.remove=function(){this._removeTooltip(this._find(i).tooltip)}),e&&"mouseover"!==e.type||(s.mouseleave="close"),e&&"focusin"!==e.type||(s.focusout="close"),this._on(!0,i,s)},close:function(e){var i,s=this,n=t(e?e.currentTarget:this.element),o=this._find(n);return o?(i=o.tooltip,o.closing||(clearInterval(this.delayedShow),n.data("ui-tooltip-title")&&!n.attr("title")&&n.attr("title",n.data("ui-tooltip-title")),this._removeDescribedBy(n),o.hiding=!0,i.stop(!0),this._hide(i,this.options.hide,function(){s._removeTooltip(t(this))}),n.removeData("ui-tooltip-open"),this._off(n,"mouseleave focusout keyup"),n[0]!==this.element[0]&&this._off(n,"remove"),this._off(this.document,"mousemove"),e&&"mouseleave"===e.type&&t.each(this.parents,function(e,i){t(i.element).attr("title",i.title),delete s.parents[e]}),o.closing=!0,this._trigger("close",e,{tooltip:i}),o.hiding||(o.closing=!1)),void 0):(n.removeData("ui-tooltip-open"),void 0)},_tooltip:function(e){var i=t("<div>").attr("role","tooltip"),s=t("<div>").appendTo(i),n=i.uniqueId().attr("id");return this._addClass(s,"ui-tooltip-content"),this._addClass(i,"ui-tooltip","ui-widget ui-widget-content"),i.appendTo(this._appendTo(e)),this.tooltips[n]={element:e,tooltip:i}},_find:function(t){var e=t.data("ui-tooltip-id");return e?this.tooltips[e]:null},_removeTooltip:function(t){t.remove(),delete this.tooltips[t.attr("id")]},_appendTo:function(t){var e=t.closest(".ui-front, dialog");return e.length||(e=this.document[0].body),e},_destroy:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur"),o=s.element;n.target=n.currentTarget=o[0],e.close(n,!0),t("#"+i).remove(),o.data("ui-tooltip-title")&&(o.attr("title")||o.attr("title",o.data("ui-tooltip-title")),o.removeData("ui-tooltip-title"))}),this.liveRegion.remove()}}),t.uiBackCompat!==!1&&t.widget("ui.tooltip",t.ui.tooltip,{options:{tooltipClass:null},_tooltip:function(){var t=this._superApply(arguments);return this.options.tooltipClass&&t.tooltip.addClass(this.options.tooltipClass),t}}),t.ui.tooltip});
\ No newline at end of file
diff --git a/spaces/zhaoyuzhaoyu/stabilityai-stable-diffusion-xl-base-1.0/app.py b/spaces/zhaoyuzhaoyu/stabilityai-stable-diffusion-xl-base-1.0/app.py
deleted file mode 100644
index 9520517f687cf7229ddfab9d8c5f8af7f76b0bd4..0000000000000000000000000000000000000000
--- a/spaces/zhaoyuzhaoyu/stabilityai-stable-diffusion-xl-base-1.0/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
\ No newline at end of file
diff --git a/spaces/zhoupin30/zhoupin30/src/components/chat-suggestions.tsx b/spaces/zhoupin30/zhoupin30/src/components/chat-suggestions.tsx
deleted file mode 100644
index 48aec7c84e4407c482acdfcc7857fb0f660d12d3..0000000000000000000000000000000000000000
--- a/spaces/zhoupin30/zhoupin30/src/components/chat-suggestions.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import React, { useEffect } from 'react'
-import Image from 'next/image'
-import HelpIcon from '@/assets/images/help.svg'
-import { SuggestedResponse } from '@/lib/bots/bing/types'
-import { useBing } from '@/lib/hooks/use-bing'
-import { atom, useAtom } from 'jotai'
-
-type Suggestions = SuggestedResponse[]
-const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资讯', '必应如何使用 AI?'].map((text) => ({ text })) // "Why won't you respond to certain topics?", "Tell me more about Bing", "How does Bing use AI?"
-const suggestionsAtom = atom<Suggestions>([])
-
-type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
-
-export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
-  const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
-  const toggleSuggestions = () => {
-    if (currentSuggestions === helpSuggestions) {
-      setSuggestions(suggestions)
-    } else {
-      setSuggestions(helpSuggestions)
-    }
-  }
-
-  // Syncing state and scrolling are side effects, so useEffect is the
-  // appropriate hook here rather than useMemo.
-  useEffect(() => {
-    setSuggestions(suggestions)
-    window.scrollBy(0, 2000)
-  }, [suggestions.length, setSuggestions])
-
-  return currentSuggestions?.length ? (
-    <div className="py-6">
-      <div className="suggestion-items">
-        <button className="rai-button" type="button" aria-label="这是什么?" onClick={toggleSuggestions}>
-          <Image alt="help" src={HelpIcon} width={24} />
-        </button>
-        {
-          currentSuggestions.map(suggestion => (
-            <button key={suggestion.text} className="body-1-strong suggestion-container" type="button" onClick={() => setInput(suggestion.text)}>
-              {suggestion.text}
-            </button>
-          ))
-        }
-      </div>
-    </div>
-  ) : null
-}
diff --git a/spaces/zhoupin30/zhoupin30/src/components/voice.tsx b/spaces/zhoupin30/zhoupin30/src/components/voice.tsx
deleted file mode 100644
index ab886394487445e4b0675770b76096bba0e61b0e..0000000000000000000000000000000000000000
--- a/spaces/zhoupin30/zhoupin30/src/components/voice.tsx
+++ /dev/null
@@ -1,52 +0,0 @@
-import React, { useEffect } from 'react'
-import { useSetAtom } from 'jotai'
-import { useBing } from '@/lib/hooks/use-bing'
-import Image from 'next/image'
-import VoiceIcon from '@/assets/images/voice.svg'
-import VoiceButton from './ui/voice'
-import { SR } from '@/lib/bots/bing/sr'
-import { voiceListenAtom } from '@/state'
-
-const sr = new SR(['发送', '清空', '退出']) // voice commands: "send", "clear", "exit"
-
-const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick<ReturnType<typeof useBing>, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => {
-  const setListen = useSetAtom(voiceListenAtom)
-  useEffect(() => {
-    if (sr.listening) return
-    sr.transcript = !isSpeaking
-  }, [isSpeaking])
-
-  useEffect(() => {
-    sr.onchange = (msg: string, command?: string) => {
-      switch (command) {
-        case '退出': // "exit"
-          sr.stop()
-          break;
-        case '发送': // "send"
-          sendMessage(input)
-          // falls through: clear the input after sending
-        case '清空': // "clear"
-          setInput('')
-          break;
-        default:
-          setInput(input + msg)
-      }
-    }
-  }, [input, setInput, sendMessage])
-
-  const switchSR = (enable: boolean = false) => {
-    setListen(enable)
-    if (enable) {
-      sr.start()
-    } else {
-      sr.stop()
-    }
-  }
-
-  return sr.listening ? (
-    <VoiceButton onClick={() => switchSR(false)} />
-  ) : (
-    <Image alt="start voice" src={VoiceIcon} width={24} className="-mt-0.5" onClick={() => switchSR(true)} />
-  )
-};
-
-export default Voice;
diff --git a/spaces/zhoupin30/zhoupin30/src/lib/storage.ts b/spaces/zhoupin30/zhoupin30/src/lib/storage.ts
deleted file mode 100644
index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000
--- a/spaces/zhoupin30/zhoupin30/src/lib/storage.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { getMany, set, del, clear } from 'idb-keyval';
-
-export const Storage = {
-  async get(key: string | string[] | null): Promise<any> {
-    if (key === null) return null;
-    if (typeof key === 'string') {
-      key = [key]
-    }
-    const returnData: Record<string, any> = {}
-    const values = await getMany(key)
-    key.forEach((k, idx)=> {
-      returnData[k] = values[idx]
-    })
-    return returnData;
-  },
-  async set(object: any) {
-    for (const key of Object.keys(object)) {
-      await set(key, object[key])
-    }
-  },
-  async remove(key: string) {
-    return del(key);
-  },
-  async clear() {
-    return clear();
-  }
-}
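-
-// Usage sketch (illustrative, not part of the original module): values persist
-// in IndexedDB via idb-keyval, and `get` resolves to a { key: value } record.
-//   await Storage.set({ token: 'abc', theme: 'dark' })
-//   const { token, theme } = await Storage.get(['token', 'theme'])
-//   await Storage.remove('token')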
diff --git a/spaces/zixian/Zhenhuan-VITS/data_utils.py b/spaces/zixian/Zhenhuan-VITS/data_utils.py
deleted file mode 100644
index c6c8dee9d157161f2082484b89bdb282364e2a0e..0000000000000000000000000000000000000000
--- a/spaces/zixian/Zhenhuan-VITS/data_utils.py
+++ /dev/null
@@ -1,267 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-import torchaudio
-
-import commons
-from mel_processing import spectrogram_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import text_to_sequence, cleaned_text_to_sequence
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
-    """
-        1) loads audio, speaker_id, text pairs
-        2) normalizes text and converts it to sequences of integers
-        3) computes spectrograms from audio files.
-    """
-
-    def __init__(self, audiopaths_sid_text, hparams, symbols):
-        self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
-        self.text_cleaners = hparams.text_cleaners
-        self.max_wav_value = hparams.max_wav_value
-        self.sampling_rate = hparams.sampling_rate
-        self.filter_length = hparams.filter_length
-        self.hop_length = hparams.hop_length
-        self.win_length = hparams.win_length
-        self.sampling_rate = hparams.sampling_rate
-
-        self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
-        self.add_blank = hparams.add_blank
-        self.min_text_len = getattr(hparams, "min_text_len", 1)
-        self.max_text_len = getattr(hparams, "max_text_len", 190)
-        self.symbols = symbols
-
-        random.seed(1234)
-        random.shuffle(self.audiopaths_sid_text)
-        self._filter()
-
-    def _filter(self):
-        """
-        Filter text & store spec lengths
-        """
-        # Store spectrogram lengths for Bucketing
-        # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
-        # spec_length = wav_length // hop_length
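-        # e.g. (illustrative): a 10 s, 22.05 kHz, 16-bit mono wav is ~441,000 bytes
-        # -> ~220,500 samples; with hop_length=256 that is ~861 spectrogram frames.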
-
-        audiopaths_sid_text_new = []
-        lengths = []
-        for audiopath, sid, text in self.audiopaths_sid_text:
-            # audiopath = "./user_voice/" + audiopath
-
-            if self.min_text_len <= len(text) <= self.max_text_len:
-                audiopaths_sid_text_new.append([audiopath, sid, text])
-                lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
-        self.audiopaths_sid_text = audiopaths_sid_text_new
-        self.lengths = lengths
-
-    def get_audio_text_speaker_pair(self, audiopath_sid_text):
-        # separate filename, speaker_id and text
-        audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
-        text = self.get_text(text)
-        spec, wav = self.get_audio(audiopath)
-        sid = self.get_sid(sid)
-        return (text, spec, wav, sid)
-
-    def get_audio(self, filename):
-        # audio, sampling_rate = load_wav_to_torch(filename)
-        # if sampling_rate != self.sampling_rate:
-        #     raise ValueError("{} {} SR doesn't match target {} SR".format(
-        #         sampling_rate, self.sampling_rate))
-        # audio_norm = audio / self.max_wav_value if audio.max() > 10 else audio
-        # audio_norm = audio_norm.unsqueeze(0)
-        audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
-        # spec_filename = filename.replace(".wav", ".spec.pt")
-        # if os.path.exists(spec_filename):
-        #     spec = torch.load(spec_filename)
-        # else:
-        #     try:
-        spec = spectrogram_torch(audio_norm, self.filter_length,
-                                 self.sampling_rate, self.hop_length, self.win_length,
-                                 center=False)
-        spec = spec.squeeze(0)
-            # except NotImplementedError:
-            #     print("?")
-            # spec = torch.squeeze(spec, 0)
-            # torch.save(spec, spec_filename)
-        return spec, audio_norm
-
-    def get_text(self, text):
-        if self.cleaned_text:
-            text_norm = cleaned_text_to_sequence(text, self.symbols)
-        else:
-            text_norm = text_to_sequence(text, self.text_cleaners)
-        if self.add_blank:
-            text_norm = commons.intersperse(text_norm, 0)
-        text_norm = torch.LongTensor(text_norm)
-        return text_norm
-
-    def get_sid(self, sid):
-        sid = torch.LongTensor([int(sid)])
-        return sid
-
-    def __getitem__(self, index):
-        return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
-    def __len__(self):
-        return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
-    """ Zero-pads model inputs and targets
-    """
-
-    def __init__(self, return_ids=False):
-        self.return_ids = return_ids
-
-    def __call__(self, batch):
-        """Collate's training batch from normalized text, audio and speaker identities
-        PARAMS
-        ------
-        batch: [text_normalized, spec_normalized, wav_normalized, sid]
-        """
-        # Right zero-pad all one-hot text sequences to max input length
-        _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]),
-            dim=0, descending=True)
-
-        max_text_len = max([len(x[0]) for x in batch])
-        max_spec_len = max([x[1].size(1) for x in batch])
-        max_wav_len = max([x[2].size(1) for x in batch])
-
-        text_lengths = torch.LongTensor(len(batch))
-        spec_lengths = torch.LongTensor(len(batch))
-        wav_lengths = torch.LongTensor(len(batch))
-        sid = torch.LongTensor(len(batch))
-
-        text_padded = torch.LongTensor(len(batch), max_text_len)
-        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-        text_padded.zero_()
-        spec_padded.zero_()
-        wav_padded.zero_()
-        for i in range(len(ids_sorted_decreasing)):
-            row = batch[ids_sorted_decreasing[i]]
-
-            text = row[0]
-            text_padded[i, :text.size(0)] = text
-            text_lengths[i] = text.size(0)
-
-            spec = row[1]
-            spec_padded[i, :, :spec.size(1)] = spec
-            spec_lengths[i] = spec.size(1)
-
-            wav = row[2]
-            wav_padded[i, :, :wav.size(1)] = wav
-            wav_lengths[i] = wav.size(1)
-
-            sid[i] = row[3]
-
-        if self.return_ids:
-            return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
-        return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
-    """
-    Maintain similar input lengths in a batch.
-    Length groups are specified by boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
-    It removes samples whose lengths fall outside the boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
-    """
-
-    def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
-        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
-        self.lengths = dataset.lengths
-        self.batch_size = batch_size
-        self.boundaries = boundaries
-
-        self.buckets, self.num_samples_per_bucket = self._create_buckets()
-        self.total_size = sum(self.num_samples_per_bucket)
-        self.num_samples = self.total_size // self.num_replicas
-
-    def _create_buckets(self):
-        buckets = [[] for _ in range(len(self.boundaries) - 1)]
-        for i in range(len(self.lengths)):
-            length = self.lengths[i]
-            idx_bucket = self._bisect(length)
-            if idx_bucket != -1:
-                buckets[idx_bucket].append(i)
-
-        for i in range(len(buckets) - 1, 0, -1):
-            if len(buckets[i]) == 0:
-                buckets.pop(i)
-                self.boundaries.pop(i + 1)
-
-        num_samples_per_bucket = []
-        for i in range(len(buckets)):
-            len_bucket = len(buckets[i])
-            total_batch_size = self.num_replicas * self.batch_size
-            rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
-            num_samples_per_bucket.append(len_bucket + rem)
-        return buckets, num_samples_per_bucket
-
-    def __iter__(self):
-        # deterministically shuffle based on epoch
-        g = torch.Generator()
-        g.manual_seed(self.epoch)
-
-        indices = []
-        if self.shuffle:
-            for bucket in self.buckets:
-                indices.append(torch.randperm(len(bucket), generator=g).tolist())
-        else:
-            for bucket in self.buckets:
-                indices.append(list(range(len(bucket))))
-
-        batches = []
-        for i in range(len(self.buckets)):
-            bucket = self.buckets[i]
-            len_bucket = len(bucket)
-            ids_bucket = indices[i]
-            num_samples_bucket = self.num_samples_per_bucket[i]
-
-            # add extra samples to make it evenly divisible
-            rem = num_samples_bucket - len_bucket
-            ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
-            # subsample
-            ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
-            # batching
-            for j in range(len(ids_bucket) // self.batch_size):
-                batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
-                batches.append(batch)
-
-        if self.shuffle:
-            batch_ids = torch.randperm(len(batches), generator=g).tolist()
-            batches = [batches[i] for i in batch_ids]
-        self.batches = batches
-
-        assert len(self.batches) * self.batch_size == self.num_samples
-        return iter(self.batches)
-
-    def _bisect(self, x, lo=0, hi=None):
-        if hi is None:
-            hi = len(self.boundaries) - 1
-
-        if hi > lo:
-            mid = (hi + lo) // 2
-            if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
-                return mid
-            elif x <= self.boundaries[mid]:
-                return self._bisect(x, lo, mid)
-            else:
-                return self._bisect(x, mid + 1, hi)
-        else:
-            return -1
-
-    def __len__(self):
-        return self.num_samples // self.batch_size
\ No newline at end of file
diff --git a/spaces/zox47/succinctly-text2image-prompt-generator/README.md b/spaces/zox47/succinctly-text2image-prompt-generator/README.md
deleted file mode 100644
index a8f44d74eb57813ce2fad0d805a0929f2d7ce239..0000000000000000000000000000000000000000
--- a/spaces/zox47/succinctly-text2image-prompt-generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Succinctly Text2image Prompt Generator
-emoji: 📊
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference