diff --git a/spaces/109peko/anime-remove-background/app.py b/spaces/109peko/anime-remove-background/app.py
deleted file mode 100644
index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000
--- a/spaces/109peko/anime-remove-background/app.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import gradio as gr
-import huggingface_hub
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-
-def get_mask(img, s=1024):
-    img = (img / 255).astype(np.float32)
-    h, w = h0, w0 = img.shape[:-1]
-    h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
-    ph, pw = s - h, s - w
-    img_input = np.zeros([s, s, 3], dtype=np.float32)
-    img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
-    img_input = np.transpose(img_input, (2, 0, 1))
-    img_input = img_input[np.newaxis, :]
-    mask = rmbg_model.run(None, {'img': img_input})[0][0]
-    mask = np.transpose(mask, (1, 2, 0))
-    mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
-    mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
-    return mask
-
-
-def rmbg_fn(img):
-    mask = get_mask(img)
-    img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
-    mask = (mask * 255).astype(np.uint8)
-    img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
-    mask = mask.repeat(3, axis=2)
-    return mask, img
-
-
-if __name__ == "__main__":
-    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
-    model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
-    rmbg_model = rt.InferenceSession(model_path, providers=providers)
-    app = gr.Blocks()
-    with app:
-        gr.Markdown("# Anime Remove Background\n\n"
-                    "\n\n"
-                    "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="input image")
-                examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
-                examples = gr.Dataset(components=[input_img], samples=examples_data)
-                run_btn = gr.Button(variant="primary")
-                output_mask = gr.Image(label="mask")
-                output_img = gr.Image(label="result", image_mode="RGBA")
-        examples.click(lambda x: x[0], [examples], [input_img])
-        run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
-    app.launch()
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Astute Graphics Plugins Keygen Torrent Create Stunning Artwork in Illustrator with These Plug-ins.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Astute Graphics Plugins Keygen Torrent Create Stunning Artwork in Illustrator with These Plug-ins.md
deleted file mode 100644
index 92cb87cdff0ffdefa7a91feb282ced03446cfe08..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Astute Graphics Plugins Keygen Torrent Create Stunning Artwork in Illustrator with These Plug-ins.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
If you are a graphic designer, illustrator, or vector artist, you probably use Adobe Illustrator as your main tool for creating stunning artworks. But did you know that you can enhance your workflow and creativity with Astute Graphics Plugins? These are a set of plugins that add new features and functions to Adobe Illustrator, making it easier, faster, and more fun to work with vectors. In this article, we will show you what Astute Graphics Plugins are, what they can do for you, and how to download and install them using a keygen torrent.
-DOWNLOAD ->>> https://byltly.com/2uKwTb
Astute Graphics Plugins are a collection of plugins that extend the capabilities of Adobe Illustrator. They are developed by Astute Graphics, a company that specializes in creating software solutions for vector design. Astute Graphics Plugins are designed to help you work smarter and faster with vectors, by adding new tools, effects, panels, and shortcuts to Adobe Illustrator. They also improve the performance and stability of Adobe Illustrator, by reducing memory usage and crashes.
-By using Astute Graphics Plugins, you can enjoy many benefits such as:
-Astute Graphics Plugins offer a wide range of features that cover different aspects of vector design. Some of the most popular features include:
-A keygen torrent is a type of file that contains a software program (keygen) that generates serial numbers or activation codes for another software program (in this case Astute Graphics Plugins). A keygen torrent also contains a torrent file that helps you download the keygen from other users who have already downloaded it.
-Using a keygen torrent has some advantages and disadvantages that you should be aware of before downloading one:
-Advantages | -Disadvantages | -
---|---|
You can get access to premium software for free or at a low cost. | -You may violate the intellectual property rights of the software developers or publishers. | -
You can test the software before buying it or find an alternative if you cannot afford it. | You may expose your computer or device to viruses, malware, spyware, ransomware, or other malicious programs that may harm your data or system. |
You can bypass the online activation or registration process that may require personal information or payment details. | You may not receive updates, bug fixes, technical support, or customer service from the software developers or publishers. |
You can share the software with other users who may benefit from it or appreciate it. | You may face legal consequences such as fines, lawsuits, or criminal charges if you are caught using or distributing pirated software. |
Using a keygen torrent involves some risks that you should be careful about when downloading one:
-Astute Graphics Plugins Crack Download
-How to Install Astute Graphics Plugins for Free
-Astute Graphics Plugins Serial Number Generator
-Astute Graphics Plugins Full Version Torrent
-Astute Graphics Plugins Activation Code Online
-Astute Graphics Plugins License Key Free
-Astute Graphics Plugins Patch File Download
-Astute Graphics Plugins Registration Code Torrent
-Astute Graphics Plugins Product Key Crack
-Astute Graphics Plugins Keygen Mac Torrent
-Astute Graphics Plugins Crack Windows Download
-Astute Graphics Plugins Free Trial Torrent
-Astute Graphics Plugins Premium Version Crack
-Astute Graphics Plugins Discount Code Torrent
-Astute Graphics Plugins Coupon Code Crack
-Astute Graphics Plugins Review and Download
-Astute Graphics Plugins Features and Benefits
-Astute Graphics Plugins Tutorial and Tips
-Astute Graphics Plugins Alternatives and Comparisons
-Astute Graphics Plugins Support and Help
-Astute Graphics Plugins Update and Upgrade
-Astute Graphics Plugins Compatibility and Requirements
-Astute Graphics Plugins System and Performance
-Astute Graphics Plugins Security and Privacy
-Astute Graphics Plugins Quality and Reliability
-Astute Graphics Plugins Customer and User Feedback
-Astute Graphics Plugins Testimonials and Ratings
-Astute Graphics Plugins Awards and Recognition
-Astute Graphics Plugins Pricing and Plans
-Astute Graphics Plugins Refund and Guarantee
-Astute Graphics Plugins Trial and Demo
-Astute Graphics Plugins Download Link and Instructions
-Astute Graphics Plugins Installation Guide and Troubleshooting
-Astute Graphics Plugins Activation Process and Steps
-Astute Graphics Plugins License Agreement and Terms of Service
-Astute Graphics Plugins FAQ and Answers
-Astute Graphics Plugins Blog and News
-Astute Graphics Plugins Forum and Community
-Astute Graphics Plugins Videos and Webinars
-Astute Graphics Plugins eBooks and Guides
-Astute Graphics Plugins Courses and Training
-Astute Graphics Plugins Webinars and Events
-Astute Graphics Plugins Podcasts and Interviews
-Astute Graphics Plugins Case Studies and Success Stories
-Astute Graphics Plugins Infographics and Statistics
-Astute Graphics Plugins Templates and Resources
-Astute Graphics Plugins Tools and Software
-Astute Graphics Plugins Apps and Extensions
-Astute Graphics Plugins Games and Fun
To reduce the risks of using a keygen torrent, you should take some precautions such as:
-Now that you know what Astute Graphics Plugins and keygen torrents are, you may wonder how to download and install them on your computer. Here are the steps to follow:
-To make the most out of Astute Graphics Plugins Keygen Torrent, you should also follow some tips and tricks such as:
-Astute Graphics Plugins are amazing tools that can enhance your workflow and creativity with Adobe Illustrator. However, they are not free and require a license to use them fully. If you want to try them out without paying for them, you can use a keygen torrent to generate serial numbers or activation codes for them. However, you should be aware of the drawbacks and risks of using a keygen torrent. You should also take some precautions to protect yourself and your computer from malware, legal issues, or other problems. However, if you like Astute Graphics Plugins and want to support the developers, you should consider buying a license from their official website. This way, you can enjoy the full features and benefits of Astute Graphics Plugins without any worries.
-In this article, we have shown you what Astute Graphics Plugins are, what they can do for you, and how to download and install them using a keygen torrent. We have also discussed the advantages and disadvantages of using a keygen torrent, as well as the risks and precautions of using one. We hope that this article has been helpful and informative for you. However, we do not encourage or endorse piracy or illegal use of software. If you want to use Astute Graphics Plugins legally and ethically, you should buy a license from their official website. This way, you can support the developers and enjoy the best plugins for Adobe Illustrator.
-Here are some frequently asked questions about Astute Graphics Plugins Keygen Torrent:
-Astute Graphics Plugins require Adobe Illustrator CS6 or higher (including CC versions) and Windows 7 or higher or Mac OS X 10.10 or higher.
-Astute Graphics Plugins offer different pricing plans depending on your needs. You can buy individual plugins for $39-$119 each, or buy bundles of plugins for $119-$399 each. You can also subscribe to Astute Graphics Plugins Elite Bundle for $119 per year, which gives you access to all plugins and updates.
-Astute Graphics Plugins offer various support options for their customers. You can contact their support team via email, phone, chat, or social media. You can also visit their website for tutorials, manuals, videos, blogs, forums, webinars, and FAQs.
-Astute Graphics Plugins are generally compatible with other plugins or software that work with Adobe Illustrator. However, some plugins or software may cause conflicts or issues with Astute Graphics Plugins. If you encounter any problems, you should contact the support team of both parties for assistance.
-Astute Graphics Plugins allow you to activate your license on up to two computers at a time. However, you cannot use them simultaneously on both computers. If you want to use them on more than two computers, you need to buy more licenses or deactivate your license on one computer before activating it on another.
The inclusion of more complex topics such as multiculturalism, competencies, and leadership is a welcome added feature. All in all, the content is solid, the principles are sound, and the use of resources and case studies is very helpful. This book provides an excellent reference and learning tool for students and professionals in the HRM field, and is a must-have for any HRM course.
-Download File ✦ https://imgfil.com/2uxZTo
The book includes all of the major HR functional areas and topics covered in most HRM textbooks. To their credit, the author chose to include several additional sections (such as communication, management & leadership styles, and multiculturalism) that are not found in traditional HRM texts. There have been several key legislative changes that have impacted the field of HRM since the text's last update. Discussions of the Affordable Care Act and recent changes to the FLSA should be added to subsequent releases. I was not able to locate an index or glossary per se; however, a list of references is provided at the end of each major topic.
Professor Khanka has so far written 6 reference and 10 text books and published over five dozen research papers. His areas of academic interest include Entrepreneurship, Organizational Behaviour, Human Resource Management, Ethics and Values. Professor Khanka has also served as Visiting Professor at the Bishkek International School of Management and Business, Bishkek, Kyrgyzstan, and the National University of Mongolia, Ulaanbaatar, Mongolia.
-
This book is an important resource that serves as a benchmark for HRM. Its author has published a number of successful books and authored numerous articles in national and international journals, including "Human Resources Management", "Public Personnel Management", "International Journal of Human Resource Management" and "Journal of Human Resource Management". The book will be an excellent reference for those seeking to gain a grounding in HRM. The design, structure, materials and academic quality of this book make it a recommended one.
- 899543212bDOWNLOAD ✔✔✔ https://imgfil.com/2uy0w3
Download ✪✪✪ https://imgfil.com/2uxYFI
-
-
-
-AI Home Camera APK: What You Need to Know
-Introduction
-If you are looking for a smart and easy way to monitor your home security, you might want to check out AI home camera apk. This is an app that turns your smartphone or tablet into a powerful and intelligent home CCTV system. With AI home camera apk, you can access your camera remotely, adjust the settings and features, view and manage your recordings, and enjoy many other benefits.
-ai home camera apk
Download ★★★★★ https://urlin.us/2uSWEp
-But what exactly is AI home camera apk? Why do you need it? And how can you download and install it on your device? In this article, we will answer these questions and more. We will also show you how to use AI home camera apk, what are its features and benefits, what are its pros and cons, and some frequently asked questions about it. So, let's get started!
-How to Download and Install AI Home Camera APK
-AI home camera apk is an app that is available for Android devices. You can find it on Google Play Store or other third-party sources. Here are the steps to download and install AI home camera apk on your device:
-
-- Go to Google Play Store or any other trusted source that offers AI home camera apk.
-- Tap on the download button and wait for the app to be downloaded on your device.
-- Once the download is complete, tap on the install button and follow the instructions to install the app on your device.
-- Grant the necessary permissions to the app, such as access to your camera, microphone, storage, location, etc.
-- Launch the app and create an account or log in with your existing account.
-
-Congratulations! You have successfully downloaded and installed AI home camera apk on your device. Now, you can start using it to monitor your home security.
-How to Use AI Home Camera APK
-AI home camera apk is very easy to use. Here are the steps to use AI home camera apk:
-
-- Connect your device to a power source and place it in a suitable location where you want to monitor your home. Make sure that the camera lens is not obstructed by anything and that it has a clear view of the area.
-- Open the app and tap on the camera icon to access your camera remotely. You can also add multiple devices and switch between them easily.
-- Tap on the settings icon to adjust the settings and features of your camera. You can change the resolution, quality, frame rate, motion detection, night vision, audio, etc. You can also enable or disable notifications, alerts, cloud storage, etc.
-- Tap on the recordings icon to view and manage your recordings. You can play, pause, rewind, fast forward, delete, or share your recordings. You can also filter them by date, time, duration, or event.
-
-That's it! You have learned how to use AI home camera apk to monitor your home security. Now, let's see what are the features and benefits of AI home camera apk.
-Features and Benefits of AI Home Camera APK
-AI home camera apk is not just a simple home CCTV app. It is a smart and intelligent app that uses artificial intelligence to enhance your security. Here are some of the features and benefits of AI home camera apk:
-
-- AI Detection: AI home camera apk can detect human faces, motions, sounds, and events in real time. It can also recognize familiar and unfamiliar faces and alert you accordingly. It can also distinguish between pets, animals, vehicles, and other objects and ignore them if you want.
-- AI Recording: AI home camera apk can record only when something important happens. It can save your storage space and bandwidth by recording only when it detects an event or a motion. It can also record in high definition and low light conditions.
-- AI Storage: AI home camera apk can store your recordings securely in the cloud or locally on your device. You can choose how long you want to keep your recordings and how much space you want to allocate for them. You can also access your recordings anytime and anywhere from any device.
-- AI Sharing: AI home camera apk can share your recordings with anyone you want. You can invite your family members, friends, or neighbors to view your camera or recordings. You can also share your recordings on social media or other platforms with a simple tap.
-
-These are some of the features and benefits of AI home camera apk that make it stand out from other apps. But as with any app, there are also some pros and cons of AI home camera apk that you should know before using it.
-Pros and Cons of AI Home Camera APK
-AI home camera apk is a great app for home security, but it is not perfect. Here are some of the pros and cons of AI home camera apk that you should consider:
-
-Pros Cons
-- It is free and easy to use - It requires a stable internet connection
-- It uses artificial intelligence to enhance security - It may have some false alarms or miss some events
-- It offers multiple settings and features - It may drain your battery or overheat your device
-- It supports cloud and local storage - It may have some privacy or security issues
-- It allows sharing and collaboration - It may have some compatibility or performance issues
-
- These are some of the pros and cons of AI home camera apk that you should weigh before using it. However, most of these cons can be overcome or avoided by following some tips and tricks, such as: - Use a reliable and fast internet connection to ensure smooth streaming and uploading of your camera and recordings. - Adjust the sensitivity and frequency of your motion detection and alerts to avoid false alarms or miss some events. - Plug your device to a power source or use a power bank to avoid battery drain or overheating of your device. - Enable encryption and password protection for your app and recordings to prevent unauthorized access or hacking of your data. - Update your app and device regularly to fix any bugs or glitches that may affect the compatibility or performance of your app. By following these tips and tricks, you can enjoy the best of AI home camera apk without worrying about the cons.
Conclusion
-AI home camera apk is an app that turns your smartphone or tablet into a smart and intelligent home CCTV system. It allows you to access your camera remotely, adjust the settings and features, view and manage your recordings, and share them with anyone you want. It also uses artificial intelligence to detect human faces, motions, sounds, and events in real time. It can also record only when something important happens, store your recordings securely in the cloud or locally on your device, and recognize familiar and unfamiliar faces.
-ai home camera app download for android
-ai home camera apk mod free download
-ai home camera apk latest version 2023
-ai home camera app for pc windows 10
-ai home camera apk pro unlocked
-ai home camera app review and rating
-ai home camera apk download for firestick
-ai home camera app not working fix
-ai home camera apk premium features
-ai home camera app for ios iphone ipad
-ai home camera apk cracked full version
-ai home camera app setup and installation guide
-ai home camera apk no ads no watermark
-ai home camera app for mac os x
-ai home camera apk hack cheat codes
-ai home camera app best alternatives and competitors
-ai home camera apk update and changelog
-ai home camera app troubleshooting and support
-ai home camera apk offline mode and cloud storage
-ai home camera app for smart tv and roku
-ai home camera apk unlimited access and subscription
-ai home camera app features and benefits
-ai home camera apk security and privacy policy
-ai home camera app for linux and chrome os
-ai home camera apk tips and tricks
-ai home camera app comparison and contrast with other apps
-ai home camera apk refund and cancellation policy
-ai home camera app customer service and feedback
-ai home camera apk requirements and compatibility
-ai home camera app for amazon echo and google home
-ai home camera apk discount and coupon codes
-ai home camera app testimonials and success stories
-ai home camera apk advantages and disadvantages
-ai home camera app faq and help center
-ai home camera apk how to use and tutorial videos
-ai home camera app awards and recognition
-ai home camera apk developer and contact information
-ai home camera app license and terms of service
-ai home camera app quality and performance
-ai home camera app for samsung galaxy and huawei phones
-AI home camera apk is a great app for home security, but it also has some pros and cons that you should consider before using it. However, most of these cons can be overcome or avoided by following some tips and tricks that we have shared with you in this article.
-If you are looking for a smart and easy way to monitor your home security, you should definitely give AI home camera apk a try. You can download it from Google Play Store or other trusted sources and install it on your device in a few minutes. You can also find more information or reviews about AI home camera apk on the internet or contact the developer or support team if you have any questions or issues.
-Thank you for reading this article. We hope you found it helpful and informative. If you have any feedback or suggestions, please let us know in the comments section below. We would love to hear from you!
-FAQs
-Here are some frequently asked questions about AI home camera apk:
-Q1: Is AI home camera apk free or paid?
-A1: AI home camera apk is free to download and use. However, it may offer some in-app purchases or subscriptions for premium features or services, such as cloud storage, advanced settings, etc.
-Q2: Is AI home camera apk compatible with all devices?
-A2: AI home camera apk is compatible with most Android devices that have a camera and an internet connection. However, some devices may not support some features or functions of the app due to hardware or software limitations.
-Q3: Is AI home camera apk safe and secure?
-A3: AI home camera apk is safe and secure to use. It uses encryption and password protection to protect your data from unauthorized access or hacking. It also respects your privacy and does not collect or share any personal information without your consent.
-Q4: How can I contact the developer or support team of AI home camera apk?
-A4: You can contact the developer or support team of AI home camera apk by sending an email to aihomecamera@gmail.com or by visiting their website at https://aihomecamera.com/.
-Q5: Where can I find more information or reviews about AI home camera apk?
-A5: You can find more information or reviews about AI home camera apk on Google Play Store, where you can also rate and review the app yourself. You can also search for AI home camera apk on the internet or social media platforms, where you can find articles, blogs, videos, forums, etc. that talk about the app.
-
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Android Oyun Clubta Durak Online 3D Para Hilesi Nasl Yaplr? te Admlar.md b/spaces/1phancelerku/anime-remove-background/Android Oyun Clubta Durak Online 3D Para Hilesi Nasl Yaplr? te Admlar.md
deleted file mode 100644
index a0ce9afd0ddbb68774f7d61e547e62538b05284a..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Android Oyun Clubta Durak Online 3D Para Hilesi Nasl Yaplr? te Admlar.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-Durak Online Para Hilesi Android Oyun Club: How to Play Durak Online and Win Money
-If you are looking for a card game that is fun, challenging, and rewarding, then you should try Durak Online. Durak Online is a modern version of the famous card game that originated in Russia and is also popular in Turkey. In this article, we will tell you everything you need to know about Durak Online, how to play it, how to win money in it, and how to use Durak Online para hilesi android oyun club, a cheat tool that can generate unlimited coins and items for you.
- What is Durak Online?
-Durak Online is a popular card game in Russia and Turkey
-Durak means "fool" in Russian, and the name of the game refers to the last player who is left with cards in their hand at the end of the game. Durak is a card game that involves skill, strategy, and luck. It can be played by 2 to 6 players, using a deck of 24, 36, or 52 cards. The game is also known as "Attack-Defence" or "Beat off" in English.
-durak online para hilesi android oyun club
Download Zip 🌟 https://jinyurl.com/2uNPbU
-Durak Online has different modes and settings
-Durak Online is a multiplayer game that allows you to play online with real people from all over the world. You can choose from different modes and settings of the game, such as flip-flop, transferable, or classic. You can also customize the deck size, the number of players, the time limit, and the difficulty level. You can play with friends or random opponents, chat with them, send them gifts, and compete in the ratings and leagues.
-Durak Online is available on Google Play for Android devices
-Durak Online is a free app that you can download from Google Play for your Android device. The app has great 3D graphics, realistic sounds, smooth animations, and no intrusive ads. The app also has a user-friendly interface with landscape mode. You can link your account to your Google account and sync your progress across devices. You can also play offline against bots if you don't have an internet connection.
- How to play Durak Online?
-The objective of Durak Online is to get rid of all your cards
-The objective of Durak Online is to get rid of all your cards before your opponents do. The game starts with one player being the dealer, who shuffles the deck and deals six cards to each player. The dealer then reveals the bottom card of the remaining deck, which determines the trump suit for the game. The trump suit is the strongest suit in the game, and can beat any other suit. The player to the left of the dealer is the first attacker, and the player to their left is the first defender.
-The rules of Durak Online are simple and easy to learn
-The rules of Durak Online are similar to the rules of the original card game. The game consists of a series of attacks and defences, where each player tries to get rid of their cards by playing them on the table. The attacker plays one or more cards of the same rank, and the defender has to beat them with higher cards of the same suit or with trumps. If the defender succeeds, they can pass the attack to the next player, who becomes the new defender. If the defender fails, they have to take all the cards on the table and add them to their hand. The game ends when one player has no cards left in their hand or in the deck.
-The tips and tricks of Durak Online are to use your trumps wisely and to throw-in more cards
-The tips and tricks of Durak Online are to use your trumps wisely and to throw-in more cards. Trumps are very valuable in Durak Online, as they can beat any other suit. You should save your trumps for when you really need them, and avoid wasting them on low cards. You should also try to get rid of your non-trump cards as soon as possible, as they can become useless later in the game. Another tip is to throw-in more cards when you are not the attacker or the defender. Throwing-in means playing a card of the same rank as any card on the table, which adds more pressure on the defender and helps you get rid of your cards faster. However, you should be careful not to throw-in too many cards, as you might run out of options later.
- How to win money in Durak Online?
-Durak Online has a championship mode where you can compete with other players for prizes
-Durak Online has a championship mode where you can compete with other players for prizes. The championship mode is a tournament where you have to play a series of games against different opponents and accumulate points based on your performance. The more games you win, the more points you get. The top players at the end of the tournament will receive prizes such as coins, items, or even real money. You can enter the championship mode by paying an entry fee with coins or watching an ad.
-Durak Online has a gift system where you can send and receive coins and items
-Durak Online has a gift system where you can send and receive coins and items. Coins are the currency of Durak Online, which you can use to enter games, buy items, or exchange for real money. Items are special cards that have different effects on the game, such as changing the trump suit, skipping a turn, or revealing an opponent's card. You can send and receive coins and items with your friends or random players in Durak Online, as a way of showing your appreciation or friendship. You can also earn coins and items by completing daily tasks, watching ads, or inviting friends.
-durak online 3d para hilesi android oyun club
-durak online kart oyunu para hilesi android oyun club
-durak online apk para hilesi android oyun club
-durak online mod para hilesi android oyun club
-durak online indir para hilesi android oyun club
-durak online hileli apk android oyun club
-durak online hileli mod android oyun club
-durak online hileli indir android oyun club
-durak online hileli kart oyunu android oyun club
-durak online hileli 3d android oyun club
-durak online 3d hileli apk android oyun club
-durak online 3d hileli mod android oyun club
-durak online 3d hileli indir android oyun club
-durak online 3d hileli kart oyunu android oyun club
-durak online kart oyunu hileli apk android oyun club
-durak online kart oyunu hileli mod android oyun club
-durak online kart oyunu hileli indir android oyun club
-durak online kart oyunu hileli 3d android oyun club
-durak online apk hileli indir android oyun club
-durak online apk hileli mod android oyun club
-durak online apk hileli kart oyunu android oyun club
-durak online apk hileli 3d android oyun club
-durak online mod hileli indir android oyun club
-durak online mod hileli apk android oyun club
-durak online mod hileli kart oyunu android oyun club
-durak online mod hileli 3d android oyun club
-durak online indir hileli apk android oyun club
-durak online indir hileli mod android oyun club
-durak online indir hileli kart oyunu android oyun club
-durak online indir hileli 3d android oyun club
-durak kart para kazanma hilesi android oyun club
-durak kart para çekme hilesi android oyun club
-durak kart para yatırma hilesi android oyun club
-durak kart para veren siteler android oyun club
-durak kart para veren uygulamalar android oyun club
-durak kart para veren oyuncular android oyun club
-durak kart para veren yorumlar android oyun club
-durak kart para veren incelemeler android oyun club
-durak kart para veren güvenilir mi android oyun club
-durak kart para veren nasıl yapılır android oyun club
-durak kart para kazanma yolları android oyun club
-durak kart para kazanma taktikleri android oyun club
-durak kart para kazanma ipuçları android oyun club
-durak kart para kazanma yöntemleri android oyun club
-durak kart para kazanma siteleri android oyun club
-durak kart para kazanma uygulamaları android oyun club
-Durak Online has a leaderboard where you can rank up and earn rewards
-Durak Online has a leaderboard where you can rank up and earn rewards. The leaderboard is a list of players who have achieved the highest scores in Durak Online. You can see your own rank and score, as well as other players' ranks and scores. You can also see your statistics, such as your win rate, your average score, your best score, and your total games played. By playing more games and winning more points, you can improve your rank and score in Durak Online. You can also earn rewards such as coins, items, or badges based on your rank.
- How to use Durak Online para hilesi android oyun club?
-Durak Online para hilesi android oyun club is a cheat tool that can generate unlimited coins and items for Durak Online
-Durak Online para hilesi android oyun club is a cheat tool that can generate unlimited coins and items for Durak Online. Durak Online para hilesi android oyun club is a website that offers a hack for Durak Online that can give you access to unlimited resources in the game. By using this hack, you can get as many coins and items as you want without spending any real money. Durak Online para hilesi android oyun club is a great way to enhance your gaming experience and enjoy Durak Online more.
-Durak Online para hilesi android oyun club is easy to use and does not require root or jailbreak
-Durak Online para hilesi android oyun club is easy to use and does not require root or jailbreak. Durak Online para hilesi android oyun club is a web-based tool that works on any browser and device. You do not need to download or install anything on your device, or to modify your system settings. All you need to do is to visit the website, enter your username or email, select the amount of coins and items you want, and click on the generate button. The hack will then process your request and transfer the resources to your account in a few minutes.
-Durak Online para hilesi android oyun club is safe and undetectable by the game developers
-Durak Online para hilesi android oyun club is safe and undetectable by the game developers. Durak Online para hilesi android oyun club uses advanced encryption and proxy servers to protect your account and data from any harm or detection. Durak Online para hilesi android oyun club also has a built-in anti-ban system that prevents your account from being banned or suspended by the game developers. Durak Online para hilesi android oyun club is tested and updated regularly to ensure its functionality and compatibility with the latest version of Durak Online.
- Conclusion
-Durak Online is a fun and addictive card game that you can play online with real people
-Durak Online is a fun and addictive card game that you can play online with real people. Durak Online is a modern version of the famous card game that originated in Russia and is also popular in Turkey. Durak Online has different modes and settings that you can choose from, such as flip-flop, transferable, or classic. Durak Online has great 3D graphics, realistic sounds, smooth animations, and no intrusive ads. Durak Online is a free app that you can download from Google Play for your Android device.
-Durak Online can help you improve your skills, strategy, and luck in card games
-Durak Online can help you improve your skills, strategy, and luck in card games. Durak Online is a card game that involves skill, strategy, and luck. The objective of Durak Online is to get rid of all your cards before your opponents do. The rules of Durak Online are simple and easy to learn, but the game is challenging and exciting. The tips and tricks of Durak Online are to use your trumps wisely, to throw-in more cards, and to be aware of your opponents' cards. By playing more games and winning more points, you can improve your rank and score in Durak Online.
-Durak Online para hilesi android oyun club can help you get more coins and items to enhance your gaming experience
-Durak Online para hilesi android oyun club can help you get more coins and items to enhance your gaming experience. Durak Online para hilesi android oyun club is a cheat tool that can generate unlimited coins and items for Durak Online. By using this hack, you can get as many coins and items as you want without spending any real money. You can use these coins and items to enter games, buy items, or exchange for real money. You can also use these coins and items to send and receive gifts with other players, or to compete in the championship mode for prizes.
- FAQs
-Q: What is the best way to learn how to play Durak Online?
-A: The best way to learn how to play Durak Online is to watch the tutorial videos in the app, or to play against bots in the offline mode. You can also read the rules of the game in the app, or ask other players for advice in the chat.
-Q: How can I get more coins and items in Durak Online without using Durak Online para hilesi android oyun club?
-A: You can get more coins and items in Durak Online without using Durak Online para hilesi android oyun club by completing daily tasks, watching ads, inviting friends, or winning games.
-Q: Is Durak Online para hilesi android oyun club legal?
-A: Durak Online para hilesi android oyun club is not legal, as it violates the terms of service of Durak Online. Using this hack may result in your account being banned or suspended by the game developers. We do not recommend using this hack, as it may ruin the fun and fairness of the game.
-Q: How can I contact the support team of Durak Online if I have any issues or questions?
-A: You can contact the support team of Durak Online by sending an email to support@durakonline.com, or by filling out the feedback form in the app. You can also visit the official website of Durak Online at www.durakonline.com, or follow their social media accounts on Facebook, Twitter, and Instagram.
-Q: Can I play Durak Online on other devices besides Android?
-A: Yes, you can play Durak Online on other devices besides Android. Durak Online is also available on iOS, Windows, and Mac devices. You can download the app from the App Store, the Microsoft Store, or the official website of Durak Online. You can also play Durak Online on your browser by visiting www.durakonline.com.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Como baixar e instalar bhop pro no seu computador com BlueStacks.md b/spaces/1phancelerku/anime-remove-background/Como baixar e instalar bhop pro no seu computador com BlueStacks.md
deleted file mode 100644
index 54cdbd91425704aea944b0e42307245f6f7f0d55..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Como baixar e instalar bhop pro no seu computador com BlueStacks.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-How to Download and Play Bhop Pro on Your PC or Mobile Device
-If you are a fan of Counter-Strike or other first-person shooter games, you might have heard of or tried the bunny hop technique. This is a skill that allows you to move faster and more unpredictably by jumping and strafing in the air. It can give you an edge over your opponents in competitive matches, or simply make you feel like a pro.
-download do bhop pro
DOWNLOAD ❤❤❤ https://jinyurl.com/2uNMF0
-But what if you want to practice your bunny hop skills without having to join a server or play online? What if you want to enjoy a fun and challenging game mode that tests your reflexes and coordination? Well, you are in luck, because there is a game that lets you do just that. It is called Bhop Pro, and it is available for both PC and mobile devices.
-In this article, we will show you how to download and play Bhop Pro on your PC or mobile device. We will also give you some tips and tricks on how to improve your skills and have more fun in this game. Let's get started!
- What is Bhop Pro?
-A brief introduction to the game and its features
-Bhop Pro is a simulation game developed by begma, a Turkish indie game studio. It is based on the popular bhopping genre, which originated from Counter-Strike and other FPS games. In this game, you have to jump on blocks and platforms, while using air strafing to gain speed and momentum. You can also perform tricks like surfing, sliding, wall jumping, and more.
-Bhop Pro has many features that make it a realistic and enjoyable bunny hop game for Android devices. Some of these features are:
-download do bhop pro para pc
-download do bhop pro apk
-download do bhop pro mod
-download do bhop pro android
-download do bhop pro online
-download do bhop pro gratis
-download do bhop pro simulator
-download do bhop pro windows 10
-download do bhop pro bluestacks
-download do bhop pro mac
-download do bhop pro latest version
-download do bhop pro nox
-download do bhop pro app
-download do bhop pro game
-download do bhop pro hack
-download do bhop pro ios
-download do bhop pro play store
-download do bhop pro baixar
-download do bhop pro uptodown
-download do bhop pro pc windows 7
-download do bhop pro pc windows 8
-download do bhop pro pc windows xp
-download do bhop pro emulator
-download do bhop pro for laptop
-download do bhop pro for desktop
-download do bhop pro free fire
-download do bhop pro google play
-download do bhop pro apk pure
-download do bhop pro apk mod
-download do bhop pro apk latest version
-download do bhop pro apk mirror
-download do bhop pro apk offline
-download do bhop pro apk android 1
-download do bhop pro apk android 2.3.6
-download do bhop pro apk android 4.4.2
-download do bhop pro mod menu
-download do bhop pro mod apk unlimited money
-download do bhop pro mod apk latest version
-download do bhop pro mod apk revdl
-download do bhop pro mod apk happymod
-download do bhop pro android 1.com
-download do bhop pro android oyun club
-download do bhop pro online multiplayer
-download do bhop pro online free no downloads
-download do bhop pro online unblocked
-download do bhapro gratis para pc
-downloaddo bhapro gratis para android
-
-- Multiple game modes, including parkour, surf, speedrun, deathrun, random, and infinite.
-- Various maps with different themes, difficulties, layouts, and obstacles.
-- Online multiplayer mode (alpha) where you can compete with other players around the world.
-- Rank system where you can earn new titles and badges by completing quests and challenges.
-- Case opener where you can get new skins and items for your character, knife, gloves, spinner, etc.
-- Portal system where you can create your own custom maps and share them with other players.
-- In-game online chat where you can communicate with other players.
-- Screenshot sharing where you can capture your best moments and share them with your friends.
-
- The benefits of playing Bhop Pro on PC or mobile device
-Bhop Pro is a game that can be played on both PC and mobile devices. Depending on your preference and convenience, you can enjoy different benefits of playing Bhop Pro on each platform. Some of these benefits are:
-
-
-Platform
-Benefits
-
-
-PC
-
-
-- Better graphics and performance.
-- Larger screen and higher resolution.
-- Easier and more precise controls with keyboard and mouse.
-- Access to Steam community and features.
-
-
-
-
-Mobile device
-
-
-- Portable and convenient.
-- Touchscreen and gyro controls for more immersion.
-- Compatible with most Android devices.
-- Free to download and play.
-
-
-
-
- How to Download and Install Bhop Pro on PC
-The steps to download and install BlueStacks emulator on PC
-If you want to play Bhop Pro on your PC, you will need an Android emulator that can run the game smoothly and efficiently. One of the best emulators for this purpose is BlueStacks, which is a free and powerful software that allows you to play thousands of Android games and apps on your PC. Here are the steps to download and install BlueStacks on your PC:
-
-- Go to the official website of BlueStacks at [bluestacks.com] and click on the "Download BlueStacks" button.
-- Wait for the download to finish and then run the installer file.
-- Follow the instructions on the screen to complete the installation process.
-- Launch BlueStacks and sign in with your Google account or create a new one.
-- You are now ready to use BlueStacks and download Bhop Pro from Google Play Store or Steam.
-
- The steps to download and install Bhop Pro from Google Play Store or Steam on PC
-Once you have BlueStacks installed on your PC, you can easily download and install Bhop Pro from Google Play Store or Steam. Here are the steps to do so:
- From Google Play Store:
-
-- Open BlueStacks and click on the Google Play Store icon on the home screen.
-- In the search bar, type "Bhop Pro" and hit enter.
-- Select the game from the list of results and click on the "Install" button.
-- Wait for the installation to finish and then click on the "Open" button.
-- You can now play Bhop Pro on your PC using BlueStacks.
-
- From Steam:
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Conflict of Nations WW3 Mod APK Lead Your Country to Victory.md b/spaces/1phancelerku/anime-remove-background/Conflict of Nations WW3 Mod APK Lead Your Country to Victory.md
deleted file mode 100644
index 0f4e11f436b59c7111be5d2c961739b78d50e7f4..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Conflict of Nations WW3 Mod APK Lead Your Country to Victory.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-Conflict of Nations World War 3 Mod Apk: What You Need to Know
-If you are a fan of modern military-themed strategy games, you might have heard of Conflict of Nations World War 3, a real-time strategy game that lets you take control of the armed forces of your nation and compete with other players for global dominance. But did you know that there is a mod apk version of this game that can give you unlimited gold, resources, or other benefits? In this article, we will tell you what you need to know about Conflict of Nations World War 3 mod apk, how to download and install it, how to play it, and some tips and tricks to help you win the war.
-conflict of nations world war 3 mod apk
DOWNLOAD ☆☆☆ https://jinyurl.com/2uNN4I
-What is Conflict of Nations World War 3?
-A real-time strategy game set in modern global conflicts
-Conflict of Nations World War 3 is a game developed by Dorado Games and Bytro Labs that simulates modern global conflicts in a realistic and immersive way. You can choose from almost every nation in the world and lead its military expansion, technological research, and economic development. You can also join or create coalitions with other players and wage total war on the battlefields of World War 3.
-A game that requires strategic planning, diplomacy, and resource management
-Conflict of Nations World War 3 is not a game for the faint-hearted. It requires strategic planning, diplomacy, and resource management to succeed. You have to balance your military power with your economic efficiency, your technological superiority with your moral stability, and your offensive capabilities with your defensive measures. You also have to deal with other players who may be your allies or enemies, depending on the situation. You have to communicate, negotiate, trade, spy, sabotage, or attack them as you see fit.
-A game that offers different military doctrines, nations, and scenarios
-Conflict of Nations World War 3 offers a variety of options for customization and replayability. You can choose from three different military doctrines: Western, Eastern, or European. Each doctrine has its own advantages and disadvantages, as well as its own technology tree and iconic units. You can also choose from different nations, each with its own unique traits, map location, and map size. You can also join different scenarios, ranging from the cold war to modern global conflicts to terrorist insurgencies.
-What is a mod apk?
-A modified version of an Android application
-A mod apk is a modified version of an Android application that has been altered by someone other than the original developer. A mod apk can offer features or functions that are not available in the official version of the application. For example, a mod apk for Conflict of Nations World War 3 can give you unlimited gold, resources, or other advantages that can help you dominate the game.
-A mod apk that can offer unlimited gold, resources, or other advantages
-One of the main reasons why some players look for a mod apk for Conflict of Nations World War 3 is to get unlimited gold, resources, or other advantages. Gold is the premium currency of the game that can be used to speed up research, construction, or recruitment. Resources are the basic materials that are needed to build and maintain your army and economy. Other advantages can include unlocking all units and technologies, increasing your morale and stability, or disabling ads and pop-ups.
-conflict of nations ww3 mod apk download
-conflict of nations world war 3 hack apk
-conflict of nations ww3 game mod apk unlimited gold
-conflict of nations world war 3 android mod apk
-conflict of nations ww3 mod apk latest version
-conflict of nations world war 3 strategy game mod apk
-conflict of nations ww3 mod apk free download
-conflict of nations world war 3 cheats apk
-conflict of nations ww3 mod apk offline
-conflict of nations world war 3 modded apk
-conflict of nations ww3 mod apk 2023
-conflict of nations world war 3 apk mod menu
-conflict of nations ww3 mod apk no root
-conflict of nations world war 3 unlimited money apk
-conflict of nations ww3 mod apk for pc
-conflict of nations world war 3 premium mod apk
-conflict of nations ww3 mod apk online
-conflict of nations world war 3 full mod apk
-conflict of nations ww3 mod apk revdl
-conflict of nations world war 3 cracked apk
-conflict of nations ww3 mod apk rexdl
-conflict of nations world war 3 hack tool apk
-conflict of nations ww3 mod apk obb
-conflict of nations world war 3 generator apk
-conflict of nations ww3 mod apk update
-conflict of nations world war 3 pro mod apk
-conflict of nations ww3 mod apk unlimited resources
-conflict of nations world war 3 vip mod apk
-conflict of nations ww3 mod apk android 1
-conflict of nations world war 3 mega mod apk
-conflict of nations ww3 mod apk android oyun club
-conflict of nations world war 3 god mode apk
-conflict of nations ww3 mod apk happymod
-conflict of nations world war 3 unlocked apk
-conflict of nations ww3 mod apk pure
-conflict of nations world war 3 patcher apk
-conflict of nations ww3 mod apk platinmods
-conflict of nations world war 3 cheat engine apk
-conflict of nations ww3 mod apk an1
-conflict of nations world war 3 hack version apk
-A mod apk that can also pose risks such as malware, bans, or legal issues
-However, a mod apk for Conflict of Nations World War 3 can also pose risks such as malware, bans, or legal issues. Malware is a malicious software that can harm your device or steal your personal information. Bans are the penalties that the game developers or moderators can impose on players who violate the game rules or terms of service. Legal issues are the potential lawsuits or fines that the game developers or owners can file against players who infringe their intellectual property rights or damage their reputation.
-How to download and install Conflict of Nations World War 3 mod apk?
-Find a reliable source for the mod apk file
-The first step to download and install Conflict of Nations World War 3 mod apk is to find a reliable source for the mod apk file. You can search online for websites or forums that offer mod apk files for various games. However, you have to be careful and cautious when choosing a source, as some of them may contain viruses, malware, or fake files. You can check the reviews, ratings, comments, or feedback from other users to verify the credibility and safety of the source.
-Enable unknown sources on your device settings
-The second step to download and install Conflict of Nations World War 3 mod apk is to enable unknown sources on your device settings. This is because most Android devices do not allow the installation of applications from sources other than the official Google Play Store. To enable unknown sources, you have to go to your device settings, then security, then unknown sources, and then toggle it on. You may also have to grant some permissions or accept some warnings before proceeding.
-Download and install the mod apk file
-The third step to download and install Conflict of Nations World War 3 mod apk is to download and install the mod apk file. You can do this by clicking on the download link or button from the source website or forum. You may have to wait for a few seconds or minutes for the download to complete. After that, you can open the downloaded file and follow the instructions to install it on your device. You may have to overwrite or uninstall the original version of the game before installing the mod apk version.
-How to play Conflict of Nations World War 3 mod apk?
-Choose your nation and doctrine
-The first step to play Conflict of Nations World War 3 mod apk is to choose your nation and doctrine. You can select from almost every nation in the world and lead its military expansion, technological research, and economic development. You can also choose from three different military doctrines: Western, Eastern, or European. Each doctrine has its own advantages and disadvantages, as well as its own technology tree and iconic units.
-Build your army and economy
-The second step to play Conflict of Nations World War 3 mod apk is to build your army and economy. You have to balance your military power with your economic efficiency, your technological superiority with your moral stability, and your offensive capabilities with your defensive measures. You have to produce and manage resources such as oil, food, metal, rare earths, components, money, manpower, and electricity. You also have to research and upgrade your units and technologies such as infantry, tanks, aircraft, ships, submarines, missiles, drones, satellites, nuclear weapons, and more.
-Form alliances and wage war
-The third step to play Conflict of Nations World War 3 mod apk is to form alliances and wage war. You can join or create coalitions with other players and cooperate or compete with them for global dominance. You can also communicate, negotiate, trade, spy, sabotage, or attack other players as you see fit. You have to use diplomacy and communication wisely, as your actions and words can have consequences. You also have to be prepared for the challenges and threats that may arise from the dynamic and unpredictable world of World War 3.
-Tips and tricks for Conflict of Nations World War 3 mod apk
-Know your nation's strengths and weaknesses
-One of the tips and tricks for Conflict of Nations World War 3 mod apk is to know your nation's strengths and weaknesses. Each nation has its own unique traits, map location, and map size that can affect its performance and strategy. For example, some nations have access to more resources, some have better geographical advantages, some have stronger military units, and some have faster research speed. You have to use your nation's strengths to your advantage and compensate for its weaknesses.
-Research and upgrade your units and technologies
-Another tip and trick for Conflict of Nations World War 3 mod apk is to research and upgrade your units and technologies. You have to invest in your technological development to gain an edge over your enemies. You have to research new units and technologies that can improve your military capabilities, such as stealth, precision, mobility, range, firepower, defense, or intelligence. You also have to upgrade your existing units and technologies to enhance their performance and efficiency.
-Use diplomacy and communication wisely
-A final tip and trick for Conflict of Nations World War 3 mod apk is to use diplomacy and communication wisely. You have to interact with other players in a smart and respectful way. You have to communicate your intentions, needs, and expectations clearly and honestly. You have to negotiate fair and beneficial deals with your allies or enemies. You have to spy on your rivals or enemies to gather information or disrupt their plans. You also have to avoid unnecessary conflicts or provocations that can damage your reputation or relationships.
-Conclusion
-Conflict of Nations World War 3 mod apk is a modified version of an Android application that can offer unlimited gold, resources, or other advantages for the game Conflict of Nations World War 3, a real-time strategy game that simulates modern global conflicts in a realistic and immersive way. However, it can also pose risks such as malware, bans, or legal issues. To download and install it, you have to find a reliable source for the mod apk file, enable unknown sources on your device settings, and download and install the mod apk file. To play it, you have to choose your nation and doctrine, build your army and economy, and form alliances and wage war. To win the war, you have to know your nation's strengths and weaknesses, research and upgrade your units and technologies, and use diplomacy and communication wisely.
-FAQs
-
-- What is the difference between Conflict of Nations World War 3 mod apk and the official version?
-The main difference between Conflict of Nations World War 3 mod apk and the official version is that the mod apk version can offer unlimited gold, resources, or other advantages that are not available in the official version.
-- Is Conflict of Nations World War 3 mod apk safe to use?
-Conflict of Nations World War 3 mod apk is not completely safe to use. It can pose risks such as malware, bans, or legal issues. Therefore, you should use it at your own risk and discretion.
-- How can I get more gold in Conflict of Nations World War 3 mod apk?
-You can get more gold in Conflict of Nations World War 3 mod apk by using the unlimited gold feature that the mod apk version offers. Alternatively, you can also get more gold by completing missions, achievements, or participating in events in the game.
-- Can I play Conflict of Nations World War 3 mod apk offline?
-No, you cannot play Conflict of Nations World War 3 mod apk offline. The game requires an internet connection to run and sync with the servers and other players.
-- Can I play Conflict of Nations World War 3 mod apk with my friends?
-Yes, you can play Conflict of Nations World War 3 mod apk with your friends. You can join or create coalitions with your friends and cooperate or compete with them for global dominance. You can also chat, trade, or fight with them in the game.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Lagu Give Me Your Forever by Zack Tabudlo - I want you to know I love you the most.md b/spaces/1phancelerku/anime-remove-background/Download Lagu Give Me Your Forever by Zack Tabudlo - I want you to know I love you the most.md
deleted file mode 100644
index d6884db83e22e67d6089378ea5dabf23b3c83ce5..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Lagu Give Me Your Forever by Zack Tabudlo - I want you to know I love you the most.md
+++ /dev/null
@@ -1,104 +0,0 @@
-
-How to Download Lagu Zack Tabudlo I Want You to Know
-If you are a fan of Filipino pop music, you might have heard of the song I Want You to Know by Zack Tabudlo. This song, also known as Give Me Your Forever, is a romantic ballad that expresses the singer's love and devotion for his partner. The song has been streamed millions of times on various platforms and has received positive reviews from critics and fans alike.
-Zack Tabudlo is a young and talented singer-songwriter from Manila, Philippines. He started his music career at the age of 11, when he joined a reality TV show called Star Circle Quest. Since then, he has released several songs, such as Binibini, Hindi Ko Kaya, Lost, and Heart Can't Lose. He is also known for his covers of popular songs, such as Driver's License by Olivia Rodrigo, Blinding Lights by The Weeknd, and Dynamite by BTS.
-download lagu zack tabudlo i want you to know
Download Zip ===> https://jinyurl.com/2uNQgz
-If you want to download lagu Zack Tabudlo I Want You to Know, you might be wondering how to do it legally and safely. In this article, we will show you the steps to download the song from reliable online platforms that offer high-quality audio files. We will also tell you the benefits of downloading the song and answer some frequently asked questions.
- Steps to Download Lagu Zack Tabudlo I Want You to Know
-To download lagu Zack Tabudlo I Want You to Know, you need to follow these simple steps:
- Step 1: Find a reliable online platform that offers the song
-The first step is to find an online platform that has the song available for download. There are many platforms that offer this service, but not all of them are trustworthy or legal. Some platforms may contain viruses or malware that can harm your device or steal your personal information. Some platforms may also violate the copyright laws and infringe on the artist's rights.
-download lagu zack tabudlo i want you to know i love you the most
-download lagu zack tabudlo i want you to know give me your forever
-download lagu zack tabudlo i want you to know lyrics
-download lagu zack tabudlo i want you to know mp3
-download lagu zack tabudlo i want you to know video
-download lagu zack tabudlo i want you to know acoustic
-download lagu zack tabudlo i want you to know cover
-download lagu zack tabudlo i want you to know karaoke
-download lagu zack tabudlo i want you to know remix
-download lagu zack tabudlo i want you to know piano
-download lagu zack tabudlo i want you to know guitar
-download lagu zack tabudlo i want you to know instrumental
-download lagu zack tabudlo i want you to know live
-download lagu zack tabudlo i want you to know free
-download lagu zack tabudlo i want you to know 320kbps
-download lagu zack tabudlo i want you to know stafaband
-download lagu zack tabudlo i want you to know planetlagu
-download lagu zack tabudlo i want you to know metrolagu
-download lagu zack tabudlo i want you to know wapka
-download lagu zack tabudlo i want you to know uyeshare
-download lagu zack tabudlo i want you to know ilkpop
-download lagu zack tabudlo i want you to know matikiri
-download lagu zack tabudlo i want you to know lebahmusik
-download lagu zack tabudlo i want you to know gudanglagu
-download lagu zack tabudlo i want you to know langitmusik
-download lagu zack tabudlo i want you to know mp3juice
-download lagu zack tabudlo i want you to know mp3skull
-download lagu zack tabudlo i want you to know mp3direct
-download lagu zack tabudlo i want you to know mp3clan
-download lagu zack tabudlo i want you to know mp3goo
-download lagu zack tabudlo i want you to know mp3paw
-download lagu zack tabudlo i want you to know mp3quack
-download lagu zack tabudlo i want you to know tubidy
-download lagu zack tabudlo i want you to know y2mate
-download lagu zack tabudlo i want you to know youtube
-download lagu zack tabudlo i want you to know spotify
-download lagu zack tabudlo i want you to know apple music
-download lagu zack tabudlo i want you to know soundcloud
-download lagu zack tabudlo i want you to know deezer
-download lagu zack tabudlo i want you to know amazon music
-download lagu zack tabudlo i want you to know shazam
-download lagu zack tabudlo i want you to know genius
-download lagu zack tabudlo i want you to know azlyrics
-download lagu zack tabudlo i want you to know musixmatch
-download lagu zack tabudlo i want you to know liriklagu.id
-download lagu zack tabudlo i want you to know chordtela
-download lagu zack tabudlo i want you to know chordify
-download lagu zack tabudlo i want you to know ultimate guitar
-download lagu zack tabudlo i want you to know tabs
-To avoid these risks, you should look for platforms that are reputable and licensed. These platforms usually have a large collection of songs from different genres and artists. They also have clear terms and conditions, privacy policies, and customer support. Some examples of reliable platforms are:
-
-| Platform | Features |
-| --- | --- |
-| [Spotify] | A popular streaming service that offers millions of songs, podcasts, playlists, and more. Free version with ads and limited features, or a premium version with no ads and more features. Offline listening for premium users. Compatible with various devices, such as smartphones, tablets, computers, smart TVs, and speakers. |
-| [Apple Music] | A streaming service that offers over 75 million songs, radio stations, playlists, and more. Requires a subscription fee after a free trial period. Offline listening for subscribers. Compatible with Apple devices, such as iPhones, iPads, Macs, Apple TVs, and HomePods. |
-| [YouTube Music] | A streaming service that offers songs, music videos, live performances, remixes, and more. Free version with ads and limited features, or a premium version with no ads and more features. Offline listening for premium users. Compatible with various devices, such as smartphones, tablets, computers, smart TVs, and speakers. |
-| [Amazon Music] | A streaming service that offers over 70 million songs, podcasts, playlists, and more. Free version with ads and limited features for Amazon Prime members, or a paid version with no ads and more features for non-members. Offline listening for paid users. Compatible with various devices, such as smartphones, tablets, computers, smart TVs, and speakers. |
-
-You can choose any platform that suits your preferences and budget. However, you should always check the availability of the song before downloading it. Some platforms may not have the song in their library or may have regional restrictions.
- Step 2: Choose the format and quality of the song
-The next step is to choose the format and quality of the song that you want to download. The format refers to the type of file that the song is stored in. The quality refers to the level of sound clarity and detail that the song has. Different formats and qualities have different pros and cons.
-The most common formats for music files are MP3, AAC, WAV, FLAC, and ALAC. MP3 and AAC are lossy compressed formats that shrink the file size by discarding some of the original audio data, which makes them easier to download and store but lowers the sound quality slightly. WAV is an uncompressed format, while FLAC and ALAC are lossless compressed formats; all three preserve the original audio data, giving higher sound quality at the cost of larger files.
-Audio quality is usually measured by bit rate or sample rate. Bit rate (for example, 128 or 320 kbps) is the amount of data used per second of audio in a compressed format. Sample rate (for example, 44.1 kHz) is the number of times per second that the sound wave is measured. Higher bit rates or sample rates mean better sound quality, but also larger files.
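-As a rough worked example, a four-minute song encoded at 320 kbps takes about 320,000 bits × 240 seconds ÷ 8 ≈ 9.6 MB, while the same song as an uncompressed 16-bit, 44.1 kHz stereo WAV file takes about 44,100 samples × 2 bytes × 2 channels × 240 seconds ≈ 42 MB.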
-You can choose any format and quality that suits your preferences and device capacity. However, you should always check the compatibility of the format and quality with your device before downloading it. Some devices may not support certain formats or qualities.
- Step 3: Click on the download button and wait for the process to finish
-The final step is to click on the download button and wait for the process to finish. The download button may vary depending on the platform that you are using. It may be labeled as "Download", "Save", "Add", or something else. It may also have an icon of a downward arrow or a cloud.
-Once you click on the download button, you will see a progress bar or a notification that shows how much time is left until the download is complete. You should not close or exit the platform while the download is in progress. You should also make sure that you have a stable internet connection and enough battery power on your device.
-After the download is finished, you will find the song in your device's music library or folder. You can then play it offline and on any device that supports it. You can also transfer it to other devices using a USB cable or a wireless connection.
- Benefits of Downloading Lagu Zack Tabudlo I Want You to Know
-By downloading lagu Zack Tabudlo I Want You to Know, you can enjoy several benefits:
- Enjoy the song offline and on any device
-One of the main benefits of downloading lagu Zack Tabudlo I Want You to Know is that you can enjoy the song offline and on any device. This means that you don't need an internet connection or a subscription fee to listen to the song whenever and wherever you want. You can also play it on a free trial period or a discount for certain users. You should check the pricing and payment options of the platform that you choose before downloading the song.
- How long does it take to download lagu Zack Tabudlo I Want You to Know?
-The time that it takes to download lagu Zack Tabudlo I Want You to Know depends on the format and quality of the song, the speed of your internet connection, and the capacity of your device. Generally, the higher the format and quality of the song, the longer it takes to download. The faster your internet connection, the shorter it takes to download. The more space you have on your device, the easier it is to download. You can estimate the time that it takes to download the song by looking at the file size and the download speed of the platform that you use.
- What are some other songs by Zack Tabudlo that I can download?
-Some other songs by Zack Tabudlo that you can download are:
-
-- Binibini: A catchy and upbeat song that praises the beauty and charm of a woman
-- Hindi Ko Kaya: A heartfelt and emotional song that expresses the pain and regret of losing a loved one
-- Lost: A mellow and soothing song that reflects on the feeling of being lost and lonely
-- Heart Can't Lose: A powerful and inspiring song that encourages overcoming challenges and following your dreams
-
-You can find these songs and more on the platforms that we mentioned earlier or on Zack Tabudlo's official website, YouTube channel, or social media accounts.
- Where can I find more information about Zack Tabudlo and his music?
-You can find more information about Zack Tabudlo and his music on his official website, YouTube channel, or social media accounts. Here are some links that you can visit:
-
-- [Zack Tabudlo's official website]
-- [Zack Tabudlo's YouTube channel]
-- [Zack Tabudlo's Facebook page]
-- [Zack Tabudlo's Instagram account]
-- [Zack Tabudlo's Twitter account]
-
-You can also search for articles, interviews, reviews, or fan pages about Zack Tabudlo and his music on the web.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Tank Hero The Ultimate Arcade Shooter Game MOD APK.md b/spaces/1phancelerku/anime-remove-background/Download Tank Hero The Ultimate Arcade Shooter Game MOD APK.md
deleted file mode 100644
index ea86480a5c611caad2fb8cc5a281e9bba6d7f99a..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Tank Hero The Ultimate Arcade Shooter Game MOD APK.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-Download Game Tank Hero Mod Apk: A Guide for Tank Lovers
-If you are a fan of tank games, you might have heard of Tank Hero, a popular arcade shooter game that lets you control a powerful tank and blast your enemies in various levels. But did you know that you can download the mod apk version of Tank Hero and enjoy unlimited money, god mode, one hit kill, and other features? In this article, we will show you how to download and install Tank Hero mod apk, as well as some tips and tricks to master the game.
- What is Tank Hero and why you should play it
-Tank Hero is a 2D side-scrolling tank game developed by UP STUDIO. The game has over 120 levels, 3 unique environments, 5 weapons, and 5 types of AI tanks to fight against. You can also play online with other players in multiplayer mode. The game is simple to control: just drag to move and release to fire. You can also choose from different firing options, such as single shot, double shot, triple shot, etc. The game is fun and addictive, as you have to use your strategy and skills to defeat the enemies and clear the levels.
-download game tank hero mod apk
Download ——— https://jinyurl.com/2uNSeV
- Features of Tank Hero
-Some of the features that make Tank Hero stand out are:
-
-- Awesome graphics and sound effects: The game has colorful and cartoonish graphics that suit the arcade style. The sound effects are also realistic and immersive, making you feel like you are in a real tank battle.
-- Various tanks and weapons: You can collect and upgrade different tanks and weapons in the game. Each tank has its own characteristics, such as speed, armor, fire rate, etc. Each weapon also has its own effects, such as fireball, laser, missile, etc. You can mix and match different tanks and weapons to suit your play style.
-- Unique talents: You can also unlock and use over 100 talents in the game. Talents are special abilities that can boost your firepower, defense, speed, or other aspects. You can choose from random talents or customize your own talent combination.
-
- How to download and install Tank Hero mod apk
-If you want to enjoy more features and benefits in Tank Hero, you can download the mod apk version of the game. The mod apk version will give you unlimited money, god mode, one hit kill, and other features that will make the game easier and more fun. Here are the steps to download and install Tank Hero mod apk:
-
-- Go to [this link] or [this link] or [this link] to download the Tank Hero mod apk file.
-- After downloading the file, go to your device settings and enable the installation of unknown sources.
-- Locate the downloaded file in your file manager and tap on it to install it.
-- Wait for the installation to finish and then launch the game.
-- Enjoy playing Tank Hero with unlimited money, god mode, one hit kill, and other features.
-
- Tips and tricks to master Tank Hero
-If you want to become a tank hero yourself, here are some tips and tricks that will help you improve your skills and performance in the game:
- Choose the right tank and weapon for each level
-As mentioned earlier, each tank and weapon has its own advantages and disadvantages. You should choose the ones that suit the level's difficulty, terrain, enemies, and objectives. For example, if the level has many obstacles or narrow spaces, you might want to use a fast tank with a laser weapon that can penetrate walls. If the level has many enemies or bosses, you might want to use a tank with high armor and a fireball weapon that can deal splash damage. You can also switch tanks and weapons during the level if you find a better option.
- Upgrade your tank and skills regularly
-As you progress in the game, you will earn money and experience that you can use to upgrade your tank and skills. Upgrading your tank will increase its stats, such as speed, armor, fire rate, etc. Upgrading your skills will unlock new talents or improve the existing ones. You should always upgrade your tank and skills whenever you can, as they will make a big difference in your performance and survival.
- Use the terrain and obstacles to your advantage
-The game has various terrains and obstacles that can affect the gameplay. For example, some terrains can slow you down or speed you up, some obstacles can block your shots or reflect them, some items can heal you or give you extra ammo, etc. You should always be aware of your surroundings and use them to your advantage. For example, you can hide behind walls or rocks to avoid enemy fire, you can bounce your shots off walls or mirrors to hit enemies from different angles, you can use speed boosts or ramps to escape or chase enemies, etc.
- Conclusion
-Tank Hero is a fun and addictive tank game that will keep you entertained for hours. You can download the mod apk version of Tank Hero and enjoy unlimited money, god mode, one hit kill, and other features that will make the game easier and more fun. You can also follow our tips and tricks to master the game and become a tank hero yourself. So what are you waiting for? Download Tank Hero mod apk now and start blasting your enemies!
-download tank heroes mod apk unlimited money
-download game tank hero mod apk android 1
-download game tank hero mod apk latest version
-download game tank hero mod apk offline
-download game tank hero mod apk revdl
-download game tank hero mod apk rexdl
-download game tank hero mod apk no ads
-download game tank hero mod apk free shopping
-download game tank hero mod apk happymod
-download game tank hero mod apk for pc
-download game tank hero mod apk 1.8.0
-download game tank hero mod apk 1.7.9
-download game tank hero mod apk 1.7.8
-download game tank hero mod apk 1.7.7
-download game tank hero mod apk 1.7.6
-download game tank hero mod apk 1.7.5
-download game tank hero mod apk 1.7.4
-download game tank hero mod apk 1.7.3
-download game tank hero mod apk 1.7.2
-download game tank hero mod apk 1.7.1
-download game tank hero mod apk 1.7.0
-download game tank hero mod apk 1.6.9
-download game tank hero mod apk 1.6.8
-download game tank hero mod apk 1.6.7
-download game tank hero mod apk 1.6.6
-download game tank hero mod apk 1.6.5
-download game tank hero mod apk 1.6.4
-download game tank hero mod apk 1.6.3
-download game tank hero mod apk 1.6.2
-download game tank hero mod apk 1.6.1
-download game tank hero mod apk 1.6.0
-download game tank hero mod apk 1.5.9
-download game tank hero mod apk 1.5.8
-download game tank hero mod apk 1.5.7
-download game tank hero mod apk 1.5.6
-download game tank hero mod apk 1.5.5
-download game tank hero mod apk 1.5.4
-download game tank hero mod apk 1.5.3
-download game tank hero mod apk 1.5.2
-download game tank hero mod apk 1.5.1
-download game tank hero mod apk 1.5.0
-download game tank heroes arcade shooter mod apk
-download game super tanks heroes - real-time battle arena shooter - pvp games - multiplayer online - action games - arcade games - io games - team games - war games - tanks games - shooting games - gun games - battle games - fun games - free games - offline games - no wifi games - casual games - easy games - simple games - addictive games - cool games - best games - top games - new games - hot games - popular games - trending games - amazing games - awesome games - fantastic games - incredible games - great games - wonderful games - beautiful games - cute games - lovely games - sweet games - nice games - good games - happy games - relaxing games
- FAQs
-Here are some frequently asked questions about Tank Hero mod apk:
-
-- Q: Is Tank Hero mod apk safe to download and install?
-- A: Yes, Tank Hero mod apk is safe to download and install. It does not contain any viruses or malware that can harm your device. However, you should always download it from a trusted source and enable the installation of unknown sources in your device settings.
-- Q: Do I need to root my device to use Tank Hero mod apk?
-- A: No, you do not need to root your device to use Tank Hero mod apk. It works on both rooted and non-rooted devices.
-- Q: Will Tank Hero mod apk affect my progress in the original game?
-- A: No, Tank Hero mod apk will not affect your progress in the original game. It will create a separate game data that is independent from the original game. You can play both versions of the game without any interference.
-- Q: Can I play online with other players in Tank Hero mod apk?
-- A: Yes, you can play online with other players in Tank Hero mod apk. However, you might encounter some issues or errors when playing online, as the mod apk version might not be compatible with the latest version of the original game. You can try to update the mod apk version or switch to the original game if you want to play online smoothly.
-- Q: How can I contact the developer of Tank Hero mod apk?
-- A: You can contact the developer of Tank Hero mod apk by visiting their website [here] or by sending them an email at [this address].
-
-
-
\ No newline at end of file
diff --git a/spaces/AHzizi/WaifuVoiceGen/app.py b/spaces/AHzizi/WaifuVoiceGen/app.py
deleted file mode 100644
index e41932ae3e0a20837c5740859b4be34253c59b82..0000000000000000000000000000000000000000
--- a/spaces/AHzizi/WaifuVoiceGen/app.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# coding=utf-8
-import os
-import re
-import argparse
-import utils
-import commons
-import json
-import torch
-import gradio as gr
-from models import SynthesizerTrn
-from text import text_to_sequence, _clean_text
-from torch import no_grad, LongTensor
-import gradio.processing_utils as gr_processing_utils
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
-
-hps_ms = utils.get_hparams_from_file(r'config/config.json')
-
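-# Monkey-patch gr.Audio.postprocess so generated audio is returned as a base64 data URL,
-# which the client-side download-button script below can save directly.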
-audio_postprocess_ori = gr.Audio.postprocess
-
-def audio_postprocess(self, y):
- data = audio_postprocess_ori(self, y)
- if data is None:
- return None
- return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
-
-
-gr.Audio.postprocess = audio_postprocess
-
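-# Convert input text (or raw phoneme symbols) into the id sequence expected by the model,
-# optionally interspersing blank tokens as configured in hps.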
-def get_text(text, hps, is_symbol):
- text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = LongTensor(text_norm)
- return text_norm, clean_text
-
-def create_tts_fn(net_g_ms, speaker_id):
- def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
- text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
- if limitation:
- text_len = len(re.sub(r"\[([A-Z]{2})\]", "", text))
- max_len = 100
- if is_symbol:
- max_len *= 3
- if text_len > max_len:
- return "Error: Text is too long", None
- if not is_symbol:
- if language == 0:
- text = f"[ZH]{text}[ZH]"
- elif language == 1:
- text = f"[JA]{text}[JA]"
- else:
- text = f"{text}"
- stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
- with no_grad():
- x_tst = stn_tst.unsqueeze(0).to(device)
- x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
- sid = LongTensor([speaker_id]).to(device)
- audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
- length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
- return "Success", (22050, audio)
- return tts_fn
-
-def create_to_symbol_fn(hps):
- def to_symbol_fn(is_symbol_input, input_text, temp_lang):
- if temp_lang == 0:
- clean_text = f'[ZH]{input_text}[ZH]'
- elif temp_lang == 1:
- clean_text = f'[JA]{input_text}[JA]'
- else:
- clean_text = input_text
- return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else ''
-
- return to_symbol_fn
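-# Default (noise_scale, noise_scale_w, length_scale) slider values for each language choice.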
-def change_lang(language):
- if language == 0:
- return 0.6, 0.668, 1.2
- elif language == 1:
- return 0.6, 0.668, 1
- else:
- return 0.6, 0.668, 1
-
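-# Client-side JS for the download button: grabs the generated audio element's (base64) src
-# and saves it as a .wav file named after the first 20 characters of the input text.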
-download_audio_js = """
-() =>{{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
- let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
- if (audio == undefined)
- return;
- text = text.value;
- if (text == undefined)
- text = Math.floor(Math.random()*100000000);
- audio = audio.src;
- let oA = document.createElement("a");
- oA.download = text.substr(0, 20)+'.wav';
- oA.href = audio;
- document.body.appendChild(oA);
- oA.click();
- oA.remove();
-}}
-"""
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- args = parser.parse_args()
- device = torch.device(args.device)
-
- models = []
- with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
- models_info = json.load(f)
- for i, info in models_info.items():
- if not info['enable']:
- continue
- sid = info['sid']
- name_en = info['name_en']
- name_zh = info['name_zh']
- title = info['title']
- cover = f"pretrained_models/{i}/{info['cover']}"
- example = info['example']
- language = info['language']
- net_g_ms = SynthesizerTrn(
- len(hps_ms.symbols),
- hps_ms.data.filter_length // 2 + 1,
- hps_ms.train.segment_size // hps_ms.data.hop_length,
- n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
- **hps_ms.model)
- utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
- _ = net_g_ms.eval().to(device)
- models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
- with gr.Blocks() as app:
- gr.Markdown(
- "# vits-models\n"
- "## Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
- "## ·请不要生成会对个人以及组织造成侵害的内容\n"
- "\n\n"
- "[Open In Colab]"
- "(https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)"
- " without queue and length limitation.(无需等待队列,并且没有长度限制)\n\n"
- "[Finetune your own model](https://github.com/SayaSS/vits-finetuning)"
- )
-
- with gr.Tabs():
- with gr.TabItem("EN"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- with gr.TabItem(name_en):
- with gr.Row():
- gr.Markdown(
- # NOTE: the original inline HTML markup here was stripped during extraction;
- # this is a minimal reconstruction that keeps the title and optional cover image.
- '<div align="center">'
- f'<strong>{title}</strong>'
- + (f'<br><img src="file/{cover}">' if cover else "")
- + '</div>'
- )
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
- lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
- type="index", value=language)
- with gr.Accordion(label="Advanced Options", open=False):
- symbol_input = gr.Checkbox(value=False, label="Symbol input")
- symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="Generate", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="Output Message")
- o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
- download = gr.Button("Download Audio")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, lang],
- [input_text]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- with gr.TabItem("中文"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- with gr.TabItem(name_zh):
- with gr.Row():
- gr.Markdown(
- # NOTE: the original inline HTML markup here was stripped during extraction;
- # this is a minimal reconstruction that keeps the title and optional cover image.
- '<div align="center">'
- f'<strong>{title}</strong>'
- + (f'<br><img src="file/{cover}">' if cover else "")
- + '</div>'
- )
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
- lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
- type="index", value="中文"if language == "Chinese" else "日语")
- with gr.Accordion(label="高级选项", open=False):
- symbol_input = gr.Checkbox(value=False, label="符号输入")
- symbol_list = gr.Dataset(label="符号列表", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="生成", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="输出信息")
- o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
- download = gr.Button("下载音频")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, lang],
- [input_text]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/modules/conv.py b/spaces/AIConsultant/MusicGen/audiocraft/modules/conv.py
deleted file mode 100644
index d115cbf8729b642ed78608bd00a4d0fd5afae6fd..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/modules/conv.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import typing as tp
-import warnings
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torch.nn.utils import spectral_norm, weight_norm
-
-
-CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
- 'time_group_norm'])
-
-
-def apply_parametrization_norm(module: nn.Module, norm: str = 'none'):
- assert norm in CONV_NORMALIZATIONS
- if norm == 'weight_norm':
- return weight_norm(module)
- elif norm == 'spectral_norm':
- return spectral_norm(module)
- else:
- # We already check was in CONV_NORMALIZATION, so any other choice
- # doesn't need reparametrization.
- return module
-
-
-def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs):
- """Return the proper normalization module. If causal is True, this will ensure the returned
- module is causal, or return an error if the normalization doesn't support causal evaluation.
- """
- assert norm in CONV_NORMALIZATIONS
- if norm == 'time_group_norm':
- if causal:
- raise ValueError("GroupNorm doesn't support causal evaluation.")
- assert isinstance(module, nn.modules.conv._ConvNd)
- return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
- else:
- return nn.Identity()
-
-
-def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
- padding_total: int = 0) -> int:
- """See `pad_for_conv1d`."""
- length = x.shape[-1]
- n_frames = (length - kernel_size + padding_total) / stride + 1
- ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
- return ideal_length - length
-
-
-def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
- """Pad for a convolution to make sure that the last window is full.
- Extra padding is added at the end. This is required to ensure that we can rebuild
- an output of the same length, as otherwise, even with padding, some time steps
- might get removed.
- For instance, with total padding = 4, kernel size = 4, stride = 2:
- 0 0 1 2 3 4 5 0 0 # (0s are padding)
- 1 2 3 # (output frames of a convolution, last 0 is never used)
- 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding)
- 1 2 3 4 # once you removed padding, we are missing one time step !
- """
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
- return F.pad(x, (0, extra_padding))
-
-
-def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
- """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
- If this is the case, we insert extra 0 padding to the right before the reflection happen.
- """
- length = x.shape[-1]
- padding_left, padding_right = paddings
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
- if mode == 'reflect':
- max_pad = max(padding_left, padding_right)
- extra_pad = 0
- if length <= max_pad:
- extra_pad = max_pad - length + 1
- x = F.pad(x, (0, extra_pad))
- padded = F.pad(x, paddings, mode, value)
- end = padded.shape[-1] - extra_pad
- return padded[..., :end]
- else:
- return F.pad(x, paddings, mode, value)
-
-
-def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
- """Remove padding from x, handling properly zero padding. Only for 1d!"""
- padding_left, padding_right = paddings
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
- assert (padding_left + padding_right) <= x.shape[-1]
- end = x.shape[-1] - padding_right
- return x[..., padding_left: end]
-
-
-class NormConv1d(nn.Module):
- """Wrapper around Conv1d and normalization applied to this conv
- to provide a uniform interface across normalization approaches.
- """
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
- super().__init__()
- self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
- self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
- self.norm_type = norm
-
- def forward(self, x):
- x = self.conv(x)
- x = self.norm(x)
- return x
-
-
-class NormConv2d(nn.Module):
- """Wrapper around Conv2d and normalization applied to this conv
- to provide a uniform interface across normalization approaches.
- """
- def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
- super().__init__()
- self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
- self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
- self.norm_type = norm
-
- def forward(self, x):
- x = self.conv(x)
- x = self.norm(x)
- return x
-
-
-class NormConvTranspose1d(nn.Module):
- """Wrapper around ConvTranspose1d and normalization applied to this conv
- to provide a uniform interface across normalization approaches.
- """
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
- super().__init__()
- self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
- self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
- self.norm_type = norm
-
- def forward(self, x):
- x = self.convtr(x)
- x = self.norm(x)
- return x
-
-
-class NormConvTranspose2d(nn.Module):
- """Wrapper around ConvTranspose2d and normalization applied to this conv
- to provide a uniform interface across normalization approaches.
- """
- def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
- super().__init__()
- self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
- self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
-
- def forward(self, x):
- x = self.convtr(x)
- x = self.norm(x)
- return x
-
-
-class StreamableConv1d(nn.Module):
- """Conv1d with some builtin handling of asymmetric or causal padding
- and normalization.
- """
- def __init__(self, in_channels: int, out_channels: int,
- kernel_size: int, stride: int = 1, dilation: int = 1,
- groups: int = 1, bias: bool = True, causal: bool = False,
- norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
- pad_mode: str = 'reflect'):
- super().__init__()
- # warn user on unusual setup between dilation and stride
- if stride > 1 and dilation > 1:
- warnings.warn("StreamableConv1d has been initialized with stride > 1 and dilation > 1"
- f" (kernel_size={kernel_size} stride={stride}, dilation={dilation}).")
- self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
- dilation=dilation, groups=groups, bias=bias, causal=causal,
- norm=norm, norm_kwargs=norm_kwargs)
- self.causal = causal
- self.pad_mode = pad_mode
-
- def forward(self, x):
- B, C, T = x.shape
- kernel_size = self.conv.conv.kernel_size[0]
- stride = self.conv.conv.stride[0]
- dilation = self.conv.conv.dilation[0]
- kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations
- padding_total = kernel_size - stride
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
- if self.causal:
- # Left padding for causal
- x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
- else:
- # Asymmetric padding required for odd strides
- padding_right = padding_total // 2
- padding_left = padding_total - padding_right
- x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
- return self.conv(x)
-
-
-class StreamableConvTranspose1d(nn.Module):
- """ConvTranspose1d with some builtin handling of asymmetric or causal padding
- and normalization.
- """
- def __init__(self, in_channels: int, out_channels: int,
- kernel_size: int, stride: int = 1, causal: bool = False,
- norm: str = 'none', trim_right_ratio: float = 1.,
- norm_kwargs: tp.Dict[str, tp.Any] = {}):
- super().__init__()
- self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
- causal=causal, norm=norm, norm_kwargs=norm_kwargs)
- self.causal = causal
- self.trim_right_ratio = trim_right_ratio
- assert self.causal or self.trim_right_ratio == 1., \
- "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
- assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
-
- def forward(self, x):
- kernel_size = self.convtr.convtr.kernel_size[0]
- stride = self.convtr.convtr.stride[0]
- padding_total = kernel_size - stride
-
- y = self.convtr(x)
-
- # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
- # removed at the very end, when keeping only the right length for the output,
- # as removing it here would require also passing the length at the matching layer
- # in the encoder.
- if self.causal:
- # Trim the padding on the right according to the specified ratio
- # if trim_right_ratio = 1.0, trim everything from right
- padding_right = math.ceil(padding_total * self.trim_right_ratio)
- padding_left = padding_total - padding_right
- y = unpad1d(y, (padding_left, padding_right))
- else:
- # Asymmetric padding required for odd strides
- padding_right = padding_total // 2
- padding_left = padding_total - padding_right
- y = unpad1d(y, (padding_left, padding_right))
- return y
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/utils/best_state.py b/spaces/AIConsultant/MusicGen/audiocraft/utils/best_state.py
deleted file mode 100644
index f5ad551432ad5cb0f83278b5d2100f9aa287958b..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/utils/best_state.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import defaultdict
-import logging
-import typing as tp
-
-import flashy
-import torch
-
-from ..optim import ModuleDictEMA
-from .utils import copy_state
-
-
-logger = logging.getLogger(__name__)
-
-
-class BestStateDictManager(flashy.state.StateDictSource):
- """BestStateDictManager maintains a copy of best state_dict() for registered sources.
-
- BestStateDictManager has two main attributes:
- states (dict): State dict of the registered StateDictSource.
- param_ids (dict): Dict of parameter ids for registered states from ModuleDictEMA and other sources.
-
- When registering new sources, the BestStateDictManager will ensure two conflicting sources between
- ModuleDictEMA and original modules are not both registered as it would otherwise create ambiguity about
- what to consider for best state.
-
- Args:
- device (torch.device or str): Device on which we keep the copy.
- dtype (torch.dtype): Data type for the state parameters.
- """
- def __init__(self, device: tp.Union[torch.device, str] = 'cpu',
- dtype: tp.Optional[torch.dtype] = None):
- self.device = device
- self.states: dict = {}
- self.param_ids: dict = defaultdict(dict)
- self.dtype = dtype
-
- def _get_parameter_ids(self, state_dict):
- return {id(p): name for name, p in state_dict.items() if isinstance(p, torch.Tensor)}
-
- def _validate_no_parameter_ids_overlap(self, name: str, param_ids: dict):
- for registered_name, registered_param_ids in self.param_ids.items():
- if registered_name != name:
- overlap = set(registered_param_ids.keys()) & set(param_ids.keys())
- assert len(overlap) == 0, (
- f"Found {len(overlap)} / {len(param_ids.keys())} overlapping parameters"
- f" in {name} and already registered {registered_name}: {' '.join(map(str, overlap))}"
- )
-
- def update(self, name: str, source: flashy.state.StateDictSource):
- if name not in self.states:
- raise ValueError(f"{name} missing from registered states.")
- self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype)
-
- def register(self, name: str, source: flashy.state.StateDictSource):
- if name in self.states:
- raise ValueError(f"{name} already present in states.")
- # Registering parameter ids for EMA and non-EMA states allows us to check that
- # there is no overlap that would create ambiguity about how to handle the best state
- param_ids = self._get_parameter_ids(source.state_dict())
- if isinstance(source, ModuleDictEMA):
- logger.debug(f"Registering to best state: ModuleDictEMA '{name}' with {len(param_ids)} params")
- self._validate_no_parameter_ids_overlap(name, param_ids)
- self.param_ids[name] = param_ids
- else:
- logger.debug(f"Registering to best state: StateDictSource '{name}' with {len(param_ids)} params")
- self._validate_no_parameter_ids_overlap('base', param_ids)
- self.param_ids['base'].update(param_ids)
- # Register state
- self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype)
-
- def state_dict(self) -> flashy.state.StateDict:
- return self.states
-
- def load_state_dict(self, state: flashy.state.StateDict):
- for name, sub_state in state.items():
- for k, v in sub_state.items():
- self.states[name][k].copy_(v)
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/nn/schedulers.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/nn/schedulers.py
deleted file mode 100644
index ce7494b136a77d7724ab97ae2cdc56e6c9694214..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/nn/schedulers.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import numpy as np
-
-
-class NoneSchedule(object):
- def __init__(self, optimizer, lr):
- self.optimizer = optimizer
- self.constant_lr = lr
- self.step(0)
-
- def step(self, num_updates):
- self.lr = self.constant_lr
- for param_group in self.optimizer.param_groups:
- param_group['lr'] = self.lr
- return self.lr
-
- def get_lr(self):
- return self.optimizer.param_groups[0]['lr']
-
- def get_last_lr(self):
- return self.get_lr()
-
-
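-# Transformer-style schedule: linear warmup, then inverse-square-root decay scaled by
-# hidden_size ** -0.5, with the learning rate clamped to a 1e-6 floor.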
-class RSQRTSchedule(NoneSchedule):
- def __init__(self, optimizer, lr, warmup_updates, hidden_size):
- self.optimizer = optimizer
- self.constant_lr = lr
- self.warmup_updates = warmup_updates
- self.hidden_size = hidden_size
- self.lr = lr
- for param_group in optimizer.param_groups:
- param_group['lr'] = self.lr
- self.step(0)
-
- def step(self, num_updates):
- constant_lr = self.constant_lr
- warmup = min(num_updates / self.warmup_updates, 1.0)
- rsqrt_decay = max(self.warmup_updates, num_updates) ** -0.5
- rsqrt_hidden = self.hidden_size ** -0.5
- self.lr = max(constant_lr * warmup * rsqrt_decay * rsqrt_hidden, 1e-6)
- for param_group in self.optimizer.param_groups:
- param_group['lr'] = self.lr
- return self.lr
-
-
-class WarmupSchedule(NoneSchedule):
- def __init__(self, optimizer, lr, warmup_updates):
- self.optimizer = optimizer
- self.constant_lr = self.lr = lr
- self.warmup_updates = warmup_updates
- for param_group in optimizer.param_groups:
- param_group['lr'] = self.lr
- self.step(0)
-
- def step(self, num_updates):
- constant_lr = self.constant_lr
- warmup = min(num_updates / self.warmup_updates, 1.0)
- self.lr = max(constant_lr * warmup, 1e-7)
- for param_group in self.optimizer.param_groups:
- param_group['lr'] = self.lr
- return self.lr
-
-
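-# Linear warmup to the base learning rate, then cosine annealing down to zero over the
-# remaining (total_updates - warmup_updates) steps.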
-class CosineSchedule(NoneSchedule):
- def __init__(self, optimizer, lr, warmup_updates, total_updates):
- self.optimizer = optimizer
- self.constant_lr = lr
- self.warmup_updates = warmup_updates
- self.total_updates = total_updates
- self.lr = lr
- self.assign_learning_rate(self.optimizer, self.lr)
- self.step(0)
-
- def assign_learning_rate(self, optimizer, new_lr):
- for param_group in optimizer.param_groups:
- param_group["lr"] = new_lr
-
- def _warmup_lr(self, base_lr, warmup_length, step):
- return base_lr * (step + 1) / warmup_length
-
- def step(self, num_updates):
- if num_updates < self.warmup_updates:
- lr = self._warmup_lr(self.lr, self.warmup_updates, num_updates)
- else:
- e = num_updates - self.warmup_updates
- es = self.total_updates - self.warmup_updates
- lr = 0.5 * (1 + np.cos(np.pi * e / es)) * self.lr
- self.assign_learning_rate(self.optimizer, lr)
- return lr
-
-
-if __name__ == '__main__':
- import numpy as np
- import matplotlib.pyplot as plt
- import torch
-
- def plot_scheduler(scheduler, label=None):
- y = np.array([scheduler.step(x) for x in range(0,160000, 10)])
- x = np.arange(0,160000, 10)
- plt.plot(x, y, label=label)
-
- dummy_model = torch.nn.Linear(10,10)
- dummy_optimizer = torch.optim.Adam(dummy_model.parameters())
- cosine = CosineSchedule(dummy_optimizer, lr=0.0005, warmup_updates=10000, total_updates=160000)
- plot_scheduler(cosine, "cosine (warmup 10000)")
- plt.savefig("0.png")
diff --git a/spaces/AbelKidane/headdetector/README.md b/spaces/AbelKidane/headdetector/README.md
deleted file mode 100644
index 0c870719982a1ba94bc28c964b1750e4f19304ef..0000000000000000000000000000000000000000
--- a/spaces/AbelKidane/headdetector/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Headdetector
-emoji: 🏆
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/api.py b/spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/api.py
deleted file mode 100644
index 9a6e194545c40ec263e65a140678b53a5a2abd54..0000000000000000000000000000000000000000
--- a/spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/api.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# based on https://github.com/isl-org/MiDaS
-import os
-
-import cv2
-import torch
-import torch.nn as nn
-from torchvision.transforms import Compose
-
-from ldm.modules.extra_condition.midas.midas.dpt_depth import DPTDepthModel
-from ldm.modules.extra_condition.midas.midas.midas_net import MidasNet
-from ldm.modules.extra_condition.midas.midas.midas_net_custom import MidasNet_small
-from ldm.modules.extra_condition.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
-
-
-ISL_PATHS = {
- "dpt_large": "models/dpt_large-midas-2f21e586.pt",
- "dpt_hybrid": "models/dpt_hybrid-midas-501f0c75.pt",
- "midas_v21": "",
- "midas_v21_small": "",
-}
-
-remote_model_path = "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt"
-
-def disabled_train(self, mode=True):
- """Overwrite model.train with this function to make sure train/eval mode
- does not change anymore."""
- return self
-
-
-def load_midas_transform(model_type):
- # https://github.com/isl-org/MiDaS/blob/master/run.py
- # load transform only
- if model_type == "dpt_large": # DPT-Large
- net_w, net_h = 384, 384
- resize_mode = "minimal"
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
- elif model_type == "dpt_hybrid": # DPT-Hybrid
- net_w, net_h = 384, 384
- resize_mode = "minimal"
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
- elif model_type == "midas_v21":
- net_w, net_h = 384, 384
- resize_mode = "upper_bound"
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
- elif model_type == "midas_v21_small":
- net_w, net_h = 256, 256
- resize_mode = "upper_bound"
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
- else:
- assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
-
- transform = Compose(
- [
- Resize(
- net_w,
- net_h,
- resize_target=None,
- keep_aspect_ratio=True,
- ensure_multiple_of=32,
- resize_method=resize_mode,
- image_interpolation_method=cv2.INTER_CUBIC,
- ),
- normalization,
- PrepareForNet(),
- ]
- )
-
- return transform
-
-
-def load_model(model_type):
- # https://github.com/isl-org/MiDaS/blob/master/run.py
- # load network
- model_path = ISL_PATHS[model_type]
- if model_type == "dpt_large": # DPT-Large
- model = DPTDepthModel(
- path=model_path,
- backbone="vitl16_384",
- non_negative=True,
- )
- net_w, net_h = 384, 384
- resize_mode = "minimal"
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
- elif model_type == "dpt_hybrid": # DPT-Hybrid
- if not os.path.exists(model_path):
- from basicsr.utils.download_util import load_file_from_url
- load_file_from_url(remote_model_path, model_dir='models')
-
- model = DPTDepthModel(
- path=model_path,
- backbone="vitb_rn50_384",
- non_negative=True,
- )
- net_w, net_h = 384, 384
- resize_mode = "minimal"
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
- elif model_type == "midas_v21":
- model = MidasNet(model_path, non_negative=True)
- net_w, net_h = 384, 384
- resize_mode = "upper_bound"
- normalization = NormalizeImage(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
- )
-
- elif model_type == "midas_v21_small":
- model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
- non_negative=True, blocks={'expand': True})
- net_w, net_h = 256, 256
- resize_mode = "upper_bound"
- normalization = NormalizeImage(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
- )
-
- else:
- print(f"model_type '{model_type}' not implemented, use: --model_type large")
- assert False
-
- transform = Compose(
- [
- Resize(
- net_w,
- net_h,
- resize_target=None,
- keep_aspect_ratio=True,
- ensure_multiple_of=32,
- resize_method=resize_mode,
- image_interpolation_method=cv2.INTER_CUBIC,
- ),
- normalization,
- PrepareForNet(),
- ]
- )
-
- return model.eval(), transform
-
-
-class MiDaSInference(nn.Module):
- MODEL_TYPES_TORCH_HUB = [
- "DPT_Large",
- "DPT_Hybrid",
- "MiDaS_small"
- ]
- MODEL_TYPES_ISL = [
- "dpt_large",
- "dpt_hybrid",
- "midas_v21",
- "midas_v21_small",
- ]
-
- def __init__(self, model_type):
- super().__init__()
- assert (model_type in self.MODEL_TYPES_ISL)
- model, _ = load_model(model_type)
- self.model = model
- self.model.train = disabled_train
-
- def forward(self, x):
- # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
- # NOTE: we expect that the correct transform has been called during dataloading.
- with torch.no_grad():
- prediction = self.model(x)
- prediction = torch.nn.functional.interpolate(
- prediction.unsqueeze(1),
- size=x.shape[2:],
- mode="bicubic",
- align_corners=False,
- )
- assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
- return prediction
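-
-# Illustrative usage sketch: it assumes the "dpt_hybrid" weights referenced in
-# ISL_PATHS are available locally and that the input is an RGB image scaled to 0..1.
-#
-#   model = MiDaSInference("dpt_hybrid")
-#   transform = load_midas_transform("dpt_hybrid")
-#   img = cv2.cvtColor(cv2.imread("input.png"), cv2.COLOR_BGR2RGB) / 255.0
-#   x = torch.from_numpy(transform({"image": img})["image"]).unsqueeze(0).float()
-#   depth = model(x)  # (1, 1, H, W) depth map at the input resolution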
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PushIntoBounds.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PushIntoBounds.js
deleted file mode 100644
index e9b7fdf52e0b21a75161db9648a91da96d459e90..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PushIntoBounds.js
+++ /dev/null
@@ -1,15 +0,0 @@
-import GetViewport from '../../../plugins/utils/system/GetViewport.js';
-
-var PushIntoBounds = function (bounds) {
- if (bounds === undefined) {
- bounds = GetViewport(this.scene);
- }
-
- this.left = Math.max(this.left, bounds.left);
- this.right = Math.min(this.right, bounds.right);
- this.top = Math.max(this.top, bounds.top);
- this.bottom = Math.min(this.bottom, bounds.bottom);
- return this;
-}
-
-export default PushIntoBounds;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetChildrenWidth.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetChildrenWidth.js
deleted file mode 100644
index 06ea5bba98170a646a5bc3d6fc585d967a585771..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetChildrenWidth.js
+++ /dev/null
@@ -1,19 +0,0 @@
-var GetChildrenWidth = function () {
- if (this.rexSizer.hidden) {
- return 0;
- }
-
- var result = 0;
- var children = this.sizerChildren;
- var child, padding, childWidth;
- for (var key in children) {
- child = children[key];
-
- padding = child.rexSizer.padding;
- childWidth = this.getChildWidth(child) + padding.left + padding.right;
- result = Math.max(childWidth, result);
- }
- return result + this.space.left + this.space.right;
-}
-
-export default GetChildrenWidth;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ExpandMethods.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ExpandMethods.js
deleted file mode 100644
index e2e08c956a51cedfb6aa1f4e64bfdc4cf23c0765..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ExpandMethods.js
+++ /dev/null
@@ -1,11 +0,0 @@
-export default {
- getChildExpand(gameObject) {
- return this.getSizerConfig(gameObject).expand;
- },
-
- setChildExpand(gameObject, expand) {
- this.getSizerConfig(gameObject).expand = expand;
- return this;
- },
-
-}
\ No newline at end of file
diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/symbols.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/symbols.py
deleted file mode 100644
index 789e9df25d3d93d1976ef22d15d77f51d170ed00..0000000000000000000000000000000000000000
--- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/symbols.py
+++ /dev/null
@@ -1,76 +0,0 @@
-'''
-Defines the set of symbols used in text input to the model.
-'''
-
-# japanese_cleaners
-# _pad = '_'
-# _punctuation = ',.!?-'
-# _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-
-
-'''# japanese_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
-'''
-
-
-'''# korean_cleaners
-_pad = '_'
-_punctuation = ',.!?…~'
-_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
-'''
-
-'''# chinese_cleaners
-_pad = '_'
-_punctuation = ',。!?—…'
-_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
-'''
-
-# # zh_ja_mixture_cleaners
-# _pad = '_'
-# _punctuation = ',.!?-~…'
-# _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
-
-
-'''# sanskrit_cleaners
-_pad = '_'
-_punctuation = '।'
-_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
-'''
-
-'''# cjks_cleaners
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
-'''
-
-'''# thai_cleaners
-_pad = '_'
-_punctuation = '.!? '
-_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
-'''
-
-# # cjke_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
-
-
-'''# shanghainese_cleaners
-_pad = '_'
-_punctuation = ',.!?…'
-_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
-'''
-
-'''# chinese_dialect_cleaners
-_pad = '_'
-_punctuation = ',.!?~…─'
-_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ '
-'''
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
diff --git a/spaces/AlbertoFH98/CastenaApp/utils.py b/spaces/AlbertoFH98/CastenaApp/utils.py
deleted file mode 100644
index 2016c51fc064eb4ae5002c848665660368844a63..0000000000000000000000000000000000000000
--- a/spaces/AlbertoFH98/CastenaApp/utils.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# -- Utils.py file
-# -- Libraries
-from typing import Any, Dict, List, Mapping, Optional
-from pydantic import Extra, Field, root_validator
-from langchain.llms.base import LLM
-from langchain.utils import get_from_dict_or_env
-from langchain.vectorstores import Chroma
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.chains import RetrievalQA
-from langchain.document_loaders import TextLoader
-from langchain.embeddings import HuggingFaceEmbeddings
-from googletrans import Translator
-import streamlit as st
-import together
-import textwrap
-import spacy
-import os
-import re
-
-os.environ["TOGETHER_API_KEY"] = "6101599d6e33e3bda336b8d007ca22e35a64c72cfd52c2d8197f663389fc50c5"
-
-# -- LLM class
-class TogetherLLM(LLM):
- """Together large language models."""
-
- model: str = "togethercomputer/llama-2-70b-chat"
- """model endpoint to use"""
-
- together_api_key: str = os.environ["TOGETHER_API_KEY"]
- """Together API key"""
-
- temperature: float = 0.7
- """What sampling temperature to use."""
-
- max_tokens: int = 512
- """The maximum number of tokens to generate in the completion."""
-
- class Config:
- extra = Extra.forbid
-
- @root_validator()
- def validate_environment(cls, values: Dict) -> Dict:
- """Validate that the API key is set."""
- api_key = get_from_dict_or_env(
- values, "together_api_key", "TOGETHER_API_KEY"
- )
- values["together_api_key"] = api_key
- return values
-
- @property
- def _llm_type(self) -> str:
- """Return type of LLM."""
- return "together"
-
- def clean_duplicates(self, transcription: str) -> str:
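- # Strip the 'CONTEXT:/n/n ' markers and drop lines that the model echoed
- # back more than once in its completion.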
- lines = transcription.strip().split('\n')
-
- new_transcription = []
-
- for linea in lines:
- if linea.replace('CONTEXT:/n/n ', '').replace('/n', '') not in new_transcription and linea != '':
- new_transcription.append(linea.replace('CONTEXT:/n/n ', '').replace('/n', ''))
- # Create new transcription without duplicates
- new_transcription = '\n\n'.join(new_transcription).replace("""<>
- """, """<>
- CONTEXT: """)
- return new_transcription
-
- def _call(
- self,
- prompt: str,
- **kwargs: Any,
- ) -> str:
- """Call to Together endpoint."""
- together.api_key = self.together_api_key
- output = together.Complete.create(prompt,
- model=self.model,
- max_tokens=self.max_tokens,
- temperature=self.temperature,
- )
- text = output['output']['choices'][0]['text']
- cleaned_text = self.clean_duplicates(text)
- return cleaned_text
-
-
-# -- Python function to setup basic features: translator, SpaCy pipeline and LLM model
-@st.cache_resource
-def setup_app(transcription_path, emb_model, model, _logger):
- # -- Setup environment and features
- translator = Translator(service_urls=['translate.googleapis.com'])
- nlp = spacy.load('es_core_news_lg')
-
- _logger.info('Setup environment and features...')
-
- # -- Setup LLM
- together.api_key = os.environ["TOGETHER_API_KEY"]
- # List available models and descriptions
- models = together.Models.list()
- # Start the selected Llama-2 model endpoint
- together.Models.start(model)
- _logger.info('Setup environment and features - FINISHED!')
-
- # -- Read translated transcription
- _logger.info('Loading transcription...')
- loader = TextLoader(transcription_path)
- documents = loader.load()
- # Splitting the text into chunks
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
- texts = text_splitter.split_documents(documents)
- _logger.info('Loading transcription - FINISHED!')
-
- # -- Load embedding
- _logger.info('Loading embedding...')
- encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
- model_norm = HuggingFaceEmbeddings(
- model_name=emb_model,
- model_kwargs={'device': 'cpu'},
- encode_kwargs=encode_kwargs
- )
- _logger.info('Loading embedding - FINISHED!')
-
- # -- Create document database
- _logger.info('Creating document database...')
- # Embed and store the texts
- # Supplying a persist_directory will store the embeddings on disk
- persist_directory = 'db'
- ## Here is the new embedding being used
- embedding = model_norm
-
- vectordb = Chroma.from_documents(documents=texts,
- embedding=embedding,
- persist_directory=persist_directory)
-
- # -- Make a retriever
- retriever = vectordb.as_retriever(search_type="similarity_score_threshold",
- search_kwargs={"k": 5, "score_threshold": 0.5})
- _logger.info('Creating document database - FINISHED!')
- _logger.info('Setup finished!')
- return translator, nlp, retriever
-
-# -- Function to get prompt template
-def get_prompt(instruction, system_prompt, b_sys, e_sys, b_inst, e_inst, _logger):
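- # For example, with b_inst='[INST] ', e_inst=' [/INST]', b_sys='<<SYS>>\n' and
- # e_sys='\n<</SYS>>\n\n' (the usual Llama-2 markers), this concatenation yields:
- #   [INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{instruction} [/INST]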
- new_system_prompt = b_sys + system_prompt + e_sys
- prompt_template = b_inst + new_system_prompt + instruction + e_inst
- _logger.info('Prompt template created: {}'.format(instruction))
- return prompt_template
-
-# -- Function to create the chain to answer questions
-@st.cache_resource
-def create_llm_chain(model, _retriever, _chain_type_kwargs, _logger):
- _logger.info('Creating LLM chain...')
- llm = TogetherLLM(
- model= model,
- temperature = 0.0,
- max_tokens = 1024
- )
- qa_chain = RetrievalQA.from_chain_type(llm=llm,
- chain_type="stuff",
- retriever=_retriever,
- chain_type_kwargs=_chain_type_kwargs,
- return_source_documents=True)
- _logger.info('Creating LLM chain - FINISHED!')
- return qa_chain
-
-# -------------------------------------------
-# -- Auxiliary functions
-def translate_text(text, nlp, target_lang='en'):
- # Translate the text without the proper nouns
- translator = Translator()
- # Tokenize the text and find proper nouns
- doc = nlp(text)
- named_entities = [ent.text for ent in doc if ent.pos_ == 'PROPN' and ent.dep_ in ['NNP', 'NN']]
- named_entities_list = []
- # Replace the proper nouns with temporary placeholders
- for entity in named_entities:
- text = text.replace(entity, f'__{entity}__')
- named_entities_list.append(entity)
-
- translated_text = translator.translate(text, dest=target_lang).text
- final_translated_text = []
-
- i = 0
- for text in translated_text.split(' '):
- if '__' in text and len(named_entities_list):
- final_translated_text.append(named_entities_list[i])
- i+=1
- else:
- final_translated_text.append(text)
- return ' '.join(final_translated_text)
-
-def wrap_text_preserve_newlines(text, width=110):
- # Split the input text into lines based on newline characters
- lines = text.split('\n')
-
- # Wrap each line individually
- wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
-
- # Join the wrapped lines back together using newline characters
- wrapped_text = '\n'.join(wrapped_lines)
-
- return wrapped_text
-
-def process_llm_response(llm_response, nlp):
- response = llm_response['result']
- return wrap_text_preserve_newlines(translate_text(response, nlp, target_lang='es'))
-
-
-def time_to_seconds(time_str):
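- # e.g. "01:02:03" -> 3723 and "00:01:30.500000" -> 90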
- parts = time_str.split(':')
- hours, minutes, seconds = map(float, parts)
- return int((hours * 3600) + (minutes * 60) + seconds)
-
-def add_hyperlink_and_convert_to_seconds(text):
- time_pattern = r'(\d{2}:\d{2}:\d{2}(.\d{6})?)'
-
- def replace_with_hyperlink(match):
- time_str = match.group(1)
- seconds = time_to_seconds(time_str)
- # NOTE: the anchor markup was lost here; the tag below is a minimal
- # reconstruction that calls the seek() helper injected by typewrite().
- link = f'<a class="button" href="javascript:void(0);" onclick="seek({seconds})">{time_str}</a>'
- return link
-
- modified_text = re.sub(time_pattern, replace_with_hyperlink, text)
- return modified_text
-
-# -- Streamlit HTML template
-def typewrite(text, youtube_video_url):
- js = """var player, seconds = 0;
- function onYouTubeIframeAPIReady() {
- console.log("player");
- player = new YT.Player('player', {
- events: {
- 'onReady': onPlayerReady
- }
- });
- }
-
- function onPlayerReady(event) {
- event.target.playVideo();
- }
-
-
- function seek(sec){
- if(player){
- player.seekTo(sec, true);
- }
- }
- """
-
- css = """
- .button {
- background-color: transparent;
- font-family: Tahoma, sans-serif;
- color: red;
- font-weight: bold;
- border: none;
- text-align: center;
- text-decoration: none;
- display: inline-block;
- font-size: 16px;
- cursor: pointer;
- }
- body {
- color: white;
- font-family: Tahoma, sans-serif;
- font-weight: 450;
- }
- """
-
- # NOTE: the original tags of this template were lost; the structure below is a
- # minimal reconstruction that embeds the js/css blocks defined above around a
- # YouTube iframe with id="player".
- html = f"""
- <!DOCTYPE html>
- <html>
- <head>
- <title>Modificar iframe</title>
- <style>{css}</style>
- </head>
- <body>
- <div class="transcript">{text}</div>
- <iframe id="player" src="{youtube_video_url}?enablejsapi=1" width="560" height="315" frameborder="0" allowfullscreen></iframe>
- <script src="https://www.youtube.com/iframe_api"></script>
- <script>{js}</script>
- </body>
- </html>
- """
- return html
\ No newline at end of file
diff --git a/spaces/AlexWang/lama/fetch_data/places_challenge_train_download.sh b/spaces/AlexWang/lama/fetch_data/places_challenge_train_download.sh
deleted file mode 100644
index f5317b44d16a2f295a56a52d1ce005605a137be7..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/fetch_data/places_challenge_train_download.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-mkdir places_challenge_dataset
-
-
-declare -a TARPARTS
-for i in {a..z}
-do
- TARPARTS[${#TARPARTS[@]}]="http://data.csail.mit.edu/places/places365/train_large_split/${i}.tar"
-done
-ls
-printf "%s\n" "${TARPARTS[@]}" > places_challenge_dataset/places365_train.txt
-
-cd places_challenge_dataset/
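-# fetch the 26 per-letter tar parts with up to 8 parallel wget processes,
-# then unpack every downloaded archive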
-xargs -a places365_train.txt -n 1 -P 8 wget [...]
-ls *.tar | xargs -i tar xvf {}
diff --git "a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" "b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py"
deleted file mode 100644
index 6c6b1c71bcf53dd2b5c85ba3c38becaaac3f41a7..0000000000000000000000000000000000000000
--- "a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py"
+++ /dev/null
@@ -1,75 +0,0 @@
-import threading
-from predict import predict_no_ui_long_connection
-from toolbox import CatchException, write_results_to_file
-
-
-
-@CatchException
-def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt, WEB_PORT):
- history = []    # clear the history to prevent input overflow
- # gather the source files
- import time, glob, os
- os.makedirs('gpt_log/generated_english_version', exist_ok=True)
- os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
- file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
- [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
- i_say_show_user_buffer = []
-
- # show something right away so the UI does not feel frozen
- for index, fp in enumerate(file_manifest):
- # if 'test_project' in fp: continue
- with open(fp, 'r', encoding='utf-8') as f:
- file_content = f.read()
- i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出代码: {os.path.abspath(fp)}'
- i_say_show_user_buffer.append(i_say_show_user)
- chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
- yield chatbot, history, '正常'
-
- # worker function
- mutable_return = [None for _ in file_manifest]
- def thread_worker(fp,index):
- with open(fp, 'r', encoding='utf-8') as f:
- file_content = f.read()
- i_say = f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
- # ** gpt request **
- gpt_say = predict_no_ui_long_connection(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
- mutable_return[index] = gpt_say
-
- # start all worker threads at the same time
- handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
- for h in handles:
- h.daemon = True
- h.start()
- chatbot.append(('开始了吗?', f'多线程操作已经开始'))
- yield chatbot, history, '正常'
-
- # poll the threads in a loop until they have all finished
- cnt = 0
- while True:
- time.sleep(1)
- th_alive = [h.is_alive() for h in handles]
- if not any(th_alive): break
- stat = ['执行中' if alive else '已完成' for alive in th_alive]
- stat_str = '|'.join(stat)
- cnt += 1
- chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: {stat_str}' + ''.join(['.']*(cnt%4)))
- yield chatbot, history, '正常'
-
- # write the results to files
- for index, h in enumerate(handles):
- h.join() # the join is not strictly needed here, the threads have already finished
- fp = file_manifest[index]
- gpt_say = mutable_return[index]
- i_say_show_user = i_say_show_user_buffer[index]
-
- where_to_relocate = f'gpt_log/generated_english_version/{fp}'
- with open(where_to_relocate, 'w+', encoding='utf-8') as f: f.write(gpt_say.lstrip('```').rstrip('```'))
- chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
- history.append(i_say_show_user); history.append(gpt_say)
- yield chatbot, history, '正常'
- time.sleep(1)
-
- # write a backup report file
- res = write_results_to_file(history)
- chatbot.append(("生成一份任务执行报告", res))
- yield chatbot, history, '正常'
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/__init__.py
deleted file mode 100644
index e54b088acf644d285ecbeb1440c414e722b9db58..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from .darknet import Darknet
-from .detectors_resnet import DetectoRS_ResNet
-from .detectors_resnext import DetectoRS_ResNeXt
-from .hourglass import HourglassNet
-from .hrnet import HRNet
-from .regnet import RegNet
-from .res2net import Res2Net
-from .resnest import ResNeSt
-from .resnet import ResNet, ResNetV1d
-from .resnext import ResNeXt
-from .ssd_vgg import SSDVGG
-from .trident_resnet import TridentResNet
-from .swin_transformer import SwinTransformer
-from .uniformer import UniFormer
-
-__all__ = [
- 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
- 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet',
- 'ResNeSt', 'TridentResNet', 'SwinTransformer', 'UniFormer'
-]
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/trident_roi_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/trident_roi_head.py
deleted file mode 100644
index 245569e50b45cc8e21ba8e7210edf4bd0c7f27c5..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/trident_roi_head.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import torch
-from mmcv.ops import batched_nms
-
-from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
- multiclass_nms)
-from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
-from ..builder import HEADS
-
-
-@HEADS.register_module()
-class TridentRoIHead(StandardRoIHead):
- """Trident roi head.
-
- Args:
- num_branch (int): Number of branches in TridentNet.
- test_branch_idx (int): In inference, all 3 branches will be used
- if `test_branch_idx==-1`, otherwise only branch with index
- `test_branch_idx` will be used.
- """
-
- def __init__(self, num_branch, test_branch_idx, **kwargs):
- self.num_branch = num_branch
- self.test_branch_idx = test_branch_idx
- super(TridentRoIHead, self).__init__(**kwargs)
-
- def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
- """Merge bbox predictions of each branch."""
- if trident_det_bboxes.numel() == 0:
- det_bboxes = trident_det_bboxes.new_zeros((0, 5))
- det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)
- else:
- nms_bboxes = trident_det_bboxes[:, :4]
- nms_scores = trident_det_bboxes[:, 4].contiguous()
- nms_inds = trident_det_labels
- nms_cfg = self.test_cfg['nms']
- det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,
- nms_cfg)
- det_labels = trident_det_labels[keep]
- if self.test_cfg['max_per_img'] > 0:
- det_labels = det_labels[:self.test_cfg['max_per_img']]
- det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]
-
- return det_bboxes, det_labels
-
- def simple_test(self,
- x,
- proposal_list,
- img_metas,
- proposals=None,
- rescale=False):
- """Test without augmentation as follows:
-
- 1. Compute prediction bbox and label per branch.
- 2. Merge predictions of each branch according to scores of
- bboxes, i.e., bboxes with higher score are kept to give
- top-k prediction.
- """
- assert self.with_bbox, 'Bbox head must be implemented.'
- det_bboxes_list, det_labels_list = self.simple_test_bboxes(
- x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
- num_branch = self.num_branch if self.test_branch_idx == -1 else 1
- for _ in range(len(det_bboxes_list)):
- if det_bboxes_list[_].shape[0] == 0:
- det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5))
- det_bboxes, det_labels = [], []
- for i in range(len(img_metas) // num_branch):
- det_result = self.merge_trident_bboxes(
- torch.cat(det_bboxes_list[i * num_branch:(i + 1) *
- num_branch]),
- torch.cat(det_labels_list[i * num_branch:(i + 1) *
- num_branch]))
- det_bboxes.append(det_result[0])
- det_labels.append(det_result[1])
-
- bbox_results = [
- bbox2result(det_bboxes[i], det_labels[i],
- self.bbox_head.num_classes)
- for i in range(len(det_bboxes))
- ]
- return bbox_results
-
- def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
- """Test det bboxes with test time augmentation."""
- aug_bboxes = []
- aug_scores = []
- for x, img_meta in zip(feats, img_metas):
- # only one image in the batch
- img_shape = img_meta[0]['img_shape']
- scale_factor = img_meta[0]['scale_factor']
- flip = img_meta[0]['flip']
- flip_direction = img_meta[0]['flip_direction']
-
- trident_bboxes, trident_scores = [], []
- for branch_idx in range(len(proposal_list)):
- proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
- scale_factor, flip, flip_direction)
- rois = bbox2roi([proposals])
- bbox_results = self._bbox_forward(x, rois)
- bboxes, scores = self.bbox_head.get_bboxes(
- rois,
- bbox_results['cls_score'],
- bbox_results['bbox_pred'],
- img_shape,
- scale_factor,
- rescale=False,
- cfg=None)
- trident_bboxes.append(bboxes)
- trident_scores.append(scores)
-
- aug_bboxes.append(torch.cat(trident_bboxes, 0))
- aug_scores.append(torch.cat(trident_scores, 0))
- # after merging, bboxes will be rescaled to the original image size
- merged_bboxes, merged_scores = merge_aug_bboxes(
- aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
- det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
- rcnn_test_cfg.score_thr,
- rcnn_test_cfg.nms,
- rcnn_test_cfg.max_per_img)
- return det_bboxes, det_labels
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py
deleted file mode 100644
index 3caa6cf8ae61d467628378d99a919c9db1253b91..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x512_40k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_windows.bat b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_windows.bat
deleted file mode 100644
index 531a326158e9e169657051b0e76bdfad17c4b238..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_windows.bat
+++ /dev/null
@@ -1,34 +0,0 @@
-@echo off
-
-cd /D "%~dp0"
-
-set PATH=%PATH%;%SystemRoot%\system32
-
-echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end
-
-@rem fix failed install when installing to a separate drive
-set TMP=%cd%\installer_files
-set TEMP=%cd%\installer_files
-
-@rem deactivate existing conda envs as needed to avoid conflicts
-(call conda deactivate && call conda deactivate && call conda deactivate) 2>nul
-
-@rem config
-set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
-set INSTALL_ENV_DIR=%cd%\installer_files\env
-
-@rem environment isolation
-set PYTHONNOUSERSITE=1
-set PYTHONPATH=
-set PYTHONHOME=
-set "CUDA_PATH=%INSTALL_ENV_DIR%"
-set "CUDA_HOME=%CUDA_PATH%"
-
-@rem activate installer env
-call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
-
-@rem enter commands
-cmd /k "%*"
-
-:end
-pause
diff --git a/spaces/Anustup/NS_AI_LABS/cli.py b/spaces/Anustup/NS_AI_LABS/cli.py
deleted file mode 100644
index f2f16af9c425cb501bcf088df39e95c60621ce9c..0000000000000000000000000000000000000000
--- a/spaces/Anustup/NS_AI_LABS/cli.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import argparse
-import os
-from urllib.parse import urlparse
-import warnings
-import numpy as np
-
-import whisper
-
-import torch
-from app import LANGUAGES, WhisperTranscriber
-from src.download import download_url
-
-from src.utils import optional_float, optional_int, str2bool
-
-
-def cli():
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
- parser.add_argument("--model", default="small", choices=["tiny", "base", "small", "medium", "large"], help="name of the Whisper model to use")
- parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
- parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
- parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
- parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")
-
- parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
- parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES), help="language spoken in the audio, specify None to perform language detection")
-
- parser.add_argument("--vad", type=str, default="none", choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], help="The voice activity detection algorithm to use")
- parser.add_argument("--vad_merge_window", type=optional_float, default=5, help="The window size (in seconds) to merge voice segments")
- parser.add_argument("--vad_max_merge_size", type=optional_float, default=30, help="The maximum size (in seconds) of a voice segment")
- parser.add_argument("--vad_padding", type=optional_float, default=1, help="The padding (in seconds) to add to each voice segment")
- parser.add_argument("--vad_prompt_window", type=optional_float, default=3, help="The window size of the prompt to pass to Whisper")
-
- parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
- parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
- parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
- parser.add_argument("--patience", type=float, default=None, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
- parser.add_argument("--length_penalty", type=float, default=None, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple lengt normalization by default")
-
- parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
- parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
- parser.add_argument("--condition_on_previous_text", type=str2bool, default=True, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
- parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")
-
- parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
- parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
- parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
- parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
-
- args = parser.parse_args().__dict__
- model_name: str = args.pop("model")
- model_dir: str = args.pop("model_dir")
- output_dir: str = args.pop("output_dir")
- device: str = args.pop("device")
- os.makedirs(output_dir, exist_ok=True)
-
- if model_name.endswith(".en") and args["language"] not in {"en", "English"}:
- warnings.warn(f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead.")
- args["language"] = "en"
-
- temperature = args.pop("temperature")
- temperature_increment_on_fallback = args.pop("temperature_increment_on_fallback")
- if temperature_increment_on_fallback is not None:
- temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback))
- else:
- temperature = [temperature]
-
- vad = args.pop("vad")
- vad_merge_window = args.pop("vad_merge_window")
- vad_max_merge_size = args.pop("vad_max_merge_size")
- vad_padding = args.pop("vad_padding")
- vad_prompt_window = args.pop("vad_prompt_window")
-
- model = whisper.load_model(model_name, device=device, download_root=model_dir)
- transcriber = WhisperTranscriber(deleteUploadedFiles=False)
-
- for audio_path in args.pop("audio"):
- sources = []
-
- # Detect URL and download the audio
- if (uri_validator(audio_path)):
- # Download from YouTube/URL directly
- for source_path in download_url(audio_path, maxDuration=-1, destinationDirectory=output_dir, playlistItems=None):
- source_name = os.path.basename(source_path)
- sources.append({ "path": source_path, "name": source_name })
- else:
- sources.append({ "path": audio_path, "name": os.path.basename(audio_path) })
-
- for source in sources:
- source_path = source["path"]
- source_name = source["name"]
-
- result = transcriber.transcribe_file(model, source_path, temperature=temperature,
- vad=vad, vadMergeWindow=vad_merge_window, vadMaxMergeSize=vad_max_merge_size,
- vadPadding=vad_padding, vadPromptWindow=vad_prompt_window, **args)
-
- transcriber.write_result(result, source_name, output_dir)
-
- transcriber.clear_cache()
-
-def uri_validator(x):
- try:
- result = urlparse(x)
- return all([result.scheme, result.netloc])
- except:
- return False
-
-if __name__ == '__main__':
- cli()
\ No newline at end of file
diff --git a/spaces/Apex-X/ROOPOK/roop/processors/__init__.py b/spaces/Apex-X/ROOPOK/roop/processors/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/index.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/index.py
deleted file mode 100644
index 7267effed2413ba315d0a1af8490ec677c227662..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/index.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import logging
-from optparse import Values
-from typing import Any, Iterable, List, Optional, Union
-
-from pip._vendor.packaging.version import LegacyVersion, Version
-
-from pip._internal.cli import cmdoptions
-from pip._internal.cli.req_command import IndexGroupCommand
-from pip._internal.cli.status_codes import ERROR, SUCCESS
-from pip._internal.commands.search import print_dist_installation_info
-from pip._internal.exceptions import CommandError, DistributionNotFound, PipError
-from pip._internal.index.collector import LinkCollector
-from pip._internal.index.package_finder import PackageFinder
-from pip._internal.models.selection_prefs import SelectionPreferences
-from pip._internal.models.target_python import TargetPython
-from pip._internal.network.session import PipSession
-from pip._internal.utils.misc import write_output
-
-logger = logging.getLogger(__name__)
-
-
-class IndexCommand(IndexGroupCommand):
- """
- Inspect information available from package indexes.
- """
-
- ignore_require_venv = True
- usage = """
- %prog versions
- """
-
- def add_options(self) -> None:
- cmdoptions.add_target_python_options(self.cmd_opts)
-
- self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
- self.cmd_opts.add_option(cmdoptions.pre())
- self.cmd_opts.add_option(cmdoptions.no_binary())
- self.cmd_opts.add_option(cmdoptions.only_binary())
-
- index_opts = cmdoptions.make_option_group(
- cmdoptions.index_group,
- self.parser,
- )
-
- self.parser.insert_option_group(0, index_opts)
- self.parser.insert_option_group(0, self.cmd_opts)
-
- def run(self, options: Values, args: List[str]) -> int:
- handlers = {
- "versions": self.get_available_package_versions,
- }
-
- logger.warning(
- "pip index is currently an experimental command. "
- "It may be removed/changed in a future release "
- "without prior warning."
- )
-
- # Determine action
- if not args or args[0] not in handlers:
- logger.error(
- "Need an action (%s) to perform.",
- ", ".join(sorted(handlers)),
- )
- return ERROR
-
- action = args[0]
-
- # Error handling happens here, not in the action-handlers.
- try:
- handlers[action](options, args[1:])
- except PipError as e:
- logger.error(e.args[0])
- return ERROR
-
- return SUCCESS
-
- def _build_package_finder(
- self,
- options: Values,
- session: PipSession,
- target_python: Optional[TargetPython] = None,
- ignore_requires_python: Optional[bool] = None,
- ) -> PackageFinder:
- """
- Create a package finder appropriate to the index command.
- """
- link_collector = LinkCollector.create(session, options=options)
-
- # Pass allow_yanked=False to ignore yanked versions.
- selection_prefs = SelectionPreferences(
- allow_yanked=False,
- allow_all_prereleases=options.pre,
- ignore_requires_python=ignore_requires_python,
- )
-
- return PackageFinder.create(
- link_collector=link_collector,
- selection_prefs=selection_prefs,
- target_python=target_python,
- )
-
- def get_available_package_versions(self, options: Values, args: List[Any]) -> None:
- if len(args) != 1:
- raise CommandError("You need to specify exactly one argument")
-
- target_python = cmdoptions.make_target_python(options)
- query = args[0]
-
- with self._build_session(options) as session:
- finder = self._build_package_finder(
- options=options,
- session=session,
- target_python=target_python,
- ignore_requires_python=options.ignore_requires_python,
- )
-
- versions: Iterable[Union[LegacyVersion, Version]] = (
- candidate.version for candidate in finder.find_all_candidates(query)
- )
-
- if not options.pre:
- # Remove prereleases
- versions = (
- version for version in versions if not version.is_prerelease
- )
- versions = set(versions)
-
- if not versions:
- raise DistributionNotFound(
- "No matching distribution found for {}".format(query)
- )
-
- formatted_versions = [str(ver) for ver in sorted(versions, reverse=True)]
- latest = formatted_versions[0]
-
- write_output("{} ({})".format(query, latest))
- write_output("Available versions: {}".format(", ".join(formatted_versions)))
- print_dist_installation_info(query, latest)
diff --git a/spaces/Atualli/yoloxTeste/configs/yolox_tiny.py b/spaces/Atualli/yoloxTeste/configs/yolox_tiny.py
deleted file mode 100644
index 5220de2f2e6760d5c9a966d5dd397aad721fc60a..0000000000000000000000000000000000000000
--- a/spaces/Atualli/yoloxTeste/configs/yolox_tiny.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-
-from yolox.exp import Exp as MyExp
-
-
-class Exp(MyExp):
- def __init__(self):
- super(Exp, self).__init__()
- self.depth = 0.33
- self.width = 0.375
- self.input_size = (416, 416)
- self.mosaic_scale = (0.5, 1.5)
- self.random_size = (10, 20)
- self.test_size = (416, 416)
- self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
- self.enable_mixup = False
diff --git a/spaces/Axolotlily/SketchThing/README.md b/spaces/Axolotlily/SketchThing/README.md
deleted file mode 100644
index 425d44efe5610feec99384b116eb4d85d2e85bfb..0000000000000000000000000000000000000000
--- a/spaces/Axolotlily/SketchThing/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: SketchThing
-emoji: 💻
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.14
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/BasalGanglia/stabilityai-stable-diffusion-2/README.md b/spaces/BasalGanglia/stabilityai-stable-diffusion-2/README.md
deleted file mode 100644
index aab6c6fb34d0c5d5bebf3475d6e3cfa7453c7a5f..0000000000000000000000000000000000000000
--- a/spaces/BasalGanglia/stabilityai-stable-diffusion-2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stabilityai Stable Diffusion 2
-emoji: 👀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Benson/text-generation/Examples/Audio Download Messenger.md b/spaces/Benson/text-generation/Examples/Audio Download Messenger.md
deleted file mode 100644
index 8fae9172f27b887bb0a3e3f658bf720dd539314a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Audio Download Messenger.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-How to Download Audio from Messenger
-Have you ever received an audio file or a voice message on Messenger that you wanted to save or share with someone else? If so, you may have noticed that there is no easy way to download audio from Messenger on your phone or computer. Unlike videos and photos, you cannot simply tap or right-click an audio file and choose to save it. So how can you download audio from Messenger?
-In this article we show you how to download audio from Messenger using a simple trick that works on both mobile devices and PC. You will learn what "audio download messenger" means, why you might want to download audio from Messenger, and how to do it step by step. By following this guide, you will be able to save any audio file or voice message that you receive or send on Messenger.
- What Is Audio Download Messenger?
-"Audio download messenger" refers to any method or tool that lets you download audio files or voice messages from Messenger. Messenger is a popular messaging app that lets you send and receive text messages, photos, videos, stickers, emojis, GIFs and more. You can also send and receive voice messages, or record your own voice using the microphone icon.
-However, unlike other types of media, you cannot easily download audio files or voice messages from Messenger, because Messenger has no built-in feature for saving or exporting audio. You can only play them inside the app or forward them to another conversation.
-Fortunately, there is a way to download audio from Messenger using a simple trick: open the mobile version of Facebook in your web browser. This trick works on both mobile devices and PC, as long as you have an internet connection and a web browser.
- Why Download Audio from Messenger?
-
-If you want to download audio from Messenger on a mobile device, such as a smartphone or tablet, you will need to use the mobile version of Facebook in your web browser, because the Messenger app has no download option for audio files or voice messages. These are the steps to follow:
- Step 1: Switch to mobile data and open your mobile browser
-The first step is to switch to mobile data and open your mobile browser, because the mobile version of Facebook does not work well on Wi-Fi networks. You can use any browser you prefer, such as Chrome, Safari, Firefox or Opera.
- Step 2: Log in to m.facebook.com and go to your messages
-Next, log in to m.facebook.com and go to your messages. This is the mobile version of Facebook, which has a different layout and interface from the regular one. Enter your email address or phone number and your password to log in. Once you are logged in, you will see a menu icon in the top right corner of the screen. Tap it and select Messages from the list.
- Step 3: Open the message with the audio file and click the link
-The third step is to open the message with the audio file and click the link. You will see a list of all your Messenger conversations. Tap the one that contains the audio file or voice message you want to download. Below the audio file you will see a link that says "View voice message" or "View audio file". Tap it and a new tab will open in your browser.
- Step 4: Click the download option and wait for it to finish
-
- Step 5: Locate the audio file in your browser's download manager
-Finally, locate the audio file in your browser's download manager. Once the download has finished you will see a "Download complete" notification. Tap it and it will take you to your browser's download manager, where you can see all the files you have downloaded. You will see the audio file with its name and size. Tap it and it will open in your default media player.
- How to Download Audio from Messenger on PC
-If you want to download audio from Messenger on your PC, such as a laptop or desktop, you will also need to use the mobile version of Facebook in your web browser, because the regular version of Facebook has no download option for audio files or voice messages. These are the steps to follow:
- Step 1: Open your web browser and go to m.facebook.com
-First, open your web browser and go to m.facebook.com. You can use any browser you prefer, such as Chrome, Firefox, Edge or Safari. You will see the mobile version of Facebook, which has a different layout and interface from the regular one.
- Step 2: Click the Messenger icon and select the message with the audio file
-Next, click the Messenger icon in the top right corner of the screen; it will take you to your Messenger messages. You will see a list of all your conversations. Click the one that contains the audio file or voice message you want to download.
- Step 3: Click the three dots next to the audio file and select download
-
- Step 4: Choose the folder where you want to save the audio file and click save
-Finally, choose the folder where you want to save the audio file and click save. A pop-up window will ask where you want to save the file. You can pick any folder you prefer, such as Downloads, Documents, Music or Desktop, and you can also rename the audio file if you wish. Once you have chosen the folder and the name, click Save and the audio file will be stored on your PC.
- Conclusion
-Downloading audio from Messenger is not as hard as it might seem. You just need a simple trick: use the mobile version of Facebook in your web browser. It works on both mobile devices and PC, as long as you have an internet connection and a web browser.
-By following this guide you will be able to download any audio file or voice message that you receive or send on Messenger, save data and storage space, preserve memories and moments, and share files and information with others.
-So what are you waiting for? Try it yourself and see how easy it is to download audio from Messenger!
- Frequently Asked Questions
-These are some of the most common questions people have about downloading audio from Messenger:
- Q: Can I download audio from Messenger using the Messenger app?
-A: No. The Messenger app has no download option for audio files or voice messages; you can only play them inside the app or forward them to another conversation.
- Q: Can I download audio from Messenger using the regular version of Facebook?
-
- Q: Can I download video files or photos from Messenger using this trick?
-A: Yes. The same steps apply to video files and photos as to audio files and voice messages. However, you can also download videos or photos from Messenger using the Messenger app or the regular version of Facebook by tapping or right-clicking them and choosing save.
- Q: Can I download several audio files from Messenger at once using this trick?
-A: No, you can only download one audio file at a time with this trick. If you want to download several audio files at once, you will need a third-party tool that can do it.
- Q: Is it legal to download audio from Messenger?
-A: Downloading audio from Messenger is legal as long as you have the permission of the sender or the owner of the audio file. You should not download audio that is copyrighted, illegal, or infringes the rights of others, and you should respect the privacy and confidentiality of the sender or owner and not share the file without their consent.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Dead By Daylight Mvil Descargar Mediafre.md b/spaces/Benson/text-generation/Examples/Dead By Daylight Mvil Descargar Mediafre.md
deleted file mode 100644
index a818cc975847d44d42f982c704e832c5f3ded22a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Dead By Daylight Mvil Descargar Mediafre.md
+++ /dev/null
@@ -1,152 +0,0 @@
-
-Download Rise of Kingdoms: Lost Crusade and Conquer the World
-Do you enjoy strategy games that challenge your mind and skills? Do you want to experience the thrill of leading a civilization from a small clan to a mighty empire? Do you want to explore a vast, mysterious world full of secrets and treasures? If you answered yes to any of these questions, then you should download Rise of Kingdoms: Lost Crusade, one of the most popular and immersive strategy games on the market. In this article, we will tell you everything you need to know about the game, how to download it on your device, and how to play it effectively.
- What Is Rise of Kingdoms: Lost Crusade?
-Rise of Kingdoms: Lost Crusade is a strategy game developed by LilithGames that lets you choose and lead one of 13 historical civilizations from world history. You can turn your small tribe into an unstoppable global superpower through economic pragmatism and military might. Your decisions, strategies, economic partnerships and military alliances will decide the fate of your budding nation.
-But Rise of Kingdoms: Lost Crusade is not just a strategy game. It is also a real-time battle game, a seamless world-map game, and an alliance-system game. These are some of the features that make it so distinctive and engaging:
- A strategy game that lets you choose and lead one of 13 historical civilizations
-No two civilizations are alike in Rise of Kingdoms: Lost Crusade. Each culture has unique architecture, exclusive units, and special boosts and abilities that set it apart. As a leader, you must learn how to make the most of your advantages to defeat your rivals and cement your position as a world power.
-
- A real-time battle game that lets you join or leave any fight on the map
-Battles happen in real time on the map in Rise of Kingdoms: Lost Crusade. Anyone can join or leave a battle at any moment, which allows for true RTS gameplay. Can you see an ally being attacked right in your backyard? Send troops to help your friend, or launch a surprise counterattack on the attacker's city. You can also send troops to gather resources from nearby forests or mines, or pick off a few barbarian clans along the way.
-You can also split your forces among several commanders so you can take part in multiple actions at the same time. Each commander has their own skills and abilities that can turn the tide of battle in your favor. You can also level up your commanders by completing quests, taking part in events, or using special items.
- A seamless world-map game that lets you explore and investigate a mysterious land
-Your world is covered in dense fog in Rise of Kingdoms: Lost Crusade. You need to send scouts to explore this mysterious land and uncover the treasure hidden within. You can investigate lost temples, barbarian fortresses, mysterious caves and tribal villages, gather valuable resources and ancient runes, and learn more about the history and secrets of this world. You can also find rare and powerful artifacts that can give you an edge in your conquests.
-But be careful, you are not alone in this land. Other players will try to claim the same resources and treasures as you, and you may run into fierce resistance or unexpected alliances. You can also face powerful enemies such as the Dark Guardian and the Ceroli Crisis that will test your skills and strategies to the limit.
- An alliance-system game that lets you cooperate and communicate with other players
-
-Being in an alliance also gives you access to more benefits and features, such as the alliance shop, alliance technology, alliance territory and alliance flags. You can also take part in alliance events and quests that can reward you with valuable items and resources. You can also wage war against other alliances or form diplomatic relations with them.
-
- How to Download Rise of Kingdoms: Lost Crusade on Your Device
-Rise of Kingdoms: Lost Crusade is available for free on several platforms and devices. You can download it from the following sources:
- Download from the Google Play Store for Android devices
-If you have an Android device, you can download Rise of Kingdoms: Lost Crusade from the Google Play Store. Just follow these steps:
-
-- Open the Google Play Store app on your device.
-- Search for Rise of Kingdoms: Lost Crusade in the search bar.
-- Select the game from the list of results and tap Install.
-- Wait for the game to download and install on your device.
-- Launch the game and enjoy!
-
- Download from BlueStacks for PC or Mac
-If you want to play Rise of Kingdoms: Lost Crusade on your PC or Mac, you can use BlueStacks, a popular Android emulator that lets you run Android apps on your computer. Here is how to do it:
-
-- Download and install BlueStacks on your PC or Mac from https://www.bluestacks.com/.
-- Launch BlueStacks and sign in with your Google account.
-- Search for Rise of Kingdoms: Lost Crusade in the search bar.
-- Select the game from the list of results and click Install.
-- Wait for the game to download and install on your computer.
-- Launch the game and enjoy!
-
- Download from Pocket Gamer for browser-based play
-
-
- `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
- However, notice that this type does *NOT* implement the same algorithm as
- his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
-
- There are two different ways to build a relativedelta instance. The
- first one is passing it two date/datetime classes::
-
- relativedelta(datetime1, datetime2)
-
- The second one is passing it any number of the following keyword arguments::
-
- relativedelta(arg1=x,arg2=y,arg3=z...)
-
- year, month, day, hour, minute, second, microsecond:
- Absolute information (argument is singular); adding or subtracting a
- relativedelta with absolute information does not perform an arithmetic
- operation, but rather REPLACES the corresponding value in the
- original datetime with the value(s) in relativedelta.
-
- years, months, weeks, days, hours, minutes, seconds, microseconds:
- Relative information, may be negative (argument is plural); adding
- or subtracting a relativedelta with relative information performs
- the corresponding arithmetic operation on the original datetime value
- with the information in the relativedelta.
-
- weekday:
- One of the weekday instances (MO, TU, etc) available in the
- relativedelta module. These instances may receive a parameter N,
- specifying the Nth weekday, which could be positive or negative
- (like MO(+1) or MO(-2)). Not specifying it is the same as specifying
- +1. You can also use an integer, where 0=MO. This argument is always
- relative e.g. if the calculated date is already Monday, using MO(1)
- or MO(-1) won't change the day. To effectively make it absolute, use
- it in combination with the day argument (e.g. day=1, MO(1) for first
- Monday of the month).
-
- leapdays:
- Will add given days to the date found, if year is a leap
- year, and the date found is post 28 of february.
-
- yearday, nlyearday:
- Set the yearday or the non-leap year day (jump leap days).
- These are converted to day/month/leapdays information.
-
- There are relative and absolute forms of the keyword
- arguments. The plural is relative, and the singular is
- absolute. For each argument in the order below, the absolute form
- is applied first (by setting each attribute to that value) and
- then the relative form (by adding the value to the attribute).
-
- The order of attributes considered when this relativedelta is
- added to a datetime is:
-
- 1. Year
- 2. Month
- 3. Day
- 4. Hours
- 5. Minutes
- 6. Seconds
- 7. Microseconds
-
- Finally, weekday is applied, using the rule described above.
-
- For example
-
- >>> from datetime import datetime
- >>> from dateutil.relativedelta import relativedelta, MO
- >>> dt = datetime(2018, 4, 9, 13, 37, 0)
- >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
- >>> dt + delta
- datetime.datetime(2018, 4, 2, 14, 37)
-
- First, the day is set to 1 (the first of the month), then 25 hours
- are added, to get to the 2nd day and 14th hour, finally the
- weekday is applied, but since the 2nd is already a Monday there is
- no effect.
-
- """
-
- def __init__(self, dt1=None, dt2=None,
- years=0, months=0, days=0, leapdays=0, weeks=0,
- hours=0, minutes=0, seconds=0, microseconds=0,
- year=None, month=None, day=None, weekday=None,
- yearday=None, nlyearday=None,
- hour=None, minute=None, second=None, microsecond=None):
-
- if dt1 and dt2:
- # datetime is a subclass of date. So both must be date
- if not (isinstance(dt1, datetime.date) and
- isinstance(dt2, datetime.date)):
- raise TypeError("relativedelta only diffs datetime/date")
-
- # We allow two dates, or two datetimes, so we coerce them to be
- # of the same type
- if (isinstance(dt1, datetime.datetime) !=
- isinstance(dt2, datetime.datetime)):
- if not isinstance(dt1, datetime.datetime):
- dt1 = datetime.datetime.fromordinal(dt1.toordinal())
- elif not isinstance(dt2, datetime.datetime):
- dt2 = datetime.datetime.fromordinal(dt2.toordinal())
-
- self.years = 0
- self.months = 0
- self.days = 0
- self.leapdays = 0
- self.hours = 0
- self.minutes = 0
- self.seconds = 0
- self.microseconds = 0
- self.year = None
- self.month = None
- self.day = None
- self.weekday = None
- self.hour = None
- self.minute = None
- self.second = None
- self.microsecond = None
- self._has_time = 0
-
- # Get year / month delta between the two
- months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
- self._set_months(months)
-
- # Remove the year/month delta so the timedelta is just well-defined
- # time units (seconds, days and microseconds)
- dtm = self.__radd__(dt2)
-
- # If we've overshot our target, make an adjustment
- if dt1 < dt2:
- compare = operator.gt
- increment = 1
- else:
- compare = operator.lt
- increment = -1
-
- while compare(dt1, dtm):
- months += increment
- self._set_months(months)
- dtm = self.__radd__(dt2)
-
- # Get the timedelta between the "months-adjusted" date and dt1
- delta = dt1 - dtm
- self.seconds = delta.seconds + delta.days * 86400
- self.microseconds = delta.microseconds
- else:
- # Check for non-integer values in integer-only quantities
- if any(x is not None and x != int(x) for x in (years, months)):
- raise ValueError("Non-integer years and months are "
- "ambiguous and not currently supported.")
-
- # Relative information
- self.years = int(years)
- self.months = int(months)
- self.days = days + weeks * 7
- self.leapdays = leapdays
- self.hours = hours
- self.minutes = minutes
- self.seconds = seconds
- self.microseconds = microseconds
-
- # Absolute information
- self.year = year
- self.month = month
- self.day = day
- self.hour = hour
- self.minute = minute
- self.second = second
- self.microsecond = microsecond
-
- if any(x is not None and int(x) != x
- for x in (year, month, day, hour,
- minute, second, microsecond)):
- # For now we'll deprecate floats - later it'll be an error.
- warn("Non-integer value passed as absolute information. " +
- "This is not a well-defined condition and will raise " +
- "errors in future versions.", DeprecationWarning)
-
- if isinstance(weekday, integer_types):
- self.weekday = weekdays[weekday]
- else:
- self.weekday = weekday
-
- yday = 0
- if nlyearday:
- yday = nlyearday
- elif yearday:
- yday = yearday
- if yearday > 59:
- self.leapdays = -1
- if yday:
- ydayidx = [31, 59, 90, 120, 151, 181, 212,
- 243, 273, 304, 334, 366]
- for idx, ydays in enumerate(ydayidx):
- if yday <= ydays:
- self.month = idx+1
- if idx == 0:
- self.day = yday
- else:
- self.day = yday-ydayidx[idx-1]
- break
- else:
- raise ValueError("invalid year day (%d)" % yday)
-
- self._fix()
-
- def _fix(self):
- if abs(self.microseconds) > 999999:
- s = _sign(self.microseconds)
- div, mod = divmod(self.microseconds * s, 1000000)
- self.microseconds = mod * s
- self.seconds += div * s
- if abs(self.seconds) > 59:
- s = _sign(self.seconds)
- div, mod = divmod(self.seconds * s, 60)
- self.seconds = mod * s
- self.minutes += div * s
- if abs(self.minutes) > 59:
- s = _sign(self.minutes)
- div, mod = divmod(self.minutes * s, 60)
- self.minutes = mod * s
- self.hours += div * s
- if abs(self.hours) > 23:
- s = _sign(self.hours)
- div, mod = divmod(self.hours * s, 24)
- self.hours = mod * s
- self.days += div * s
- if abs(self.months) > 11:
- s = _sign(self.months)
- div, mod = divmod(self.months * s, 12)
- self.months = mod * s
- self.years += div * s
- if (self.hours or self.minutes or self.seconds or self.microseconds
- or self.hour is not None or self.minute is not None or
- self.second is not None or self.microsecond is not None):
- self._has_time = 1
- else:
- self._has_time = 0
-
- @property
- def weeks(self):
- return int(self.days / 7.0)
-
- @weeks.setter
- def weeks(self, value):
- self.days = self.days - (self.weeks * 7) + value * 7
-
- def _set_months(self, months):
- self.months = months
- if abs(self.months) > 11:
- s = _sign(self.months)
- div, mod = divmod(self.months * s, 12)
- self.months = mod * s
- self.years = div * s
- else:
- self.years = 0
-
- def normalized(self):
- """
- Return a version of this object represented entirely using integer
- values for the relative attributes.
-
- >>> relativedelta(days=1.5, hours=2).normalized()
- relativedelta(days=+1, hours=+14)
-
- :return:
- Returns a :class:`dateutil.relativedelta.relativedelta` object.
- """
- # Cascade remainders down (rounding each to roughly nearest microsecond)
- days = int(self.days)
-
- hours_f = round(self.hours + 24 * (self.days - days), 11)
- hours = int(hours_f)
-
- minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
- minutes = int(minutes_f)
-
- seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
- seconds = int(seconds_f)
-
- microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
-
- # Constructor carries overflow back up with call to _fix()
- return self.__class__(years=self.years, months=self.months,
- days=days, hours=hours, minutes=minutes,
- seconds=seconds, microseconds=microseconds,
- leapdays=self.leapdays, year=self.year,
- month=self.month, day=self.day,
- weekday=self.weekday, hour=self.hour,
- minute=self.minute, second=self.second,
- microsecond=self.microsecond)
-
- def __add__(self, other):
- if isinstance(other, relativedelta):
- return self.__class__(years=other.years + self.years,
- months=other.months + self.months,
- days=other.days + self.days,
- hours=other.hours + self.hours,
- minutes=other.minutes + self.minutes,
- seconds=other.seconds + self.seconds,
- microseconds=(other.microseconds +
- self.microseconds),
- leapdays=other.leapdays or self.leapdays,
- year=(other.year if other.year is not None
- else self.year),
- month=(other.month if other.month is not None
- else self.month),
- day=(other.day if other.day is not None
- else self.day),
- weekday=(other.weekday if other.weekday is not None
- else self.weekday),
- hour=(other.hour if other.hour is not None
- else self.hour),
- minute=(other.minute if other.minute is not None
- else self.minute),
- second=(other.second if other.second is not None
- else self.second),
- microsecond=(other.microsecond if other.microsecond
- is not None else
- self.microsecond))
- if isinstance(other, datetime.timedelta):
- return self.__class__(years=self.years,
- months=self.months,
- days=self.days + other.days,
- hours=self.hours,
- minutes=self.minutes,
- seconds=self.seconds + other.seconds,
- microseconds=self.microseconds + other.microseconds,
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
- if not isinstance(other, datetime.date):
- return NotImplemented
- elif self._has_time and not isinstance(other, datetime.datetime):
- other = datetime.datetime.fromordinal(other.toordinal())
- year = (self.year or other.year)+self.years
- month = self.month or other.month
- if self.months:
- assert 1 <= abs(self.months) <= 12
- month += self.months
- if month > 12:
- year += 1
- month -= 12
- elif month < 1:
- year -= 1
- month += 12
- day = min(calendar.monthrange(year, month)[1],
- self.day or other.day)
- repl = {"year": year, "month": month, "day": day}
- for attr in ["hour", "minute", "second", "microsecond"]:
- value = getattr(self, attr)
- if value is not None:
- repl[attr] = value
- days = self.days
- if self.leapdays and month > 2 and calendar.isleap(year):
- days += self.leapdays
- ret = (other.replace(**repl)
- + datetime.timedelta(days=days,
- hours=self.hours,
- minutes=self.minutes,
- seconds=self.seconds,
- microseconds=self.microseconds))
- if self.weekday:
- weekday, nth = self.weekday.weekday, self.weekday.n or 1
- jumpdays = (abs(nth) - 1) * 7
- if nth > 0:
- jumpdays += (7 - ret.weekday() + weekday) % 7
- else:
- jumpdays += (ret.weekday() - weekday) % 7
- jumpdays *= -1
- ret += datetime.timedelta(days=jumpdays)
- return ret
-
- def __radd__(self, other):
- return self.__add__(other)
-
- def __rsub__(self, other):
- return self.__neg__().__radd__(other)
-
- def __sub__(self, other):
- if not isinstance(other, relativedelta):
- return NotImplemented # In case the other object defines __rsub__
- return self.__class__(years=self.years - other.years,
- months=self.months - other.months,
- days=self.days - other.days,
- hours=self.hours - other.hours,
- minutes=self.minutes - other.minutes,
- seconds=self.seconds - other.seconds,
- microseconds=self.microseconds - other.microseconds,
- leapdays=self.leapdays or other.leapdays,
- year=(self.year if self.year is not None
- else other.year),
- month=(self.month if self.month is not None else
- other.month),
- day=(self.day if self.day is not None else
- other.day),
- weekday=(self.weekday if self.weekday is not None else
- other.weekday),
- hour=(self.hour if self.hour is not None else
- other.hour),
- minute=(self.minute if self.minute is not None else
- other.minute),
- second=(self.second if self.second is not None else
- other.second),
- microsecond=(self.microsecond if self.microsecond
- is not None else
- other.microsecond))
-
- def __abs__(self):
- return self.__class__(years=abs(self.years),
- months=abs(self.months),
- days=abs(self.days),
- hours=abs(self.hours),
- minutes=abs(self.minutes),
- seconds=abs(self.seconds),
- microseconds=abs(self.microseconds),
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
-
- def __neg__(self):
- return self.__class__(years=-self.years,
- months=-self.months,
- days=-self.days,
- hours=-self.hours,
- minutes=-self.minutes,
- seconds=-self.seconds,
- microseconds=-self.microseconds,
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
-
- def __bool__(self):
- return not (not self.years and
- not self.months and
- not self.days and
- not self.hours and
- not self.minutes and
- not self.seconds and
- not self.microseconds and
- not self.leapdays and
- self.year is None and
- self.month is None and
- self.day is None and
- self.weekday is None and
- self.hour is None and
- self.minute is None and
- self.second is None and
- self.microsecond is None)
- # Compatibility with Python 2.x
- __nonzero__ = __bool__
-
- def __mul__(self, other):
- try:
- f = float(other)
- except TypeError:
- return NotImplemented
-
- return self.__class__(years=int(self.years * f),
- months=int(self.months * f),
- days=int(self.days * f),
- hours=int(self.hours * f),
- minutes=int(self.minutes * f),
- seconds=int(self.seconds * f),
- microseconds=int(self.microseconds * f),
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
-
- __rmul__ = __mul__
-
- def __eq__(self, other):
- if not isinstance(other, relativedelta):
- return NotImplemented
- if self.weekday or other.weekday:
- if not self.weekday or not other.weekday:
- return False
- if self.weekday.weekday != other.weekday.weekday:
- return False
- n1, n2 = self.weekday.n, other.weekday.n
- if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
- return False
- return (self.years == other.years and
- self.months == other.months and
- self.days == other.days and
- self.hours == other.hours and
- self.minutes == other.minutes and
- self.seconds == other.seconds and
- self.microseconds == other.microseconds and
- self.leapdays == other.leapdays and
- self.year == other.year and
- self.month == other.month and
- self.day == other.day and
- self.hour == other.hour and
- self.minute == other.minute and
- self.second == other.second and
- self.microsecond == other.microsecond)
-
- def __hash__(self):
- return hash((
- self.weekday,
- self.years,
- self.months,
- self.days,
- self.hours,
- self.minutes,
- self.seconds,
- self.microseconds,
- self.leapdays,
- self.year,
- self.month,
- self.day,
- self.hour,
- self.minute,
- self.second,
- self.microsecond,
- ))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __div__(self, other):
- try:
- reciprocal = 1 / float(other)
- except TypeError:
- return NotImplemented
-
- return self.__mul__(reciprocal)
-
- __truediv__ = __div__
-
- def __repr__(self):
- l = []
- for attr in ["years", "months", "days", "leapdays",
- "hours", "minutes", "seconds", "microseconds"]:
- value = getattr(self, attr)
- if value:
- l.append("{attr}={value:+g}".format(attr=attr, value=value))
- for attr in ["year", "month", "day", "weekday",
- "hour", "minute", "second", "microsecond"]:
- value = getattr(self, attr)
- if value is not None:
- l.append("{attr}={value}".format(attr=attr, value=repr(value)))
- return "{classname}({attrs})".format(classname=self.__class__.__name__,
- attrs=", ".join(l))
-
-
-def _sign(x):
- return int(copysign(1, x))
-
-# vim:ts=4:sw=4:et
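Editor's note: to make the absolute/relative semantics and the weekday rule documented in the docstring above concrete, here is a minimal usage sketch (it assumes only that python-dateutil is installed; the dates are chosen to line up with the docstring's own example):

    from datetime import datetime
    from dateutil.relativedelta import relativedelta, MO

    dt = datetime(2018, 4, 9, 13, 37, 0)
    # Plural (relative) arguments shift the datetime arithmetically:
    print(dt + relativedelta(months=1))              # 2018-05-09 13:37:00
    # Singular (absolute) arguments replace fields first, then the weekday rule applies:
    print(dt + relativedelta(day=1, weekday=MO(1)))  # 2018-04-02 13:37:00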
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/__init__.py
deleted file mode 100644
index b6beddbe6d24d2949dc89ed07abfebd59d8b63b9..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Expose a limited set of classes and functions so callers outside of
-# the vcs package don't need to import deeper than `pip._internal.vcs`.
-# (The test directory may still need to import from a vcs sub-package.)
-# Import all vcs modules to register each VCS in the VcsSupport object.
-import pip._internal.vcs.bazaar
-import pip._internal.vcs.git
-import pip._internal.vcs.mercurial
-import pip._internal.vcs.subversion # noqa: F401
-from pip._internal.vcs.versioncontrol import ( # noqa: F401
- RemoteNotFoundError,
- RemoteNotValidError,
- is_url,
- make_vcs_requirement_url,
- vcs,
-)
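The comments above describe a register-on-import pattern: importing each backend module is what adds it to the shared VcsSupport registry exported as `vcs`. A minimal sketch of querying that registry; note that this is pip internal API, not a stable public interface, so treat the calls as illustrative only:

    from pip._internal.vcs import vcs, is_url

    print(is_url("git+https://github.com/pypa/pip.git"))  # True
    backend = vcs.get_backend("git")                       # a VersionControl instance, or None
    print(type(backend).__name__ if backend else "git backend not registered")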
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/__main__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/__main__.py
deleted file mode 100644
index 7171f13114e09f47eb8981d39b7a838e8ab88acb..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/__main__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-from pip._vendor.platformdirs import PlatformDirs, __version__
-
-PROPS = (
- "user_data_dir",
- "user_config_dir",
- "user_cache_dir",
- "user_state_dir",
- "user_log_dir",
- "user_documents_dir",
- "user_runtime_dir",
- "site_data_dir",
- "site_config_dir",
- "site_cache_dir",
-)
-
-
-def main() -> None:
- app_name = "MyApp"
- app_author = "MyCompany"
-
- print(f"-- platformdirs {__version__} --")
-
- print("-- app dirs (with optional 'version')")
- dirs = PlatformDirs(app_name, app_author, version="1.0")
- for prop in PROPS:
- print(f"{prop}: {getattr(dirs, prop)}")
-
- print("\n-- app dirs (without optional 'version')")
- dirs = PlatformDirs(app_name, app_author)
- for prop in PROPS:
- print(f"{prop}: {getattr(dirs, prop)}")
-
- print("\n-- app dirs (without optional 'appauthor')")
- dirs = PlatformDirs(app_name)
- for prop in PROPS:
- print(f"{prop}: {getattr(dirs, prop)}")
-
- print("\n-- app dirs (with disabled 'appauthor')")
- dirs = PlatformDirs(app_name, appauthor=False)
- for prop in PROPS:
- print(f"{prop}: {getattr(dirs, prop)}")
-
-
-if __name__ == "__main__":
- main()
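Beyond this report script, the same class can be used directly. A small sketch using the copy vendored inside pip (the stand-alone `platformdirs` distribution exposes the identical constructor and properties):

    from pip._vendor.platformdirs import PlatformDirs

    dirs = PlatformDirs("MyApp", "MyCompany", version="1.0")
    print(dirs.user_cache_dir)   # e.g. ~/.cache/MyApp/1.0 on Linux
    print(dirs.site_config_dir)  # machine-wide configuration location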
diff --git a/spaces/BilalSardar/Text-To-image-AllModels/README.md b/spaces/BilalSardar/Text-To-image-AllModels/README.md
deleted file mode 100644
index 6232986d4f1345a7ab0763a1db0bac0a57c15d87..0000000000000000000000000000000000000000
--- a/spaces/BilalSardar/Text-To-image-AllModels/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text To Image AllModels
-emoji: 🐠
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/dataset_mapper.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/dataset_mapper.py
deleted file mode 100644
index 8685993a029c6da87f22ed16c3dadc6bfd770232..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/dataset_mapper.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
-import logging
-import numpy as np
-import torch
-from fvcore.common.file_io import PathManager
-from PIL import Image
-
-from detectron2.data import detection_utils as utils
-from detectron2.data import transforms as T
-from detectron2.data import DatasetMapper
-from detectron2.structures import (
- BitMasks,
- Boxes,
- BoxMode,
- Instances,
- Keypoints,
- PolygonMasks,
- polygons_to_bitmask,
-)
-
-
-def annotations_to_instances_with_attributes(annos,
- image_size,
- mask_format="polygon",
- load_attributes=False,
- max_attr_per_ins=16):
- """
- Extend the function annotations_to_instances() to support attributes
- """
- boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
- target = Instances(image_size)
- boxes = target.gt_boxes = Boxes(boxes)
- boxes.clip(image_size)
-
- classes = [obj["category_id"] for obj in annos]
- classes = torch.tensor(classes, dtype=torch.int64)
- target.gt_classes = classes
-
- if len(annos) and "segmentation" in annos[0]:
- segms = [obj["segmentation"] for obj in annos]
- if mask_format == "polygon":
- masks = PolygonMasks(segms)
- else:
- assert mask_format == "bitmask", mask_format
- masks = []
- for segm in segms:
- if isinstance(segm, list):
- # polygon
- masks.append(polygons_to_bitmask(segm, *image_size))
- elif isinstance(segm, dict):
- # COCO RLE
- masks.append(mask_util.decode(segm))
- elif isinstance(segm, np.ndarray):
- assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
- segm.ndim
- )
- # mask array
- masks.append(segm)
- else:
- raise ValueError(
- "Cannot convert segmentation of type '{}' to BitMasks!"
- "Supported types are: polygons as list[list[float] or ndarray],"
- " COCO-style RLE as a dict, or a full-image segmentation mask "
- "as a 2D ndarray.".format(type(segm))
- )
- masks = BitMasks(
- torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
- )
- target.gt_masks = masks
-
- if len(annos) and "keypoints" in annos[0]:
- kpts = [obj.get("keypoints", []) for obj in annos]
- target.gt_keypoints = Keypoints(kpts)
-
- if len(annos) and load_attributes:
- attributes = -torch.ones((len(annos), max_attr_per_ins), dtype=torch.int64)
- for idx, anno in enumerate(annos):
- if "attribute_ids" in anno:
- for jdx, attr_id in enumerate(anno["attribute_ids"]):
- attributes[idx, jdx] = attr_id
- target.gt_attributes = attributes
-
- return target
-
-
-class AttributeDatasetMapper(DatasetMapper):
- """
- Extend DatasetMapper to support attributes.
- """
- def __init__(self, cfg, is_train=True):
- super().__init__(cfg, is_train)
-
- # fmt: off
- self.attribute_on = cfg.MODEL.ATTRIBUTE_ON
- self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS
- # fmt: on
-
- def __call__(self, dataset_dict):
- dataset_dict = copy.deepcopy(dataset_dict)
- image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
- utils.check_image_size(dataset_dict, image)
-
- if "annotations" not in dataset_dict:
- image, transforms = T.apply_transform_gens(
- ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
- )
- else:
- if self.crop_gen:
- crop_tfm = utils.gen_crop_transform_with_instance(
- self.crop_gen.get_crop_size(image.shape[:2]),
- image.shape[:2],
- np.random.choice(dataset_dict["annotations"]),
- )
- image = crop_tfm.apply_image(image)
- image, transforms = T.apply_transform_gens(self.tfm_gens, image)
- if self.crop_gen:
- transforms = crop_tfm + transforms
-
- image_shape = image.shape[:2]
- dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-
- if self.load_proposals:
- utils.transform_proposals(
- dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
- )
-
- if not self.is_train:
- dataset_dict.pop("annotations", None)
- dataset_dict.pop("sem_seg_file_name", None)
- return dataset_dict
-
- if "annotations" in dataset_dict:
- for anno in dataset_dict["annotations"]:
- if not self.mask_on:
- anno.pop("segmentation", None)
- if not self.keypoint_on:
- anno.pop("keypoints", None)
- if not self.attribute_on:
- anno.pop("attribute_ids")
-
- annos = [
- utils.transform_instance_annotations(
- obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
- )
- for obj in dataset_dict.pop("annotations")
- if obj.get("iscrowd", 0) == 0
- ]
- instances = annotations_to_instances_with_attributes(
- annos, image_shape, mask_format=self.mask_format,
- load_attributes=self.attribute_on, max_attr_per_ins=self.max_attr_per_ins
- )
- if self.crop_gen and instances.has("gt_masks"):
- instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
- dataset_dict["instances"] = utils.filter_empty_instances(instances)
-
- if "sem_seg_file_name" in dataset_dict:
- with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f:
- sem_seg_gt = Image.open(f)
- sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
- sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
- sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
- dataset_dict["sem_seg"] = sem_seg_gt
- return dataset_dict
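To make the attribute handling in `annotations_to_instances_with_attributes` easier to follow, here is a self-contained sketch of just the padding step (plain PyTorch, no detectron2 needed; the annotation dicts are invented): every instance gets a fixed-width row of attribute ids, padded with -1.

    import torch

    annos = [{"attribute_ids": [3, 17]}, {"attribute_ids": []}]
    max_attr_per_ins = 16
    attributes = -torch.ones((len(annos), max_attr_per_ins), dtype=torch.int64)
    for idx, anno in enumerate(annos):
        for jdx, attr_id in enumerate(anno.get("attribute_ids", [])):
            attributes[idx, jdx] = attr_id
    print(attributes[0, :4])  # tensor([ 3, 17, -1, -1])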
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/dataset_loader.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/dataset_loader.py
deleted file mode 100644
index e46c7d8c2e982d9611bc500c12f29bd1bfe31e50..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/dataset_loader.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-from importlib import import_module
-
-class DatasetLoader:
- def __init__(self, __C):
- self.__C = __C
-
- self.dataset = __C.DATASET
- dataset_moudle_path = 'openvqa.datasets.' + self.dataset +'.' + self.dataset + '_loader'
- self.dataset_moudle = import_module(dataset_moudle_path)
-
- def DataSet(self):
- return self.dataset_moudle.DataSet(self.__C)
-
-
-class EvalLoader:
- def __init__(self, __C):
- self.__C = __C
-
- self.dataset = __C.DATASET
- eval_moudle_path = 'openvqa.datasets.' + self.dataset + '.' + 'eval' + '.' + 'result_eval'
- self.eval_moudle = import_module(eval_moudle_path)
-
- def eval(self, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6, __arg7):
- return self.eval_moudle.eval(self.__C, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6, __arg7)
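Both loaders use the same trick: build a dotted module path from the configured dataset name and import it lazily. A minimal sketch of the pattern with a standard-library module, since the `openvqa.datasets.<name>` packages above only exist inside the OpenVQA code base:

    from importlib import import_module

    name = "json"                          # stand-in for __C.DATASET
    module = import_module(name + ".decoder")
    print(module.JSONDecoder)              # attribute looked up on the lazily imported module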
diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_stl.cpp b/spaces/CVPR/LIVE/pybind11/tests/test_stl.cpp
deleted file mode 100644
index 928635788e484d98f3cc8cf701d9221bef0a8bac..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/pybind11/tests/test_stl.cpp
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- tests/test_stl.cpp -- STL type casters
-
- Copyright (c) 2017 Wenzel Jakob
-
- All rights reserved. Use of this source code is governed by a
- BSD-style license that can be found in the LICENSE file.
-*/
-
-#include "pybind11_tests.h"
-#include "constructor_stats.h"
-#include <pybind11/stl.h>
-
-#include <vector>
-#include <string>
-
-// Test with `std::variant` in C++17 mode, or with `boost::variant` in C++11/14
-#if PYBIND11_HAS_VARIANT
-using std::variant;
-#elif defined(PYBIND11_TEST_BOOST) && (!defined(_MSC_VER) || _MSC_VER >= 1910)
-# include <boost/variant.hpp>
-# define PYBIND11_HAS_VARIANT 1
-using boost::variant;
-
-namespace pybind11 { namespace detail {
-template <typename... Ts>
-struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};
-
-template <>
-struct visit_helper<boost::variant> {
-    template <typename... Args>
- static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {
- return boost::apply_visitor(args...);
- }
-};
-}} // namespace pybind11::detail
-#endif
-
-PYBIND11_MAKE_OPAQUE(std::vector>);
-
-/// Issue #528: templated constructor
-struct TplCtorClass {
- template TplCtorClass(const T &) { }
- bool operator==(const TplCtorClass &) const { return true; }
-};
-
-namespace std {
- template <>
-    struct hash<TplCtorClass> { size_t operator()(const TplCtorClass &) const { return 0; } };
-}
-
-
-template <template <typename> class OptionalImpl, typename T>
-struct OptionalHolder
-{
- OptionalHolder() = default;
- bool member_initialized() const {
- return member && member->initialized;
- }
-    OptionalImpl<T> member = T{};
-};
-
-
-TEST_SUBMODULE(stl, m) {
-    // test_vector
-    m.def("cast_vector", []() { return std::vector<int>{1}; });
-    m.def("load_vector", [](const std::vector<int> &v) { return v.at(0) == 1 && v.at(1) == 2; });
-    // `std::vector<bool>` is special because it returns proxy objects instead of references
-    m.def("cast_bool_vector", []() { return std::vector<bool>{true, false}; });
-    m.def("load_bool_vector", [](const std::vector<bool> &v) {
-        return v.at(0) == true && v.at(1) == false;
-    });
-    // Unnumbered regression (caused by #936): pointers to stl containers aren't castable
-    static std::vector<RValueCaster> lvv{2};
-    m.def("cast_ptr_vector", []() { return &lvv; });
-
-    // test_deque
-    m.def("cast_deque", []() { return std::deque<int>{1}; });
-    m.def("load_deque", [](const std::deque<int> &v) { return v.at(0) == 1 && v.at(1) == 2; });
-
-    // test_array
-    m.def("cast_array", []() { return std::array<int, 2> {{1 , 2}}; });
-    m.def("load_array", [](const std::array<int, 2> &a) { return a[0] == 1 && a[1] == 2; });
-
-    // test_valarray
-    m.def("cast_valarray", []() { return std::valarray<int>{1, 4, 9}; });
-    m.def("load_valarray", [](const std::valarray<int>& v) {
-        return v.size() == 3 && v[0] == 1 && v[1] == 4 && v[2] == 9;
-    });
-
-    // test_map
-    m.def("cast_map", []() { return std::map<std::string, std::string>{{"key", "value"}}; });
-    m.def("load_map", [](const std::map<std::string, std::string> &map) {
-        return map.at("key") == "value" && map.at("key2") == "value2";
-    });
-
-    // test_set
-    m.def("cast_set", []() { return std::set<std::string>{"key1", "key2"}; });
-    m.def("load_set", [](const std::set<std::string> &set) {
-        return set.count("key1") && set.count("key2") && set.count("key3");
-    });
-
- // test_recursive_casting
- m.def("cast_rv_vector", []() { return std::vector{2}; });
- m.def("cast_rv_array", []() { return std::array(); });
- // NB: map and set keys are `const`, so while we technically do move them (as `const Type &&`),
- // casters don't typically do anything with that, which means they fall to the `const Type &`
- // caster.
- m.def("cast_rv_map", []() { return std::unordered_map{{"a", RValueCaster{}}}; });
- m.def("cast_rv_nested", []() {
- std::vector>, 2>> v;
- v.emplace_back(); // add an array
- v.back()[0].emplace_back(); // add a map to the array
- v.back()[0].back().emplace("b", RValueCaster{});
- v.back()[0].back().emplace("c", RValueCaster{});
- v.back()[1].emplace_back(); // add a map to the array
- v.back()[1].back().emplace("a", RValueCaster{});
- return v;
- });
- static std::array lva;
- static std::unordered_map lvm{{"a", RValueCaster{}}, {"b", RValueCaster{}}};
- static std::unordered_map>>> lvn;
- lvn["a"].emplace_back(); // add a list
- lvn["a"].back().emplace_back(); // add an array
- lvn["a"].emplace_back(); // another list
- lvn["a"].back().emplace_back(); // add an array
- lvn["b"].emplace_back(); // add a list
- lvn["b"].back().emplace_back(); // add an array
- lvn["b"].back().emplace_back(); // add another array
- m.def("cast_lv_vector", []() -> const decltype(lvv) & { return lvv; });
- m.def("cast_lv_array", []() -> const decltype(lva) & { return lva; });
- m.def("cast_lv_map", []() -> const decltype(lvm) & { return lvm; });
- m.def("cast_lv_nested", []() -> const decltype(lvn) & { return lvn; });
- // #853:
- m.def("cast_unique_ptr_vector", []() {
- std::vector> v;
- v.emplace_back(new UserType{7});
- v.emplace_back(new UserType{42});
- return v;
- });
-
- // test_move_out_container
- struct MoveOutContainer {
- struct Value { int value; };
- std::list move_list() const { return {{0}, {1}, {2}}; }
- };
- py::class_(m, "MoveOutContainerValue")
- .def_readonly("value", &MoveOutContainer::Value::value);
- py::class_(m, "MoveOutContainer")
- .def(py::init<>())
- .def_property_readonly("move_list", &MoveOutContainer::move_list);
-
- // Class that can be move- and copy-constructed, but not assigned
- struct NoAssign {
- int value;
-
- explicit NoAssign(int value = 0) : value(value) { }
- NoAssign(const NoAssign &) = default;
- NoAssign(NoAssign &&) = default;
-
- NoAssign &operator=(const NoAssign &) = delete;
- NoAssign &operator=(NoAssign &&) = delete;
- };
- py::class_(m, "NoAssign", "Class with no C++ assignment operators")
- .def(py::init<>())
- .def(py::init());
-
-
- struct MoveOutDetector
- {
- MoveOutDetector() = default;
- MoveOutDetector(const MoveOutDetector&) = default;
- MoveOutDetector(MoveOutDetector&& other) noexcept
- : initialized(other.initialized) {
- // steal underlying resource
- other.initialized = false;
- }
- bool initialized = true;
- };
- py::class_(m, "MoveOutDetector", "Class with move tracking")
- .def(py::init<>())
- .def_readonly("initialized", &MoveOutDetector::initialized);
-
-
-#ifdef PYBIND11_HAS_OPTIONAL
- // test_optional
- m.attr("has_optional") = true;
-
- using opt_int = std::optional;
- using opt_no_assign = std::optional;
- m.def("double_or_zero", [](const opt_int& x) -> int {
- return x.value_or(0) * 2;
- });
- m.def("half_or_none", [](int x) -> opt_int {
- return x ? opt_int(x / 2) : opt_int();
- });
- m.def("test_nullopt", [](opt_int x) {
- return x.value_or(42);
- }, py::arg_v("x", std::nullopt, "None"));
- m.def("test_no_assign", [](const opt_no_assign &x) {
- return x ? x->value : 42;
- }, py::arg_v("x", std::nullopt, "None"));
-
- m.def("nodefer_none_optional", [](std::optional) { return true; });
- m.def("nodefer_none_optional", [](py::none) { return false; });
-
- using opt_holder = OptionalHolder;
- py::class_(m, "OptionalHolder", "Class with optional member")
- .def(py::init<>())
- .def_readonly("member", &opt_holder::member)
- .def("member_initialized", &opt_holder::member_initialized);
-#endif
-
-#ifdef PYBIND11_HAS_EXP_OPTIONAL
- // test_exp_optional
- m.attr("has_exp_optional") = true;
-
- using exp_opt_int = std::experimental::optional;
- using exp_opt_no_assign = std::experimental::optional;
- m.def("double_or_zero_exp", [](const exp_opt_int& x) -> int {
- return x.value_or(0) * 2;
- });
- m.def("half_or_none_exp", [](int x) -> exp_opt_int {
- return x ? exp_opt_int(x / 2) : exp_opt_int();
- });
- m.def("test_nullopt_exp", [](exp_opt_int x) {
- return x.value_or(42);
- }, py::arg_v("x", std::experimental::nullopt, "None"));
- m.def("test_no_assign_exp", [](const exp_opt_no_assign &x) {
- return x ? x->value : 42;
- }, py::arg_v("x", std::experimental::nullopt, "None"));
-
- using opt_exp_holder = OptionalHolder;
- py::class_(m, "OptionalExpHolder", "Class with optional member")
- .def(py::init<>())
- .def_readonly("member", &opt_exp_holder::member)
- .def("member_initialized", &opt_exp_holder::member_initialized);
-#endif
-
-#ifdef PYBIND11_HAS_VARIANT
- static_assert(std::is_same::value,
- "visitor::result_type is required by boost::variant in C++11 mode");
-
- struct visitor {
- using result_type = const char *;
-
- result_type operator()(int) { return "int"; }
- result_type operator()(std::string) { return "std::string"; }
- result_type operator()(double) { return "double"; }
- result_type operator()(std::nullptr_t) { return "std::nullptr_t"; }
- };
-
- // test_variant
- m.def("load_variant", [](variant v) {
- return py::detail::visit_helper::call(visitor(), v);
- });
- m.def("load_variant_2pass", [](variant v) {
- return py::detail::visit_helper::call(visitor(), v);
- });
- m.def("cast_variant", []() {
- using V = variant;
- return py::make_tuple(V(5), V("Hello"));
- });
-#endif
-
- // #528: templated constructor
- // (no python tests: the test here is that this compiles)
- m.def("tpl_ctor_vector", [](std::vector &) {});
- m.def("tpl_ctor_map", [](std::unordered_map &) {});
- m.def("tpl_ctor_set", [](std::unordered_set &) {});
-#if defined(PYBIND11_HAS_OPTIONAL)
- m.def("tpl_constr_optional", [](std::optional &) {});
-#elif defined(PYBIND11_HAS_EXP_OPTIONAL)
- m.def("tpl_constr_optional", [](std::experimental::optional &) {});
-#endif
-
- // test_vec_of_reference_wrapper
- // #171: Can't return STL structures containing reference wrapper
- m.def("return_vec_of_reference_wrapper", [](std::reference_wrapper p4) {
- static UserType p1{1}, p2{2}, p3{3};
- return std::vector> {
- std::ref(p1), std::ref(p2), std::ref(p3), p4
- };
- });
-
- // test_stl_pass_by_pointer
- m.def("stl_pass_by_pointer", [](std::vector* v) { return *v; }, "v"_a=nullptr);
-
- // #1258: pybind11/stl.h converts string to vector
- m.def("func_with_string_or_vector_string_arg_overload", [](std::vector) { return 1; });
- m.def("func_with_string_or_vector_string_arg_overload", [](std::list) { return 2; });
- m.def("func_with_string_or_vector_string_arg_overload", [](std::string) { return 3; });
-
- class Placeholder {
- public:
- Placeholder() { print_created(this); }
- Placeholder(const Placeholder &) = delete;
- ~Placeholder() { print_destroyed(this); }
- };
- py::class_(m, "Placeholder");
-
- /// test_stl_vector_ownership
- m.def("test_stl_ownership",
- []() {
- std::vector result;
- result.push_back(new Placeholder());
- return result;
- },
- py::return_value_policy::take_ownership);
-
- m.def("array_cast_sequence", [](std::array x) { return x; });
-
- /// test_issue_1561
- struct Issue1561Inner { std::string data; };
- struct Issue1561Outer { std::vector list; };
-
- py::class_(m, "Issue1561Inner")
- .def(py::init())
- .def_readwrite("data", &Issue1561Inner::data);
-
- py::class_(m, "Issue1561Outer")
- .def(py::init<>())
- .def_readwrite("list", &Issue1561Outer::list);
-}
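Each `m.def(...)` above becomes a function on the `stl` submodule of the compiled test extension. A sketch of the Python-side round-tripping, assuming the pybind11 test module `pybind11_tests` has been built and is importable:

    from pybind11_tests import stl

    print(stl.cast_vector())        # [1]
    print(stl.load_vector([1, 2]))  # True: the Python list converts to std::vector
    print(stl.cast_map())           # {'key': 'value'}
    print(stl.cast_array())         # [1, 2]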
diff --git a/spaces/CVPR/LIVE/pydiffvg_tensorflow/image.py b/spaces/CVPR/LIVE/pydiffvg_tensorflow/image.py
deleted file mode 100644
index 18eb1e6b66ae077b1c9d4b534a5fce250fe3958a..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/pydiffvg_tensorflow/image.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-import skimage
-import skimage.io
-import os
-
-def imwrite(img, filename, gamma = 2.2, normalize = False):
- directory = os.path.dirname(filename)
- if directory != '' and not os.path.exists(directory):
- os.makedirs(directory)
-
- if not isinstance(img, np.ndarray):
- img = img.numpy()
- if normalize:
- img_rng = np.max(img) - np.min(img)
- if img_rng > 0:
- img = (img - np.min(img)) / img_rng
- img = np.clip(img, 0.0, 1.0)
- if img.ndim==2:
- #repeat along the third dimension
- img=np.expand_dims(img,2)
- img[:, :, :3] = np.power(img[:, :, :3], 1.0/gamma)
- skimage.io.imsave(filename, (img * 255).astype(np.uint8))
\ No newline at end of file
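A minimal usage sketch for `imwrite` (the array and file name are made up for illustration): the first three channels are gamma-encoded and the result is written as an 8-bit image, creating the output directory if needed.

    import numpy as np

    img = np.random.rand(64, 64, 3).astype(np.float32)  # values in [0, 1]
    imwrite(img, "out/test.png")                         # writes out/test.png with gamma 2.2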
diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/build.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/build.py
deleted file mode 100644
index 3f407420c13b3c28878aaea98e6958093b9d2caf..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/build.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from detectron2.layers import ShapeSpec
-from detectron2.utils.registry import Registry
-
-from .backbone import Backbone
-
-BACKBONE_REGISTRY = Registry("BACKBONE")
-BACKBONE_REGISTRY.__doc__ = """
-Registry for backbones, which extract feature maps from images
-
-The registered object must be a callable that accepts two arguments:
-
-1. A :class:`detectron2.config.CfgNode`
-2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification.
-
-Registered object must return instance of :class:`Backbone`.
-"""
-
-
-def build_backbone(cfg, input_shape=None):
- """
- Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
-
- Returns:
- an instance of :class:`Backbone`
- """
- if input_shape is None:
- input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
-
- backbone_name = cfg.MODEL.BACKBONE.NAME
- backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
- assert isinstance(backbone, Backbone)
- return backbone
-
-def build_text_backbone(cfg, input_shape=None):
- """
- Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
-
- Returns:
- an instance of :class:`Backbone`
- """
- if input_shape is None:
- input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
-
- backbone_name = cfg.MODEL.BACKBONE.NAME
- backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
- assert isinstance(backbone, Backbone)
- return backbone
\ No newline at end of file
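The registry above is what lets a config select a backbone purely by name. A hedged sketch of registering and building a custom backbone; `ToyBackbone` is a made-up class, but the decorator-based registration is the pattern detectron2 documents for `BACKBONE_REGISTRY`:

    import torch.nn as nn
    from detectron2.config import get_cfg
    from detectron2.layers import ShapeSpec
    from detectron2.modeling import BACKBONE_REGISTRY, Backbone, build_backbone

    @BACKBONE_REGISTRY.register()
    class ToyBackbone(Backbone):
        def __init__(self, cfg, input_shape):
            super().__init__()
            self.conv = nn.Conv2d(input_shape.channels, 64, kernel_size=3, stride=2, padding=1)

        def forward(self, x):
            return {"conv": self.conv(x)}

        def output_shape(self):
            return {"conv": ShapeSpec(channels=64, stride=2)}

    cfg = get_cfg()
    cfg.MODEL.BACKBONE.NAME = "ToyBackbone"  # looked up in BACKBONE_REGISTRY by build_backbone
    model = build_backbone(cfg)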
diff --git a/spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/README.md b/spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/README.md
deleted file mode 100644
index 63d61d9b98690f306fc3d3fbdb4fda6c18d65164..0000000000000000000000000000000000000000
--- a/spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-## Knowledge Base Construction
-
-
-### 1 Building from Wikipedia
-
-Reference tutorial: https://blog.51cto.com/u_15127535/2697309
-
-
-I. Wikipedia
-
-Wikipedia is a multilingual, collaboratively written encyclopedia project based on wiki technology, i.e. an online encyclopedia written in many languages. It was founded by Jimmy Wales and Larry Sanger; the website went online on January 13, 2001, and the encyclopedia project was formally launched on January 15, 2001.
-
-
-
-II. Processing the Chinese Wikipedia dump
-
-1 Environment setup. (1) The programming language is Python 3. (2) The Gensim third-party library: Gensim is a Python toolkit that includes a class for processing the Chinese Wikipedia dump and is convenient to use.
-Gensim: https://github.com/RaRe-Technologies/gensim
-
-Install gensim with pip install gensim.
-
-(3) The OpenCC third-party library handles Chinese character conversion, including conversion between Simplified and Traditional Chinese.
-
-OpenCC: https://github.com/BYVoid/OpenCC. The OpenCC core is implemented in C++; if you are comfortable with C++ you can build it from source with make as described in the project documentation.
-
-OpenCC also has a Python implementation that can be installed via pip (pip install opencc-python). It is slower than the C++ version but easy to install and use, so the pip install is recommended.
-
-
-
-2 Downloading the data
-
-The Chinese Wikipedia dump is updated and archived monthly; normally you download the current latest snapshot from https://dumps.wikimedia.org/zhwiki/latest/. The file we download is zhwiki-latest-pages-articles.xml.bz2.
-
-A Chinese Wikipedia dump generally consists of several parts:
-
-
-
-Word-vector training uses the article text, which is the part we process below.
-
-
-
-3 Extracting the text
-
-The downloaded file is compressed (bz2, gz) and does not need to be unpacked. A ready-made script that uses gensim to process the Wikipedia dump is available:
-
-wikidata_process https://github.com/bamtercelboo/corpus_process_script/tree/master/wikidata_process
-
-Usage:
-
-python wiki_process.py zhwiki-latest-pages-articles.xml.bz2 zhwiki-latest.txt
-
-This step takes a while; afterwards you have a plain-text Chinese Wikipedia corpus (zhwiki-latest.txt).
-
-The output file looks like:
-
-歐幾里得 西元前三世紀的古希臘數學家 現在被認為是幾何之父 此畫為拉斐爾的作品 雅典學院 数学 是利用符号语言研究數量 结构 变化以及空间等概念的一門学科
-
-
-
-4 Converting Traditional Chinese to Simplified Chinese
-
-The file produced by the script above contains many Traditional Chinese characters, which need to be converted to Simplified Chinese.
-
-We use OpenCC for the traditional-to-simplified conversion; a ready-made Python script is available:
-
-chinese_t2s
-
-https://github.com/bamtercelboo/corpus_process_script/tree/master/chinese_t2s
-
-Usage:
-
-python chinese_t2s.py --input input_file --output output_file
-
-like:
-
-python chinese_t2s.py --input zhwiki-latest.txt --output zhwiki-latest-simplified.txt
-
-The output looks like:
-
-欧几里得 西元前三世纪的古希腊数学家 现在被认为是几何之父 此画为拉斐尔的作品 雅典学院 数学 是利用符号语言研究数量 结构 变化以及空间等概念的一门学科
-
- 5. Cleaning the corpus
-
-The steps above already produce the data we want, but some tasks need further light processing. For word-vector training, the corpus obtained so far still contains plenty of English, Japanese and German text, Chinese punctuation, mojibake and other characters. We strip these and keep only Chinese characters. Keeping only Chinese characters is just one possible policy; different tasks call for different processing. A ready-made script is available:
-
-clean
-
-https://github.com/bamtercelboo/corpus_process_script/tree/master/clean
-
-Usage:
-
-python clean_corpus.py --input input_file --output output_file
-
-like:
-
-python clean_corpus.py --input zhwiki-latest-simplified.txt --output zhwiki-latest-simplified_cleaned.txt
-
-Effect:
-
-input:
-
-哲学 哲学(英语:philosophy)是对普遍的和基本的问题的研究,这些问题通常和存在、知识、价值、理性、心灵、语言等有关。
-
-output:
-
-哲学哲学英语是对普遍的和基本的问题的研究这些问题通常和存在知识价值理性心灵语言等有关
-
-
-
-III. Data-processing scripts
-
-A new repository, corpus-process-script (https://github.com/bamtercelboo/corpus_process_script), was recently opened on GitHub. It will hold data-processing scripts for Chinese, English and other languages, each with a detailed README; hopefully it will be of some help.
-References
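For readers who would rather stay in Python than use the helper scripts linked above, the extract-and-simplify steps can be roughly sketched with gensim's WikiCorpus plus the pip-installable OpenCC bindings. Package and method names are the commonly published ones; treat this as an outline, not a drop-in replacement for the scripts:

    from gensim.corpora.wikicorpus import WikiCorpus
    from opencc import OpenCC

    cc = OpenCC("t2s")  # Traditional -> Simplified
    wiki = WikiCorpus("zhwiki-latest-pages-articles.xml.bz2", dictionary={})
    with open("zhwiki-latest-simplified.txt", "w", encoding="utf-8") as out:
        for tokens in wiki.get_texts():  # one article per iteration, markup already stripped
            out.write(cc.convert(" ".join(tokens)) + "\n")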
diff --git a/spaces/ChengZ/DeepDanbooru_string0/app.py b/spaces/ChengZ/DeepDanbooru_string0/app.py
deleted file mode 100644
index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000
--- a/spaces/ChengZ/DeepDanbooru_string0/app.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import argparse
-import functools
-import os
-import html
-import pathlib
-import tarfile
-
-import deepdanbooru as dd
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-import tensorflow as tf
-import piexif
-import piexif.helper
-
-TITLE = 'DeepDanbooru String'
-
-TOKEN = os.environ['TOKEN']
-MODEL_REPO = 'CikeyQI/DeepDanbooru_string'
-MODEL_FILENAME = 'model-resnet_custom_v3.h5'
-LABEL_FILENAME = 'tags.txt'
-
-
-def parse_args() -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument('--score-slider-step', type=float, default=0.05)
- parser.add_argument('--score-threshold', type=float, default=0.5)
- parser.add_argument('--theme', type=str, default='dark-grass')
- parser.add_argument('--live', action='store_true')
- parser.add_argument('--share', action='store_true')
- parser.add_argument('--port', type=int)
- parser.add_argument('--disable-queue',
- dest='enable_queue',
- action='store_false')
- parser.add_argument('--allow-flagging', type=str, default='never')
- return parser.parse_args()
-
-
-def load_sample_image_paths() -> list[pathlib.Path]:
- image_dir = pathlib.Path('images')
- if not image_dir.exists():
- dataset_repo = 'hysts/sample-images-TADNE'
- path = huggingface_hub.hf_hub_download(dataset_repo,
- 'images.tar.gz',
- repo_type='dataset',
- use_auth_token=TOKEN)
- with tarfile.open(path) as f:
- f.extractall()
- return sorted(image_dir.glob('*'))
-
-
-def load_model() -> tf.keras.Model:
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
- MODEL_FILENAME,
- use_auth_token=TOKEN)
- model = tf.keras.models.load_model(path)
- return model
-
-
-def load_labels() -> list[str]:
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
- LABEL_FILENAME,
- use_auth_token=TOKEN)
- with open(path) as f:
- labels = [line.strip() for line in f.readlines()]
- return labels
-
-def plaintext_to_html(text):
- text = "" + "
\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "
"
- return text
-
-def predict(image: PIL.Image.Image, score_threshold: float,
- model: tf.keras.Model, labels: list[str]) -> dict[str, float]:
- rawimage = image
- _, height, width, _ = model.input_shape
- image = np.asarray(image)
- image = tf.image.resize(image,
- size=(height, width),
- method=tf.image.ResizeMethod.AREA,
- preserve_aspect_ratio=True)
- image = image.numpy()
- image = dd.image.transform_and_pad_image(image, width, height)
- image = image / 255.
- probs = model.predict(image[None, ...])[0]
- probs = probs.astype(float)
- res = dict()
- for prob, label in zip(probs.tolist(), labels):
- if prob < score_threshold:
- continue
- res[label] = prob
- b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True))
- a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)')
- c = ', '.join(list(b.keys()))
-
- items = rawimage.info
- geninfo = ''
-
- if "exif" in rawimage.info:
- exif = piexif.load(rawimage.info["exif"])
- exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
- try:
- exif_comment = piexif.helper.UserComment.load(exif_comment)
- except ValueError:
- exif_comment = exif_comment.decode('utf8', errors="ignore")
-
- items['exif comment'] = exif_comment
- geninfo = exif_comment
-
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
- 'loop', 'background', 'timestamp', 'duration']:
- items.pop(field, None)
-
- geninfo = items.get('parameters', geninfo)
-
- info = f"""
-PNG Info
-"""
- for key, text in items.items():
- info += f"""
-
-{plaintext_to_html(str(key))}
-{plaintext_to_html(str(text))}
-
-""".strip()+"\n"
-
- if len(info) == 0:
- message = "Nothing found in the image."
- info = f"{message}
"
-
- return (a,c,res,info)
-
-
-def main():
- args = parse_args()
- model = load_model()
- labels = load_labels()
-
- func = functools.partial(predict, model=model, labels=labels)
- func = functools.update_wrapper(func, predict)
-
- gr.Interface(
- func,
- [
- gr.inputs.Image(type='pil', label='Input'),
- gr.inputs.Slider(0,
- 1,
- step=args.score_slider_step,
- default=args.score_threshold,
- label='Score Threshold'),
- ],
- [
- gr.outputs.Textbox(label='Output (string)'),
- gr.outputs.Textbox(label='Output (raw string)'),
- gr.outputs.Label(label='Output (label)'),
- gr.outputs.HTML()
- ],
- examples=[
- ['miku.jpg',0.5],
- ['miku2.jpg',0.5]
- ],
- title=TITLE,
- description='''
-Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer.
-
-Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
-
-PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- ''',
- theme=args.theme,
- allow_flagging=args.allow_flagging,
- live=args.live,
- ).launch(
- enable_queue=args.enable_queue,
- server_port=args.port,
- share=args.share,
- )
-
-
-if __name__ == '__main__':
- main()
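The heart of `predict` above is a threshold filter, a sort by confidence, and a join into a prompt string. A tiny self-contained sketch of that post-processing with invented tag scores:

    probs = {"1girl": 0.98, "solo": 0.95, "hat_(animal)": 0.62, "rain": 0.12}
    threshold = 0.5
    kept = {k: v for k, v in probs.items() if v >= threshold}
    ordered = sorted(kept, key=kept.get, reverse=True)
    prompt = ", ".join(ordered).replace("_", " ").replace("(", "\\(").replace(")", "\\)")
    print(prompt)  # 1girl, solo, hat \(animal\)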
diff --git a/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n/locale_diff.py b/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n/locale_diff.py
deleted file mode 100644
index 257277965e0866a86d0361863a8f1b408c4f71ab..0000000000000000000000000000000000000000
--- a/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n/locale_diff.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import json
-import os
-from collections import OrderedDict
-
-# Define the standard file name
-standard_file = "zh_CN.json"
-
-# Find all JSON files in the directory
-dir_path = "./"
-languages = [
- f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
-]
-
-# Load the standard file
-with open(standard_file, "r", encoding="utf-8") as f:
- standard_data = json.load(f, object_pairs_hook=OrderedDict)
-
-# Loop through each language file
-for lang_file in languages:
- # Load the language file
- with open(lang_file, "r", encoding="utf-8") as f:
- lang_data = json.load(f, object_pairs_hook=OrderedDict)
-
- # Find the difference between the language file and the standard file
- diff = set(standard_data.keys()) - set(lang_data.keys())
-
- miss = set(lang_data.keys()) - set(standard_data.keys())
-
- # Add any missing keys to the language file
- for key in diff:
- lang_data[key] = key
-
- # Del any extra keys to the language file
- for key in miss:
- del lang_data[key]
-
- # Sort the keys of the language file to match the order of the standard file
- lang_data = OrderedDict(
- sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
- )
-
- # Save the updated language file
- with open(lang_file, "w", encoding="utf-8") as f:
- json.dump(lang_data, f, ensure_ascii=False, indent=4)
- f.write("\n")
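The effect of the script on a single locale file is easiest to see with small in-memory dictionaries; a minimal sketch of the same add-missing / drop-extra / reorder logic:

    from collections import OrderedDict

    standard = OrderedDict([("OK", "OK"), ("Cancel", "Cancel")])
    lang = OrderedDict([("Cancel", "Abbrechen"), ("Obsolete", "Alt")])

    for key in set(standard) - set(lang):  # keys missing from the translation default to themselves
        lang[key] = key
    for key in set(lang) - set(standard):  # keys dropped from the standard file are removed
        del lang[key]
    lang = OrderedDict(sorted(lang.items(), key=lambda x: list(standard).index(x[0])))
    print(dict(lang))  # {'OK': 'OK', 'Cancel': 'Abbrechen'}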
diff --git "a/spaces/Cong723/gpt-academic-public/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/spaces/Cong723/gpt-academic-public/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py"
deleted file mode 100644
index 505086455af8d2676055ab084cf97058b954c7d5..0000000000000000000000000000000000000000
--- "a/spaces/Cong723/gpt-academic-public/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py"
+++ /dev/null
@@ -1,112 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption
-from .crazy_utils import read_and_clean_pdf_text
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
-
-
-def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
- import tiktoken
- print('begin analysis on:', file_name)
-
-    ############################## <Step 0: split the PDF> ##################################
-    # Recursively split the PDF into chunks (each ideally a complete section such as the
-    # introduction or the experiments, split further only when necessary); every chunk must
-    # stay under 2500 tokens.
-    file_content, page_one = read_and_clean_pdf_text(file_name) # (try to) split the PDF by section
-
- TOKEN_LIMIT_PER_FRAGMENT = 2500
-
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
- txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
- page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
- txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
- # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
- paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
-
-    ############################## <Step 1: pull the high-value information out of the abstract and into history> ##################################
- final_results = []
- final_results.append(paper_meta)
-
-    ############################## <Step 2: iterate over the whole paper and distill each part> ##################################
-    i_say_show_user = f'首先你在英文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # prompt shown to the user
- chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
-
- iteration_results = []
- last_iteration_result = paper_meta # 初始值是摘要
- MAX_WORD_TOTAL = 4096
- n_fragment = len(paper_fragments)
- if n_fragment >= 20: print('文章极长,不能达到预期效果')
- for i in range(n_fragment):
- NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
- i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
- i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
- llm_kwargs, chatbot,
- history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果
- sys_prompt="Extract the main idea of this section." # 提示
- )
- iteration_results.append(gpt_say)
- last_iteration_result = gpt_say
-
-    ############################## <Step 3: assemble the history> ##################################
- final_results.extend(iteration_results)
- final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
- # 接下来两句话只显示在界面上,不起实际作用
- i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
- chatbot.append([i_say_show_user, gpt_say])
-
-    ############################## <Step 4: cap the tokens so the reply does not overflow> ##################################
- from .crazy_utils import input_clipping
- _, final_results = input_clipping("", final_results, max_token_limit=3200)
- yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了
-
-
-@CatchException
-def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- import glob, os
-
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe, binary-husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import fitz
- except:
- report_execption(chatbot, history,
- a = f"解析项目: {txt}",
- b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
-
- # 清空历史,以免输入溢出
- history = []
-
- # 检测输入参数,如没有给定输入参数,直接退出
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "":
- txt = '空空如也的输入栏'
- report_execption(chatbot, history,
- a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
-
- # 搜索需要处理的文件清单
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
- # 如果没找到任何文件
- if len(file_manifest) == 0:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- txt = file_manifest[0]
- # 开始正式执行任务
- yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
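The splitting step above works on token counts rather than characters. A rough stand-alone sketch of the idea with tiktoken; this is a naive greedy splitter for illustration only, not the project's `breakdown_txt_to_satisfy_token_limit_for_pdf`:

    import tiktoken

    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

    def split_by_tokens(text, limit=2500):
        """Greedily pack paragraphs into fragments of at most `limit` tokens."""
        fragments, current = [], ""
        for para in text.split("\n\n"):
            candidate = (current + "\n\n" + para).strip()
            if not current or len(enc.encode(candidate)) <= limit:
                current = candidate
            else:
                fragments.append(current)
                current = para
        if current:
            fragments.append(current)
        return fragments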
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py
deleted file mode 100644
index 8322ea9ebb7cd1dd907829a985b9833058bc54c1..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Collection of utilities for command-line interfaces and console scripts."""
-import os
-import re
-
-
-numberAddedRE = re.compile(r"#\d+$")
-
-
-def makeOutputFileName(
- input, outputDir=None, extension=None, overWrite=False, suffix=""
-):
- """Generates a suitable file name for writing output.
-
- Often tools will want to take a file, do some kind of transformation to it,
- and write it out again. This function determines an appropriate name for the
- output file, through one or more of the following steps:
-
- - changing the output directory
- - appending suffix before file extension
- - replacing the file extension
- - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid
- overwriting an existing file.
-
- Args:
- input: Name of input file.
- outputDir: Optionally, a new directory to write the file into.
- suffix: Optionally, a string suffix is appended to file name before
- the extension.
- extension: Optionally, a replacement for the current file extension.
- overWrite: Overwriting an existing file is permitted if true; if false
- and the proposed filename exists, a new name will be generated by
- adding an appropriate number suffix.
-
- Returns:
- str: Suitable output filename
- """
- dirName, fileName = os.path.split(input)
- fileName, ext = os.path.splitext(fileName)
- if outputDir:
- dirName = outputDir
- fileName = numberAddedRE.split(fileName)[0]
- if extension is None:
- extension = os.path.splitext(input)[1]
- output = os.path.join(dirName, fileName + suffix + extension)
- n = 1
- if not overWrite:
- while os.path.exists(output):
- output = os.path.join(
- dirName, fileName + suffix + "#" + repr(n) + extension
- )
- n += 1
- return output
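A couple of hedged examples of the behaviour described in the docstring (the outputs assume the target files do not already exist, otherwise a `#1`-style counter is appended):

    from fontTools.misc.cliTools import makeOutputFileName

    print(makeOutputFileName("fonts/Foo.ttf", suffix="-instance"))  # fonts/Foo-instance.ttf
    print(makeOutputFileName("fonts/Foo.ttf", extension=".woff2"))  # fonts/Foo.woff2
    print(makeOutputFileName("fonts/Foo.ttf", outputDir="build"))   # build/Foo.ttf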
diff --git a/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/languages.py b/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/languages.py
deleted file mode 100644
index 0551511f86eb917fd0668d971b21e177c387b7ef..0000000000000000000000000000000000000000
--- a/spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/languages.py
+++ /dev/null
@@ -1,101 +0,0 @@
-LANGUAGES = {
- "en": "eng",
- "zh": "zho",
- "de": "deu",
- "es": "spa",
- "ru": "rus",
- "ko": "kor",
- "fr": "fra",
- "ja": "jpn",
- "pt": "por",
- "tr": "tur",
- "pl": "pol",
- "ca": "cat",
- "nl": "nld",
- "ar": "ara",
- "sv": "swe",
- "it": "ita",
- "id": "ind",
- "hi": "hin",
- "fi": "fin",
- "vi": "vie",
- "iw": "heb",
- "uk": "ukr",
- "el": "ell",
- "ms": "msa",
- "cs": "ces",
- "ro": "ron",
- "da": "dan",
- "hu": "hun",
- "ta": "tam",
- "no": "nor",
- "th": "tha",
- "ur": "urd",
- "hr": "hrv",
- "bg": "bul",
- "lt": "lit",
- "la": "lat",
- "mi": "mri",
- "ml": "mal",
- "cy": "cym",
- "sk": "slk",
- "te": "tel",
- "fa": "fas",
- "lv": "lav",
- "bn": "ben",
- "sr": "srp",
- "az": "aze",
- "sl": "slv",
- "kn": "kan",
- "et": "est",
- "mk": "mkd",
- "br": "bre",
- "eu": "eus",
- "is": "isl",
- "hy": "hye",
- "ne": "nep",
- "mn": "mon",
- "bs": "bos",
- "kk": "kaz",
- "sq": "sqi",
- "sw": "swa",
- "gl": "glg",
- "mr": "mar",
- "pa": "pan",
- "si": "sin",
- "km": "khm",
- "sn": "sna",
- "yo": "yor",
- "so": "som",
- "af": "afr",
- "oc": "oci",
- "ka": "kat",
- "be": "bel",
- "tg": "tgk",
- "sd": "snd",
- "gu": "guj",
- "am": "amh",
- "yi": "yid",
- "lo": "lao",
- "uz": "uzb",
- "fo": "fao",
- "ht": "hat",
- "ps": "pus",
- "tk": "tuk",
- "nn": "nno",
- "mt": "mlt",
- "sa": "san",
- "lb": "ltz",
- "my": "mya",
- "bo": "bod",
- "tl": "tgl",
- "mg": "mlg",
- "as": "asm",
- "tt": "tat",
- "haw": "haw",
- "ln": "lin",
- "ha": "hau",
- "ba": "bak",
- "jw": "jav",
- "su": "sun",
-}
\ No newline at end of file
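The table appears to map two-letter (ISO 639-1-style) codes to three-letter codes; a trivial lookup sketch with a hypothetical fallback value:

```python
from languages import LANGUAGES

print(LANGUAGES.get("ja", "und"))  # 'jpn'
print(LANGUAGES.get("xx", "und"))  # 'und' (fallback for codes not in the table)
```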
diff --git a/spaces/Dauzy/whisper-webui/app-local.py b/spaces/Dauzy/whisper-webui/app-local.py
deleted file mode 100644
index c7717d096ca5f95177f0dba03cd62ca729bae9f3..0000000000000000000000000000000000000000
--- a/spaces/Dauzy/whisper-webui/app-local.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Run the app with no audio file restrictions
-from app import create_ui
-from src.config import ApplicationConfig
-
-create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1))
\ No newline at end of file
diff --git a/spaces/Demi2809/rvc-models/infer_pack/attentions.py b/spaces/Demi2809/rvc-models/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/Demi2809/rvc-models/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so as to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
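The pad-and-reshape trick in `_relative_position_to_absolute_position` is compact but easy to misread; a standalone shape check with made-up dimensions mirrors the same steps:

```python
import torch
import torch.nn.functional as F

b, h, l = 1, 2, 4
x = torch.randn(b, h, l, 2 * l - 1)           # relative logits: [b, h, l, 2*l-1]
x = F.pad(x, (0, 1))                          # append one zero column
x_flat = x.view(b, h, l * 2 * l)
x_flat = F.pad(x_flat, (0, l - 1))            # pad so the next view lines up
out = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(out.shape)                              # torch.Size([1, 2, 4, 4]) == [b, h, l, l]
```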
diff --git a/spaces/Dormin22/Proxy/greeting.md b/spaces/Dormin22/Proxy/greeting.md
deleted file mode 100644
index 9df71687dc75de4b8afd93c54cb779c720ff2ed2..0000000000000000000000000000000000000000
--- a/spaces/Dormin22/Proxy/greeting.md
+++ /dev/null
@@ -1 +0,0 @@
-Hope is the last thing ever lost.
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan-Inversion/README.md b/spaces/DragGan/DragGan-Inversion/README.md
deleted file mode 100644
index 7ea7dcbb011aebb97612f542ec5a20a8e412cb0d..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-title: DragGan - Drag Your GAN - Inversion
-emoji: 🔄🐉
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-python_version: 3.8.17
-sdk_version: 3.36.1
-app_file: visualizer_drag_gradio_inversion.py
-pinned: false
----
-
-
-# Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold
-
-https://arxiv.org/abs/2305.10973
-https://huggingface.co/DragGan/DragGan-Models
-
-
-
-
-
-**Figure:** *Drag your GAN.*
-
-> **Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold**
-> Xingang Pan, Ayush Tewari, Thomas Leimkühler, Lingjie Liu, Abhimitra Meka, Christian Theobalt
-> *SIGGRAPH 2023 Conference Proceedings*
-
-## Requirements
-
-Please follow the requirements of [https://github.com/NVlabs/stylegan3](https://github.com/NVlabs/stylegan3).
-
-## Download pre-trained StyleGAN2 weights
-
-To download pre-trained weights, simply run:
-```sh
-sh scripts/download_model.sh
-```
-If you want to try StyleGAN-Human and the Landscapes HQ (LHQ) dataset, please download weights from these links: [StyleGAN-Human](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing), [LHQ](https://drive.google.com/file/d/16twEf0T9QINAEoMsWefoWiyhcTd-aiWc/view?usp=sharing), and put them under `./checkpoints`.
-
-Feel free to try other pretrained StyleGAN.
-
-## Run DragGAN GUI
-
-To start the DragGAN GUI, simply run:
-```sh
-sh scripts/gui.sh
-```
-
-This GUI supports editing GAN-generated images. To edit a real image, you need to first perform GAN inversion using tools like [PTI](https://github.com/danielroich/PTI). Then load the new latent code and model weights to the GUI.
-
-You can run DragGAN Gradio demo as well:
-```sh
-python visualizer_drag_gradio.py
-```
-
-## Acknowledgement
-
-This code is developed based on [StyleGAN3](https://github.com/NVlabs/stylegan3). Part of the code is borrowed from [StyleGAN-Human](https://github.com/stylegan-human/StyleGAN-Human).
-
-## License
-
-The code related to the DragGAN algorithm is licensed under [CC-BY-NC](https://creativecommons.org/licenses/by-nc/4.0/).
-However, most of this project is available under separate license terms: all code used or modified from [StyleGAN3](https://github.com/NVlabs/stylegan3) is under the [Nvidia Source Code License](https://github.com/NVlabs/stylegan3/blob/main/LICENSE.txt).
-
-Any form of use and derivative of this code must preserve the watermarking functionality.
-
-## BibTeX
-
-```bibtex
-@inproceedings{pan2023draggan,
- title={Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold},
- author={Pan, Xingang and Tewari, Ayush and Leimk{\"u}hler, Thomas and Liu, Lingjie and Meka, Abhimitra and Theobalt, Christian},
- booktitle = {ACM SIGGRAPH 2023 Conference Proceedings},
- year={2023}
-}
-```
diff --git a/spaces/DragGan/DragGan/stylegan_human/training/dataset.py b/spaces/DragGan/DragGan/stylegan_human/training/dataset.py
deleted file mode 100644
index 68c356e3b89b63211e0b4bdde88babcffd26d59e..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/stylegan_human/training/dataset.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Streaming images and labels from datasets created with dataset_tool.py."""
-
-import os
-import numpy as np
-import zipfile
-import PIL.Image
-import json
-import torch
-import dnnlib
-
-try:
- import pyspng
-except ImportError:
- pyspng = None
-
-#----------------------------------------------------------------------------
-
-class Dataset(torch.utils.data.Dataset):
- def __init__(self,
- name, # Name of the dataset.
- raw_shape, # Shape of the raw image data (NCHW).
- max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
- use_labels = False, # Enable conditioning labels? False = label dimension is zero.
- xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size.
- random_seed = 0, # Random seed to use when applying max_size.
- ):
- self._name = name
- self._raw_shape = list(raw_shape)
- self._use_labels = use_labels
- self._raw_labels = None
- self._label_shape = None
-
- # Apply max_size.
- self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
- if (max_size is not None) and (self._raw_idx.size > max_size):
- np.random.RandomState(random_seed).shuffle(self._raw_idx)
- self._raw_idx = np.sort(self._raw_idx[:max_size])
-
- # Apply xflip.
- self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
- if xflip:
- self._raw_idx = np.tile(self._raw_idx, 2)
- self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])
-
- def _get_raw_labels(self):
- if self._raw_labels is None:
- self._raw_labels = self._load_raw_labels() if self._use_labels else None
- if self._raw_labels is None:
- self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
- assert isinstance(self._raw_labels, np.ndarray)
- assert self._raw_labels.shape[0] == self._raw_shape[0]
- assert self._raw_labels.dtype in [np.float32, np.int64]
- if self._raw_labels.dtype == np.int64:
- assert self._raw_labels.ndim == 1
- assert np.all(self._raw_labels >= 0)
- return self._raw_labels
-
- def close(self): # to be overridden by subclass
- pass
-
- def _load_raw_image(self, raw_idx): # to be overridden by subclass
- raise NotImplementedError
-
- def _load_raw_labels(self): # to be overridden by subclass
- raise NotImplementedError
-
- def __getstate__(self):
- return dict(self.__dict__, _raw_labels=None)
-
- def __del__(self):
- try:
- self.close()
- except:
- pass
-
- def __len__(self):
- return self._raw_idx.size
-
- def __getitem__(self, idx):
- image = self._load_raw_image(self._raw_idx[idx])
- assert isinstance(image, np.ndarray)
- assert list(image.shape) == self.image_shape
- assert image.dtype == np.uint8
- if self._xflip[idx]:
- assert image.ndim == 3 # CHW
- image = image[:, :, ::-1]
- return image.copy(), self.get_label(idx)
-
- def get_label(self, idx):
- label = self._get_raw_labels()[self._raw_idx[idx]]
- if label.dtype == np.int64:
- onehot = np.zeros(self.label_shape, dtype=np.float32)
- onehot[label] = 1
- label = onehot
- return label.copy()
-
- def get_details(self, idx):
- d = dnnlib.EasyDict()
- d.raw_idx = int(self._raw_idx[idx])
- d.xflip = (int(self._xflip[idx]) != 0)
- d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
- return d
-
- @property
- def name(self):
- return self._name
-
- @property
- def image_shape(self):
- return list(self._raw_shape[1:])
-
- @property
- def num_channels(self):
- assert len(self.image_shape) == 3 # CHW
- return self.image_shape[0]
-
- @property
- def resolution(self):
- assert len(self.image_shape) == 3 # CHW
- assert self.image_shape[1] == self.image_shape[2]
- return self.image_shape[1]
-
- @property
- def label_shape(self):
- if self._label_shape is None:
- raw_labels = self._get_raw_labels()
- if raw_labels.dtype == np.int64:
- self._label_shape = [int(np.max(raw_labels)) + 1]
- else:
- self._label_shape = raw_labels.shape[1:]
- return list(self._label_shape)
-
- @property
- def label_dim(self):
- assert len(self.label_shape) == 1
- return self.label_shape[0]
-
- @property
- def has_labels(self):
- return any(x != 0 for x in self.label_shape)
-
- @property
- def has_onehot_labels(self):
- return self._get_raw_labels().dtype == np.int64
-
-#----------------------------------------------------------------------------
-
-class ImageFolderDataset(Dataset):
- def __init__(self,
- path, # Path to directory or zip.
- resolution = None, # Ensure specific resolution, None = highest available.
- **super_kwargs, # Additional arguments for the Dataset base class.
- ):
- self._path = path
- self._zipfile = None
-
- if os.path.isdir(self._path):
- self._type = 'dir'
- self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
- elif self._file_ext(self._path) == '.zip':
- self._type = 'zip'
- self._all_fnames = set(self._get_zipfile().namelist())
- else:
- raise IOError('Path must point to a directory or zip')
-
- PIL.Image.init()
- self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
- if len(self._image_fnames) == 0:
- raise IOError('No image files found in the specified path')
-
- name = os.path.splitext(os.path.basename(self._path))[0]
- raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
- if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
- raise IOError('Image files do not match the specified resolution')
- super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)
-
- @staticmethod
- def _file_ext(fname):
- return os.path.splitext(fname)[1].lower()
-
- def _get_zipfile(self):
- assert self._type == 'zip'
- if self._zipfile is None:
- self._zipfile = zipfile.ZipFile(self._path)
- return self._zipfile
-
- def _open_file(self, fname):
- if self._type == 'dir':
- return open(os.path.join(self._path, fname), 'rb')
- if self._type == 'zip':
- return self._get_zipfile().open(fname, 'r')
- return None
-
- def close(self):
- try:
- if self._zipfile is not None:
- self._zipfile.close()
- finally:
- self._zipfile = None
-
- def __getstate__(self):
- return dict(super().__getstate__(), _zipfile=None)
-
- def _load_raw_image(self, raw_idx):
- fname = self._image_fnames[raw_idx]
- with self._open_file(fname) as f:
- if pyspng is not None and self._file_ext(fname) == '.png':
- image = pyspng.load(f.read())
- else:
- image = np.array(PIL.Image.open(f))
- if image.ndim == 2:
- image = image[:, :, np.newaxis] # HW => HWC
- image = image.transpose(2, 0, 1) # HWC => CHW
- return image
-
- def _load_raw_labels(self):
- fname = 'dataset.json'
- if fname not in self._all_fnames:
- return None
- with self._open_file(fname) as f:
- labels = json.load(f)['labels']
- if labels is None:
- return None
- labels = dict(labels)
- labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
- labels = np.array(labels)
- labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
- return labels
-
-#----------------------------------------------------------------------------
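As a quick sketch of how the base `Dataset` combines `max_size` and `xflip` (illustrative numbers only):

```python
import numpy as np

raw_idx = np.arange(6, dtype=np.int64)                 # 6 raw images
np.random.RandomState(0).shuffle(raw_idx)
raw_idx = np.sort(raw_idx[:4])                         # max_size=4 keeps a random subset
xflip = np.zeros(raw_idx.size, dtype=np.uint8)
raw_idx = np.tile(raw_idx, 2)                          # xflip=True doubles the index list
xflip = np.concatenate([xflip, np.ones_like(xflip)])   # second half marks mirrored copies
print(raw_idx, xflip)
```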
diff --git a/spaces/EDGAhab/VITS-Aatrox-AI/utils.py b/spaces/EDGAhab/VITS-Aatrox-AI/utils.py
deleted file mode 100644
index b445fb65836a0b97e46426300eea9a820179797a..0000000000000000000000000000000000000000
--- a/spaces/EDGAhab/VITS-Aatrox-AI/utils.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
- except KeyError:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated in recent NumPy
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated in recent NumPy
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
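A short, illustrative use of the `HParams` helper above (config values are made up):

```python
config = {"train": {"batch_size": 16, "lr": 2e-4}, "model": {"hidden_channels": 192}}
hps = HParams(**config)                  # nested dicts become nested HParams
print(hps.train.batch_size)              # 16
print(hps["model"].hidden_channels)      # 192 (item access falls back to getattr)
print("train" in hps)                    # True
```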
diff --git a/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/attentions.py b/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/attentions.py
deleted file mode 100644
index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/lib/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from lib.infer_pack import commons
-from lib.infer_pack import modules
-from lib.infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so as to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
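Apart from the `lib.`-prefixed imports, this is the same attentions module deleted earlier. As a small complement, the two padding modes used by `FFN` can be compared in isolation (shapes chosen for illustration):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 10)               # [batch, channels, time]
k = 3                                    # kernel_size
causal = F.pad(x, (k - 1, 0))            # _causal_padding: all padding on the left
same = F.pad(x, ((k - 1) // 2, k // 2))  # _same_padding: padding split across both sides
print(causal.shape, same.shape)          # both torch.Size([1, 8, 12])
```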
diff --git a/spaces/Egrt/GCycleGAN/nets/resnest/resnest.py b/spaces/Egrt/GCycleGAN/nets/resnest/resnest.py
deleted file mode 100644
index a9fe5f79d347349aeb1db8ed5af5f2f8415a8b4d..0000000000000000000000000000000000000000
--- a/spaces/Egrt/GCycleGAN/nets/resnest/resnest.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-@author: Jun Wang
-@date: 20210301
-@contact: jun21wangustc@gmail.com
-"""
-
-# based on:
-# https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/resnest.py
-
-import torch
-import torch.nn as nn
-from .resnet import ResNet, Bottleneck
-
-class Flatten(nn.Module):
- def forward(self, input):
- return input.view(input.size(0), -1)
-
-def l2_norm(input,axis=1):
- norm = torch.norm(input,2,axis,True)
- output = torch.div(input, norm)
- return output
-
-class ResNeSt(nn.Module):
- def __init__(self, num_layers=50, drop_ratio=0.4, feat_dim=512, out_h=7, out_w=7):
- super(ResNeSt, self).__init__()
- self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
- nn.BatchNorm2d(64),
- nn.PReLU(64))
- self.output_layer = nn.Sequential(nn.BatchNorm2d(2048),
- nn.Dropout(drop_ratio),
- Flatten(),
- nn.Linear(2048 * out_h * out_w, feat_dim),
- nn.BatchNorm1d(feat_dim))
- if num_layers == 50:
- self.body = ResNet(Bottleneck, [3, 4, 6, 3],
- radix=2, groups=1, bottleneck_width=64,
- deep_stem=True, stem_width=32, avg_down=True,
- avd=True, avd_first=False)
- elif num_layers == 101:
- self.body = ResNet(Bottleneck, [3, 4, 23, 3],
- radix=2, groups=1, bottleneck_width=64,
- deep_stem=True, stem_width=64, avg_down=True,
- avd=True, avd_first=False)
- elif num_layers == 200:
- self.body = ResNet(Bottleneck, [3, 24, 36, 3],
- radix=2, groups=1, bottleneck_width=64,
- deep_stem=True, stem_width=64, avg_down=True,
- avd=True, avd_first=False)
- elif num_layers == 269:
- self.body = ResNet(Bottleneck, [3, 30, 48, 8],
- radix=2, groups=1, bottleneck_width=64,
- deep_stem=True, stem_width=64, avg_down=True,
- avd=True, avd_first=False)
- else:
- pass
- def forward(self, x):
- x = self.input_layer(x)
- x = self.body(x)
- x = self.output_layer(x)
- return l2_norm(x)
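The `l2_norm` helper normalizes each embedding row to unit length along `axis=1`; a tiny numeric check with made-up values:

```python
import torch

x = torch.tensor([[3.0, 4.0]])
norm = torch.norm(x, 2, 1, True)   # per-row L2 norm, kept as a column
print(torch.div(x, norm))          # tensor([[0.6000, 0.8000]])
```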
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/train.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/train.py
deleted file mode 100644
index a01c0dfccdb8b02283100ec5b792c33afaf22f5e..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/train.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import argparse
-import datetime
-import logging
-import math
-import copy
-import random
-import time
-import torch
-from os import path as osp
-
-from basicsr.data import build_dataloader, build_dataset
-from basicsr.data.data_sampler import EnlargedSampler
-from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
-from basicsr.models import build_model
-from basicsr.utils import (MessageLogger, check_resume, get_env_info, get_root_logger, init_tb_logger,
- init_wandb_logger, make_exp_dirs, mkdir_and_rename, set_random_seed)
-from basicsr.utils.dist_util import get_dist_info, init_dist
-from basicsr.utils.options import dict2str, parse
-
-import warnings
-# ignore UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`.
-warnings.filterwarnings("ignore", category=UserWarning)
-
-def parse_options(root_path, is_train=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.')
- parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none', help='job launcher')
- parser.add_argument('--local_rank', type=int, default=0)
- args = parser.parse_args()
- opt = parse(args.opt, root_path, is_train=is_train)
-
- # distributed settings
- if args.launcher == 'none':
- opt['dist'] = False
- print('Disable distributed.', flush=True)
- else:
- opt['dist'] = True
- if args.launcher == 'slurm' and 'dist_params' in opt:
- init_dist(args.launcher, **opt['dist_params'])
- else:
- init_dist(args.launcher)
-
- opt['rank'], opt['world_size'] = get_dist_info()
-
- # random seed
- seed = opt.get('manual_seed')
- if seed is None:
- seed = random.randint(1, 10000)
- opt['manual_seed'] = seed
- set_random_seed(seed + opt['rank'])
-
- return opt
-
-
-def init_loggers(opt):
- log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log")
- logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
- logger.info(get_env_info())
- logger.info(dict2str(opt))
-
- # initialize wandb logger before tensorboard logger to allow proper sync:
- if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project') is not None):
- assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb')
- init_wandb_logger(opt)
- tb_logger = None
- if opt['logger'].get('use_tb_logger'):
- tb_logger = init_tb_logger(log_dir=osp.join('tb_logger', opt['name']))
- return logger, tb_logger
-
-
-def create_train_val_dataloader(opt, logger):
- # create train and val dataloaders
- train_loader, val_loader = None, None
- for phase, dataset_opt in opt['datasets'].items():
- if phase == 'train':
- dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
- train_set = build_dataset(dataset_opt)
- train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio)
- train_loader = build_dataloader(
- train_set,
- dataset_opt,
- num_gpu=opt['num_gpu'],
- dist=opt['dist'],
- sampler=train_sampler,
- seed=opt['manual_seed'])
-
- num_iter_per_epoch = math.ceil(
- len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
- total_iters = int(opt['train']['total_iter'])
- total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
- logger.info('Training statistics:'
- f'\n\tNumber of train images: {len(train_set)}'
- f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
- f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
- f'\n\tWorld size (gpu number): {opt["world_size"]}'
- f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
- f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
-
- elif phase == 'val':
- val_set = build_dataset(dataset_opt)
- val_loader = build_dataloader(
- val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
- logger.info(f'Number of val images/folders in {dataset_opt["name"]}: ' f'{len(val_set)}')
- else:
- raise ValueError(f'Dataset phase {phase} is not recognized.')
-
- return train_loader, train_sampler, val_loader, total_epochs, total_iters
-
-
-def train_pipeline(root_path):
- # parse options, set distributed settings, set random seed
- opt = parse_options(root_path, is_train=True)
-
- torch.backends.cudnn.benchmark = True
- # torch.backends.cudnn.deterministic = True
-
- # load resume states if necessary
- if opt['path'].get('resume_state'):
- device_id = torch.cuda.current_device()
- resume_state = torch.load(
- opt['path']['resume_state'], map_location=lambda storage, loc: storage.cuda(device_id))
- else:
- resume_state = None
-
- # mkdir for experiments and logger
- if resume_state is None:
- make_exp_dirs(opt)
- if opt['logger'].get('use_tb_logger') and opt['rank'] == 0:
- mkdir_and_rename(osp.join('tb_logger', opt['name']))
-
- # initialize loggers
- logger, tb_logger = init_loggers(opt)
-
- # create train and validation dataloaders
- result = create_train_val_dataloader(opt, logger)
- train_loader, train_sampler, val_loader, total_epochs, total_iters = result
-
- # create model
- if resume_state: # resume training
- check_resume(opt, resume_state['iter'])
- model = build_model(opt)
- model.resume_training(resume_state) # handle optimizers and schedulers
- logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.")
- start_epoch = resume_state['epoch']
- current_iter = resume_state['iter']
- else:
- model = build_model(opt)
- start_epoch = 0
- current_iter = 0
-
- # create message logger (formatted outputs)
- msg_logger = MessageLogger(opt, current_iter, tb_logger)
-
- # dataloader prefetcher
- prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
- if prefetch_mode is None or prefetch_mode == 'cpu':
- prefetcher = CPUPrefetcher(train_loader)
- elif prefetch_mode == 'cuda':
- prefetcher = CUDAPrefetcher(train_loader, opt)
- logger.info(f'Use {prefetch_mode} prefetch dataloader')
- if opt['datasets']['train'].get('pin_memory') is not True:
- raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
- else:
- raise ValueError(f'Wrong prefetch_mode {prefetch_mode}. ' "Supported ones are: None, 'cuda', 'cpu'.")
-
- # training
- logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter+1}')
- data_time, iter_time = time.time(), time.time()
- start_time = time.time()
-
- for epoch in range(start_epoch, total_epochs + 1):
- train_sampler.set_epoch(epoch)
- prefetcher.reset()
- train_data = prefetcher.next()
-
- while train_data is not None:
- data_time = time.time() - data_time
-
- current_iter += 1
- if current_iter > total_iters:
- break
- # update learning rate
- model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
- # training
- model.feed_data(train_data)
- model.optimize_parameters(current_iter)
- iter_time = time.time() - iter_time
- # log
- if current_iter % opt['logger']['print_freq'] == 0:
- log_vars = {'epoch': epoch, 'iter': current_iter}
- log_vars.update({'lrs': model.get_current_learning_rate()})
- log_vars.update({'time': iter_time, 'data_time': data_time})
- log_vars.update(model.get_current_log())
- msg_logger(log_vars)
-
- # save models and training states
- if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
- logger.info('Saving models and training states.')
- model.save(epoch, current_iter)
-
- # validation
- if opt.get('val') is not None and opt['datasets'].get('val') is not None \
- and (current_iter % opt['val']['val_freq'] == 0):
- model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
-
- data_time = time.time()
- iter_time = time.time()
- train_data = prefetcher.next()
- # end of iter
-
- # end of epoch
-
- consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
- logger.info(f'End of training. Time consumed: {consumed_time}')
- logger.info('Save the latest model.')
- model.save(epoch=-1, current_iter=-1) # -1 stands for the latest
- if opt.get('val') is not None and opt['datasets'].get('val'):
- model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
- if tb_logger:
- tb_logger.close()
-
-
-if __name__ == '__main__':
- root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
- train_pipeline(root_path)
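The epoch/iteration bookkeeping in `create_train_val_dataloader` is worth a worked example (all numbers hypothetical):

```python
import math

num_train_images, dataset_enlarge_ratio = 10_000, 1
batch_size_per_gpu, world_size = 16, 2
total_iters = 100_000

num_iter_per_epoch = math.ceil(
    num_train_images * dataset_enlarge_ratio / (batch_size_per_gpu * world_size)
)
total_epochs = math.ceil(total_iters / num_iter_per_epoch)
print(num_iter_per_epoch, total_epochs)   # 313 320
```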
diff --git a/spaces/FelixLuoX/stable_diffusion_test/app.py b/spaces/FelixLuoX/stable_diffusion_test/app.py
deleted file mode 100644
index b6f79bfcd44575ec329b81661e66b911cf660983..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/stable_diffusion_test/app.py
+++ /dev/null
@@ -1,494 +0,0 @@
-import gradio as gr
-import io
-from PIL import Image, PngImagePlugin
-import base64
-import requests
-import json
-import ui_functions as uifn
-
-txt2img_defaults = {
- 'prompt': '',
- 'ddim_steps': 50,
- 'toggles': [1, 2, 3],
- 'sampler_name': 'k_lms',
- 'ddim_eta': 0.0,
- 'n_iter': 1,
- 'batch_size': 1,
- 'cfg_scale': 7.5,
- 'seed': '',
- 'height': 512,
- 'width': 512,
- 'fp': None,
- 'variant_amount': 0.0,
- 'variant_seed': '',
- 'submit_on_enter': 'Yes',
-}
-
-img2img_defaults = {
- 'prompt': '',
- 'ddim_steps': 50,
- 'toggles': [1, 4, 5],
- 'sampler_name': 'k_lms',
- 'ddim_eta': 0.0,
- 'n_iter': 1,
- 'batch_size': 1,
- 'cfg_scale': 5.0,
- 'denoising_strength': 0.75,
- 'mask_mode': 1,
- 'resize_mode': 0,
- 'seed': '',
- 'height': 512,
- 'width': 512,
- 'fp': None,
-}
-sample_img2img = None
-job_manager = None
-RealESRGAN = True
-show_embeddings = False
-
-img2img_resize_modes = [
- "Just resize",
- "Crop and resize",
- "Resize and fill",
-]
-
-img2img_toggles = [
- 'Create prompt matrix (separate multiple prompts using |, and get all combinations of them)',
- 'Normalize Prompt Weights (ensure sum of weights add up to 1.0)',
- 'Loopback (use images from previous batch when creating next batch)',
- 'Random loopback seed',
- 'Save individual images',
- 'Save grid',
- 'Sort samples by prompt',
- 'Write sample info files',
- 'Write sample info to one file',
- 'jpg samples',
-]
-
-img2img_toggle_defaults = [img2img_toggles[i] for i in img2img_defaults['toggles']]
-
-def read_content(file_path: str) -> str:
- """read the content of target file
- """
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- return content
-
-def base2picture(resbase64):
- res=resbase64.split(',')[1]
- img_b64decode = base64.b64decode(res)
- image = io.BytesIO(img_b64decode)
- img = Image.open(image)
- return img
-
-def filter_content(raw_style: str):
- if "(" in raw_style:
- i = raw_style.index("(")
- else :
- i = -1
-
- if i == -1:
- return raw_style
- else :
- return raw_style[:i]
-
-def request_images(raw_text, class_draw, style_draw, batch_size):
- if filter_content(class_draw) != "国画":
- if filter_content(class_draw) != "通用":
- raw_text = raw_text + f",{filter_content(class_draw)}"
-
- for sty in style_draw:
- raw_text = raw_text + f",{filter_content(sty)}"
- print(f"raw text is {raw_text}")
- url = "http://flagart.baai.ac.cn/api/general/"
- elif filter_content(class_draw) == "国画":
- if raw_text.endswith("国画"):
- pass
- else :
- raw_text = raw_text + ",国画"
- url = "http://flagart.baai.ac.cn/api/guohua/"
-
- d = {"data":[raw_text, batch_size]}
- r = requests.post(url, json=d, headers={"Content-Type": "application/json", "Accept": "*/*", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive"})
- result_text = r.text
- content = json.loads(result_text)["data"][0]
- images = []
- for i in range(batch_size):
- # print(content[i])
- images.append(base2picture(content[i]))
-
- return images
-
-def call_JS(sd_method, **kwargs):
- param_str = json.dumps(kwargs)
- return f"async (...x) => {{ return await SD.{sd_method}({{ x, ...{param_str} }}) ?? []; }}"
-
-def encode_pil_to_base64(pil_image):
- with io.BytesIO() as output_bytes:
-
- # Copy any text-only metadata
- use_metadata = False
- metadata = PngImagePlugin.PngInfo()
- for key, value in pil_image.info.items():
- if isinstance(key, str) and isinstance(value, str):
- metadata.add_text(key, value)
- use_metadata = True
-
- pil_image.save(
- output_bytes, "PNG", pnginfo=(metadata if use_metadata else None)
- )
- bytes_data = output_bytes.getvalue()
- base64_str = str(base64.b64encode(bytes_data), "utf-8")
- return "data:image/png;base64," + base64_str
-
-def img2img(*args):
-
-    # Encode PIL image/mask inputs to base64 before posting the request
- for i, item in enumerate(args):
- # print(type(item))
- if type(item) == dict:
- args[i]['image'] = encode_pil_to_base64(item['image'])
- args[i]['mask'] = encode_pil_to_base64(item['mask'])
- else:
- print(i,type(item))
- print(item)
-
- batch_size = args[8]
-
- url = "http://flagart.baai.ac.cn/api/img2img/"
- d = {"data":args}
- r = requests.post(url, json=d, headers={"Content-Type": "application/json", "Accept": "*/*", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive"})
- print(r)
- result_text = r.text
- content = json.loads(result_text)["data"][0]
- images = []
- for i in range(batch_size):
- # print(content[i])
- images.append(base2picture(content[i]))
- # content = json.loads(result_text)
- # print(result_text)
-    # print("The server has returned the results!")
- return images
-
-
-examples = [
- '水墨蝴蝶和牡丹花,国画',
- '苍劲有力的墨竹,国画',
- '暴风雨中的灯塔',
- '机械小松鼠,科学幻想',
- '中国水墨山水画,国画',
- "Lighthouse in the storm",
- "A dog",
- "Landscape by 张大千",
- "A tiger 长了兔子耳朵",
- "A baby bird 铅笔素描",
-
-]
-
-if __name__ == "__main__":
- block = gr.Blocks(css=read_content('style.css'))
-
- with block:
- # gr.HTML(read_content("header.html"))
-
- with gr.Tabs() as tabs:
-
- with gr.TabItem("文生图(Text-to-img)"):
-
- with gr.Group():
- with gr.Box():
- with gr.Row().style(mobile_collapse=False, equal_height=True):
- text = gr.Textbox(
- label="Prompt",
- show_label=False,
- max_lines=1,
- placeholder="Input text(输入文字)",
- interactive=True,
- ).style(
- border=(True, False, True, True),
- rounded=(True, False, False, True),
- container=False,
- )
-
- btn = gr.Button("Generate image").style(
- margin=False,
- rounded=(True, True, True, True),
- )
- with gr.Row().style(mobile_collapse=False, equal_height=True):
- class_draw = gr.Dropdown(["通用(general)", "国画(traditional Chinese painting)",
- "照片,摄影(picture photography)", "油画(oil painting)",
- "铅笔素描(pencil sketch)", "CG",
- "水彩画(watercolor painting)", "水墨画(ink and wash)",
- "插画(illustrations)", "3D", "图生图(img2img)"],
- label="生成类型(type)",
- show_label=True,
- value="通用(general)")
- with gr.Row().style(mobile_collapse=False, equal_height=True):
- style_draw = gr.CheckboxGroup(["蒸汽朋克(steampunk)", "电影摄影风格(film photography)",
- "概念艺术(concept art)", "Warming lighting",
- "Dramatic lighting", "Natural lighting",
- "虚幻引擎(unreal engine)", "4k", "8k",
- "充满细节(full details)"],
- label="画面风格(style)",
- show_label=True,
- )
- with gr.Row().style(mobile_collapse=False, equal_height=True):
- sample_size = gr.Slider(minimum=1,
- maximum=4,
- step=1,
- label="生成数量(number)",
- show_label=True,
- interactive=True,
- )
-
- gallery = gr.Gallery(
- label="Generated images", show_label=False, elem_id="gallery"
- ).style(grid=[2], height="auto")
-
- gr.Examples(examples=examples, fn=request_images, inputs=text, outputs=gallery, examples_per_page=100)
- text.submit(request_images, inputs=[text, class_draw, style_draw, sample_size], outputs=gallery)
- btn.click(request_images, inputs=[text, class_draw, style_draw, sample_size], outputs=gallery)
-
- with gr.TabItem("图生图(Img-to-Img)", id="img2img_tab"):
- with gr.Row(elem_id="prompt_row"):
- img2img_prompt = gr.Textbox(label="Prompt",
- elem_id='img2img_prompt_input',
- placeholder="神奇的森林,流淌的河流.",
- lines=1,
- max_lines=1 if txt2img_defaults['submit_on_enter'] == 'Yes' else 25,
- value=img2img_defaults['prompt'],
- show_label=False).style()
-
- img2img_btn_mask = gr.Button("Generate", variant="primary", visible=False,
- elem_id="img2img_mask_btn")
- img2img_btn_editor = gr.Button("Generate", variant="primary", elem_id="img2img_edit_btn")
- with gr.Row().style(equal_height=False):
- with gr.Column():
- gr.Markdown('#### 输入图像')
- img2img_image_mask = gr.Image(
- value=sample_img2img,
- source="upload",
- interactive=True,
- tool="sketch",
- type='pil',
- elem_id="img2img_mask",
- image_mode="RGBA"
- )
- img2img_image_editor = gr.Image(
- value=sample_img2img,
- source="upload",
- interactive=True,
- tool="select",
- type='pil',
- visible=False,
- image_mode="RGBA",
- elem_id="img2img_editor"
- )
-
- with gr.Tabs():
- with gr.TabItem("编辑设置"):
- with gr.Row():
- # disable Uncrop for now
- choices=["Mask", "Crop", "Uncrop"]
- img2img_image_editor_mode = gr.Radio(choices=["Mask"],
- label="编辑模式",
- value="Mask", elem_id='edit_mode_select',
- visible=True)
- img2img_mask = gr.Radio(choices=["保留mask区域", "生成mask区域"],
- label="Mask 方式",
- #value=img2img_mask_modes[img2img_defaults['mask_mode']],
- value = "生成mask区域",
- visible=True)
-
- img2img_mask_blur_strength = gr.Slider(minimum=1, maximum=10, step=1,
- label="How much blurry should the mask be? (to avoid hard edges)",
- value=3, visible=False)
-
- img2img_resize = gr.Radio(label="Resize mode",
- choices=["Just resize", "Crop and resize",
- "Resize and fill"],
- value=img2img_resize_modes[
- img2img_defaults['resize_mode']], visible=False)
-
- img2img_painterro_btn = gr.Button("Advanced Editor",visible=False)
- # with gr.TabItem("Hints",visible=False):
- # img2img_help = gr.Markdown(visible=False, value=uifn.help_text)
-
- with gr.Column():
- gr.Markdown('#### 编辑后的图片')
- output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(
- grid=[4, 4, 4])
- img2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
- with gr.Column(visible=False):
- with gr.Tabs(visible=False):
- with gr.TabItem("", id="img2img_actions_tab",visible=False):
- gr.Markdown("Select an image, then press one of the buttons below")
- with gr.Row():
- output_img2img_copy_to_clipboard_btn = gr.Button("Copy to clipboard")
- output_img2img_copy_to_input_btn = gr.Button("Push to img2img input")
- output_img2img_copy_to_mask_btn = gr.Button("Push to img2img input mask")
-
- gr.Markdown("Warning: This will clear your current image and mask settings!")
- with gr.TabItem("", id="img2img_output_info_tab",visible=False):
- output_img2img_params = gr.Textbox(label="Generation parameters")
- with gr.Row():
- output_img2img_copy_params = gr.Button("Copy full parameters").click(
- inputs=output_img2img_params, outputs=[],
- _js='(x) => {navigator.clipboard.writeText(x.replace(": ",":"))}', fn=None,
- show_progress=False)
- output_img2img_seed = gr.Number(label='Seed', interactive=False, visible=False)
- output_img2img_copy_seed = gr.Button("Copy only seed").click(
- inputs=output_img2img_seed, outputs=[],
- _js=call_JS("gradioInputToClipboard"), fn=None, show_progress=False)
- output_img2img_stats = gr.HTML(label='Stats')
-
- gr.Markdown('# 编辑设置')
-
- with gr.Row():
- with gr.Column():
- img2img_width = gr.Slider(minimum=64, maximum=2048, step=64, label="图片宽度",
- value=img2img_defaults["width"])
- img2img_height = gr.Slider(minimum=64, maximum=2048, step=64, label="图片高度",
- value=img2img_defaults["height"])
- img2img_cfg = gr.Slider(minimum=-40.0, maximum=30.0, step=0.5,
- label='文本引导强度',
- value=img2img_defaults['cfg_scale'], elem_id='cfg_slider')
- img2img_seed = gr.Textbox(label="随机种子", lines=1, max_lines=1,
- value=img2img_defaults["seed"])
- img2img_batch_count = gr.Slider(minimum=1, maximum=50, step=1,
- label='生成数量',
- value=img2img_defaults['n_iter'])
- img2img_dimensions_info_text_box = gr.Textbox(
- label="长宽比设置")
- with gr.Column():
- img2img_steps = gr.Slider(minimum=1, maximum=250, step=1, label="采样步数",
- value=img2img_defaults['ddim_steps'])
-
- img2img_sampling = gr.Dropdown(label='采样方式',
- choices=["DDIM", 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler',
- 'k_heun', 'k_lms'],
- value=img2img_defaults['sampler_name'])
-
- img2img_denoising = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising Strength',
- value=img2img_defaults['denoising_strength'],visible=False)
-
- img2img_toggles = gr.CheckboxGroup(label='', choices=img2img_toggles,
- value=img2img_toggle_defaults,visible=False)
-
- img2img_realesrgan_model_name = gr.Dropdown(label='RealESRGAN model',
- choices=['RealESRGAN_x4plus',
- 'RealESRGAN_x4plus_anime_6B'],
- value='RealESRGAN_x4plus',
- visible=RealESRGAN is not None) # TODO: Feels like I shouldnt slot it in here.
-
- img2img_embeddings = gr.File(label="Embeddings file for textual inversion",
- visible=show_embeddings)
-
- img2img_image_editor_mode.change(
- uifn.change_image_editor_mode,
- [img2img_image_editor_mode,
- img2img_image_editor,
- img2img_image_mask,
- img2img_resize,
- img2img_width,
- img2img_height
- ],
- [img2img_image_editor, img2img_image_mask, img2img_btn_editor, img2img_btn_mask,
- img2img_painterro_btn, img2img_mask, img2img_mask_blur_strength]
- )
-
- # img2img_image_editor_mode.change(
- # uifn.update_image_mask,
- # [img2img_image_editor, img2img_resize, img2img_width, img2img_height],
- # img2img_image_mask
- # )
-
- # output_txt2img_copy_to_input_btn.click(
- # uifn.copy_img_to_input,
- # [output_txt2img_gallery],
- # [img2img_image_editor, img2img_image_mask, tabs],
- # _js=call_JS("moveImageFromGallery",
- # fromId="txt2img_gallery_output",
- # toId="img2img_editor")
- # )
-
- output_img2img_copy_to_input_btn.click(
- uifn.copy_img_to_edit,
- [output_img2img_gallery],
- [img2img_image_editor, tabs, img2img_image_editor_mode],
- _js=call_JS("moveImageFromGallery",
- fromId="img2img_gallery_output",
- toId="img2img_editor")
- )
- output_img2img_copy_to_mask_btn.click(
- uifn.copy_img_to_mask,
- [output_img2img_gallery],
- [img2img_image_mask, tabs, img2img_image_editor_mode],
- _js=call_JS("moveImageFromGallery",
- fromId="img2img_gallery_output",
- toId="img2img_editor")
- )
-
- output_img2img_copy_to_clipboard_btn.click(fn=None, inputs=output_img2img_gallery, outputs=[],
- _js=call_JS("copyImageFromGalleryToClipboard",
- fromId="img2img_gallery_output")
- )
-
- img2img_func = img2img
- img2img_inputs = [img2img_prompt, img2img_image_editor_mode, img2img_mask,
- img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
- img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
- img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
- img2img_image_mask]
- # img2img_outputs = [output_img2img_gallery, output_img2img_seed, output_img2img_params,
- # output_img2img_stats]
-
- img2img_outputs = [output_img2img_gallery]
-
- # If a JobManager was passed in then wrap the Generate functions
- if img2img_job_ui:
- img2img_func, img2img_inputs, img2img_outputs = img2img_job_ui.wrap_func(
- func=img2img_func,
- inputs=img2img_inputs,
- outputs=img2img_outputs,
- )
-
- img2img_btn_mask.click(
- img2img_func,
- img2img_inputs,
- img2img_outputs
- )
-
- def img2img_submit_params():
- # print([img2img_prompt, img2img_image_editor_mode, img2img_mask,
- # img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
- # img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
- # img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
- # img2img_image_editor, img2img_image_mask, img2img_embeddings])
- return (img2img_func,
- img2img_inputs,
- img2img_outputs)
-
- img2img_btn_editor.click(*img2img_submit_params())
-
- # GENERATE ON ENTER
- img2img_prompt.submit(None, None, None,
- _js=call_JS("clickFirstVisibleButton",
- rowId="prompt_row"))
-
- img2img_painterro_btn.click(None,
- [img2img_image_editor, img2img_image_mask, img2img_image_editor_mode],
- [img2img_image_editor, img2img_image_mask],
- _js=call_JS("Painterro.init", toId="img2img_editor")
- )
-
- img2img_width.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height],
- outputs=img2img_dimensions_info_text_box)
- img2img_height.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height],
- outputs=img2img_dimensions_info_text_box)
-
- # gr.HTML(read_content("footer.html"))
- # gr.Image('./contributors.png')
-
- block.queue(max_size=50, concurrency_count=20).launch()
\ No newline at end of file
diff --git a/spaces/Fernando22/freegpt-webui/README.md b/spaces/Fernando22/freegpt-webui/README.md
deleted file mode 100644
index 35279f351cdd06266746ff798cdb9bac48681082..0000000000000000000000000000000000000000
--- a/spaces/Fernando22/freegpt-webui/README.md
+++ /dev/null
@@ -1,195 +0,0 @@
----
-title: FreeGPT WebUI
-emoji: 🚀
-colorFrom: blue
-colorTo: yellow
-sdk: docker
-sdk_version: 1.24.0
-app_file: run.py
-pinned: true
-app_port: 1338
-duplicated_from: monra/freegpt-webui
----
-
-# FreeGPT WebUI
-## GPT 3.5/4
-
-NO API KEY REQUIRED ❌🔑
-
-This project features a WebUI utilizing the [G4F API](https://github.com/xtekky/gpt4free).
-Experience the power of ChatGPT with a user-friendly interface and enhanced jailbreaks, completely free of charge.
-
-## Known bugs 🚧
-- Stream mode not working properly.
-
-## News 📢
-I have created a new version of FreeGPT WebUI using the [ChimeraGPT API](https://chimeragpt.adventblocks.cc/).
-
-
-This free API allows you to use various AI chat models, including GPT-4, GPT-4-32k, Claude-2, Claude-2-100k, and more.
-Check out the project here: [FreeGPT WebUI - Chimera Version](https://github.com/ramonvc/freegpt-webui/tree/chimeragpt-version).
-
-## Project Hosting and Demonstration 🌐🚀
-The project is hosted on multiple platforms so that it can be tested and modified.
-|Platform|Status|API Key|Free|Repo|Demo|
-|--|--|--|--|--|--|
-|[replit](https://replit.com/)||◼️|☑️|[FreeGPT WebUI](https://replit.com/@ramonvc/freegpt-webui)|[Chat](https://freegpt-webui.ramonvc.repl.co/chat/)
-|[hugging face](https://huggingface.co)||◼️|☑️|[FreeGPT WebUI](https://huggingface.co/spaces/monra/freegpt-webui/tree/main)|[Chat](https://huggingface.co/spaces/monra/freegpt-webui)
-|[replit](https://replit.com/)||☑️|☑️|[FreeGPT WebUI - Chimera Version](https://replit.com/@ramonvc/freegpt-webui-chimera)|[Chat](https://freegpt-webui-chimera.ramonvc.repl.co/chat/)
-
-## Note ℹ️
-
- FreeGPT is a project that utilizes various free AI conversation API Providers. Each Provider is an API that provides responses generated by different AI models. The source code related to these services is available in the G4F folder.
-
-It is important to note that, due to the extensive reach of this project, the free services registered here may receive a significant number of requests, which can result in temporary unavailability or access limitations. Therefore, it is common to encounter these services being offline or unstable.
-
-We recommend that you search for your own Providers and add them to your personal projects to avoid service instability and unavailability. Within the project, in the Providers folder, you will find several examples of Providers that have worked in the past or are still functioning. It is easy to follow the logic of these examples to find free GPT services and incorporate the requests into your specific FreeGPT project.
-
-Please note that the choice and integration of additional Providers are the user's responsibility and are not directly related to the FreeGPT project, as the project serves as an example of how to combine the G4F API with a web interface.
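-
-As a rough illustration of the idea, a Provider usually boils down to a thin wrapper around an HTTP request to a free chat endpoint. The sketch below is hypothetical: the class name, `create_completion` signature, and URL are illustrative assumptions, not the actual G4F Provider interface — use the examples in the Providers folder as the authoritative reference.
-
-```
-import requests
-
-
-class ExampleProvider:
-    """Hypothetical provider: forwards chat messages to a free endpoint."""
-
-    url = "https://example-free-gpt.test/api/chat"  # placeholder endpoint
-
-    @staticmethod
-    def create_completion(model: str, messages: list, **kwargs) -> str:
-        # Send the conversation and return the assistant's reply text.
-        response = requests.post(
-            ExampleProvider.url,
-            json={"model": model, "messages": messages},
-            timeout=60,
-        )
-        response.raise_for_status()
-        return response.json()["choices"][0]["message"]["content"]
-```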
-
-
-## Table of Contents
-- [To-Do List](#to-do-list-%EF%B8%8F)
-- [Getting Started](#getting-started-white_check_mark)
- - [Cloning the Repository](#cloning-the-repository-inbox_tray)
- - [Install Dependencies](#install-dependencies-wrench)
-- [Running the Application](#running-the-application-rocket)
-- [Docker](#docker-)
- - [Prerequisites](#prerequisites)
- - [Running the Docker](#running-the-docker)
-- [Incorporated Projects](#incorporated-projects-busts_in_silhouette)
- - [WebUI](#webui)
- - [API FreeGPT](#api-g4f)
-- [Star History](#star-history)
-- [Legal Notice](#legal-notice)
-
-##
-
-## To-Do List ✔️
-
-- [x] Integrate the free GPT API into the WebUI
-- [x] Create Docker support
-- [x] Improve the Jailbreak functionality
-- [x] Add the GPT-4 model
-- [x] Enhance the user interface
-- [ ] Check status of API Providers (online/offline)
-- [ ] Enable editing and creating Jailbreaks/Roles in the WebUI
-- [ ] Refactor web client
-
-## Getting Started :white_check_mark:
-To get started with this project, you'll need to clone the repository and have [Python](https://www.python.org/downloads/) installed on your system.
-
-### Cloning the Repository :inbox_tray:
-Run the following command to clone the repository:
-
-```
-git clone https://github.com/ramonvc/freegpt-webui.git
-```
-
-### Install Dependencies :wrench:
-Navigate to the project directory:
-```
-cd freegpt-webui
-```
-
-Install the dependencies:
-```
-pip install -r requirements.txt
-```
-## Running the Application :rocket:
-To start the application, run the following command:
-```
-python run.py
-```
-
-Access the application in your browser using the URL:
-```
-http://127.0.0.1:1338
-```
-or
-```
-http://localhost:1338
-```
-
-
-## Docker 🐳
-### Prerequisites
-Before you start, make sure you have installed [Docker](https://www.docker.com/get-started) on your machine.
-
-### Running the Docker
-Pull the Docker image from Docker Hub:
-```
-docker pull ramonvc/freegpt-webui
-```
-
-Run the application using Docker:
-```
-docker run -p 1338:1338 ramonvc/freegpt-webui
-```
-
-Access the application in your browser using the URL:
-```
-http://127.0.0.1:1338
-```
-or
-```
-http://localhost:1338
-```
-
-When you're done using the application, stop the Docker containers using the following command:
-```
-docker stop <container_id>
-```
-
-## Incorporated Projects :busts_in_silhouette:
-I highly recommend visiting and supporting both projects.
-
-### WebUI
-The application interface was incorporated from the [chatgpt-clone](https://github.com/xtekky/chatgpt-clone) repository.
-
-### API G4F
-The free GPT-4 API was incorporated from the [GPT4Free](https://github.com/xtekky/gpt4free) repository.
-
-
-
-## Star History
-[Star History Chart](https://star-history.com/#ramonvc/freegpt-webui&Timeline)
-
-
-
-## Legal Notice
-This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This
-project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to
-improve their security or request the removal of their site from this repository.
-
-Please note the following:
-
-1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners.
- This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers
- mentioned.
-
-2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses
- arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely
-   responsible for their actions and any repercussions that may follow. We strongly recommend that users follow the
-   TOS of each website.
-
-3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By
- using the information and code provided, users acknowledge that they are using the APIs and models at their own risk
- and agree to comply with any applicable laws and regulations.
-
-4. **Copyright**: All content in this repository, including but not limited to code, images, and documentation, is the
- intellectual property of the repository author, unless otherwise stated. Unauthorized copying, distribution, or use
- of any content in this repository is strictly prohibited without the express written consent of the repository
- author.
-
-5. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and
- against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of
- or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
-
-6. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or
- features in this repository at any time without prior notice. Users are responsible for regularly reviewing the
- content and any changes made to this repository.
-
-By using this repository or any code related to it, you agree to these terms. The author is not responsible for any
-copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent
-impersonation or irresponsible actions, please comply with the GNU GPL license this repository uses.
diff --git a/spaces/FrankZxShen/vits-fast-finetuning-pcr/text/__init__.py b/spaces/FrankZxShen/vits-fast-finetuning-pcr/text/__init__.py
deleted file mode 100644
index 11e5586c347c3071a9d1aca0425d112f45402e85..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/vits-fast-finetuning-pcr/text/__init__.py
+++ /dev/null
@@ -1,60 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      text: string to convert to a sequence
-      symbols: list of symbols used to build the symbol-to-id mapping
-      cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = []
- symbol_to_id = {s: i for i, s in enumerate(symbols)}
- clean_text = _clean_text(text, cleaner_names)
- print(clean_text)
- print(f" length:{len(clean_text)}")
- for symbol in clean_text:
- if symbol not in symbol_to_id.keys():
- continue
- symbol_id = symbol_to_id[symbol]
- sequence += [symbol_id]
- print(f" length:{len(sequence)}")
- return sequence
-
-
-def cleaned_text_to_sequence(cleaned_text, symbols):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      cleaned_text: cleaned text string to convert to a sequence
-      symbols: list of symbols used to build the symbol-to-id mapping
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = [symbol_to_id[symbol] for symbol in cleaned_text if symbol in symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/FridaZuley/RVC_HFKawaii/i18n/scan_i18n.py b/spaces/FridaZuley/RVC_HFKawaii/i18n/scan_i18n.py
deleted file mode 100644
index f3e52cf4f9f06d78877d77d2353f666aa759e36f..0000000000000000000000000000000000000000
--- a/spaces/FridaZuley/RVC_HFKawaii/i18n/scan_i18n.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import ast
-import glob
-import json
-from collections import OrderedDict
-
-
-def extract_i18n_strings(node):
- i18n_strings = []
-
- if (
- isinstance(node, ast.Call)
- and isinstance(node.func, ast.Name)
- and node.func.id == "i18n"
- ):
- for arg in node.args:
- if isinstance(arg, ast.Str):
- i18n_strings.append(arg.s)
-
- for child_node in ast.iter_child_nodes(node):
- i18n_strings.extend(extract_i18n_strings(child_node))
-
- return i18n_strings
-
-
-# scan the directory for all .py files (recursively)
-# for each file, parse the code into an AST
-# for each AST, extract the i18n strings
-
-strings = []
-for filename in glob.iglob("**/*.py", recursive=True):
- with open(filename, "r") as f:
- code = f.read()
- if "I18nAuto" in code:
- tree = ast.parse(code)
- i18n_strings = extract_i18n_strings(tree)
- print(filename, len(i18n_strings))
- strings.extend(i18n_strings)
-code_keys = set(strings)
-"""
-n_i18n.py
-gui_v1.py 26
-app.py 16
-infer-web.py 147
-scan_i18n.py 0
-i18n.py 0
-lib/train/process_ckpt.py 1
-"""
-print()
-print("Total unique:", len(code_keys))
-
-
-standard_file = "i18n/locale/zh_CN.json"
-with open(standard_file, "r", encoding="utf-8") as f:
- standard_data = json.load(f, object_pairs_hook=OrderedDict)
-standard_keys = set(standard_data.keys())
-
-# Keys that exist in the locale file but are no longer used in the code
-unused_keys = standard_keys - code_keys
-print("Unused keys:", len(unused_keys))
-for unused_key in unused_keys:
- print("\t", unused_key)
-
-missing_keys = code_keys - standard_keys
-print("Missing keys:", len(missing_keys))
-for missing_key in missing_keys:
- print("\t", missing_key)
-
-code_keys_dict = OrderedDict()
-for s in strings:
- code_keys_dict[s] = s
-
-# write back
-with open(standard_file, "w", encoding="utf-8") as f:
- json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True)
- f.write("\n")
diff --git a/spaces/GEM/DatasetCardForm/formatting/README.md b/spaces/GEM/DatasetCardForm/formatting/README.md
deleted file mode 100644
index e63fb61691eda571a3df1f89fe45eff76f579dfb..0000000000000000000000000000000000000000
--- a/spaces/GEM/DatasetCardForm/formatting/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Formatting Utilities
-
-The resources in this folder are used to format the saved dataset card in a readable format (JSON, Markdown).
-
-File | Description
---- | ---
-[`key_to_question.json`](https://huggingface.co/spaces/GEM/DatasetCardForm/blob/main/formatting/key_to_question.json) | Maps from the saved key back to the original question, which can then be parsed into question/answer pairs.
-[`reformat_json.py`](https://huggingface.co/spaces/GEM/DatasetCardForm/blob/main/formatting/reformat_json.py) | Uses `key_to_question.json` to transform dataset cards into a JSON organized by Data Card standards (e.g., sections, subsections, scopes). Everything labeled `N/A` in the original card will be rendered; empty fields will be completely omitted.
-[`json_to_md.py`](https://huggingface.co/spaces/GEM/DatasetCardForm/blob/main/formatting/json_to_md.py) | Transforms output from `reformat_json.py` into Markdown that is compatible with Data Cards Labs (e.g., special comment syntax).
\ No newline at end of file
diff --git a/spaces/GTR-32X/uboa/README.md b/spaces/GTR-32X/uboa/README.md
deleted file mode 100644
index cfca43b0aa887aacbb1ba57f63c34d0da25b0b90..0000000000000000000000000000000000000000
--- a/spaces/GTR-32X/uboa/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Uboa Proxy
-emoji: 📉
-colorFrom: yellow
-colorTo: pink
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GaenKoki/voicevox/test/test_connect_base64_waves.py b/spaces/GaenKoki/voicevox/test/test_connect_base64_waves.py
deleted file mode 100644
index e50c8f517e64e178f180abab0ed2372878848f86..0000000000000000000000000000000000000000
--- a/spaces/GaenKoki/voicevox/test/test_connect_base64_waves.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import base64
-import io
-from unittest import TestCase
-
-import numpy as np
-import numpy.testing
-import soundfile
-from scipy.signal import resample
-
-from voicevox_engine.utility import ConnectBase64WavesException, connect_base64_waves
-
-
-def generate_sine_wave_ndarray(
- seconds: float, samplerate: int, frequency: float
-) -> np.ndarray:
- x = np.linspace(0, seconds, int(seconds * samplerate), endpoint=False)
- wave = np.sin(2 * np.pi * frequency * x).astype(np.float32)
-
- return wave
-
-
-def encode_bytes(wave_ndarray: np.ndarray, samplerate: int) -> bytes:
- wave_bio = io.BytesIO()
- soundfile.write(
- file=wave_bio,
- data=wave_ndarray,
- samplerate=samplerate,
- format="WAV",
- subtype="FLOAT",
- )
- wave_bio.seek(0)
-
- return wave_bio.getvalue()
-
-
-def generate_sine_wave_bytes(
- seconds: float, samplerate: int, frequency: float
-) -> bytes:
- wave_ndarray = generate_sine_wave_ndarray(seconds, samplerate, frequency)
- return encode_bytes(wave_ndarray, samplerate)
-
-
-def encode_base64(wave_bytes: bytes) -> str:
- return base64.standard_b64encode(wave_bytes).decode("utf-8")
-
-
-def generate_sine_wave_base64(seconds: float, samplerate: int, frequency: float) -> str:
- wave_bytes = generate_sine_wave_bytes(seconds, samplerate, frequency)
- wave_base64 = encode_base64(wave_bytes)
- return wave_base64
-
-
-class TestConnectBase64Waves(TestCase):
- def test_connect(self):
- samplerate = 1000
- wave = generate_sine_wave_ndarray(
- seconds=2, samplerate=samplerate, frequency=10
- )
- wave_base64 = encode_base64(encode_bytes(wave, samplerate=samplerate))
-
- wave_x2_ref = np.concatenate([wave, wave])
-
- wave_x2, _ = connect_base64_waves(waves=[wave_base64, wave_base64])
-
- self.assertEqual(wave_x2_ref.shape, wave_x2.shape)
-
- self.assertTrue((wave_x2_ref == wave_x2).all())
-
- def test_no_wave_error(self):
- self.assertRaises(ConnectBase64WavesException, connect_base64_waves, waves=[])
-
- def test_invalid_base64_error(self):
- wave_1000hz = generate_sine_wave_base64(
- seconds=2, samplerate=1000, frequency=10
- )
- wave_1000hz_broken = wave_1000hz[1:] # remove head 1 char
-
- self.assertRaises(
- ConnectBase64WavesException,
- connect_base64_waves,
- waves=[
- wave_1000hz_broken,
- ],
- )
-
- def test_invalid_wave_file_error(self):
- wave_1000hz = generate_sine_wave_bytes(seconds=2, samplerate=1000, frequency=10)
- wave_1000hz_broken_bytes = wave_1000hz[1:] # remove head 1 byte
- wave_1000hz_broken = encode_base64(wave_1000hz_broken_bytes)
-
- self.assertRaises(
- ConnectBase64WavesException,
- connect_base64_waves,
- waves=[
- wave_1000hz_broken,
- ],
- )
-
- def test_different_frequency(self):
- wave_24000hz = generate_sine_wave_ndarray(
- seconds=1, samplerate=24000, frequency=10
- )
- wave_1000hz = generate_sine_wave_ndarray(
- seconds=2, samplerate=1000, frequency=10
- )
- wave_24000_base64 = encode_base64(encode_bytes(wave_24000hz, samplerate=24000))
- wave_1000_base64 = encode_base64(encode_bytes(wave_1000hz, samplerate=1000))
-
-        wave_1000hz_to24000hz = resample(wave_1000hz, 24000 * len(wave_1000hz) // 1000)
-        wave_x2_ref = np.concatenate([wave_24000hz, wave_1000hz_to24000hz])
-
- wave_x2, _ = connect_base64_waves(waves=[wave_24000_base64, wave_1000_base64])
-
- self.assertEqual(wave_x2_ref.shape, wave_x2.shape)
- numpy.testing.assert_array_almost_equal(wave_x2_ref, wave_x2)
-
- def test_different_channels(self):
- wave_1000hz = generate_sine_wave_ndarray(
- seconds=2, samplerate=1000, frequency=10
- )
- wave_2ch_1000hz = np.array([wave_1000hz, wave_1000hz]).T
- wave_1ch_base64 = encode_base64(encode_bytes(wave_1000hz, samplerate=1000))
- wave_2ch_base64 = encode_base64(encode_bytes(wave_2ch_1000hz, samplerate=1000))
-
- wave_x2_ref = np.concatenate([wave_2ch_1000hz, wave_2ch_1000hz])
-
- wave_x2, _ = connect_base64_waves(waves=[wave_1ch_base64, wave_2ch_base64])
-
- self.assertEqual(wave_x2_ref.shape, wave_x2.shape)
- self.assertTrue((wave_x2_ref == wave_x2).all())
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
deleted file mode 100644
index dd5153e6ef0ef16b8607279634ce6f1593bd3c1c..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://regnetx_3.2gf',
- backbone=dict(
- dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
- stage_with_dcn=(False, True, True, True)))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmcv_custom/checkpoint.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmcv_custom/checkpoint.py
deleted file mode 100644
index 51322c1c3802f357481065a70dc5152469d80eb8..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmcv_custom/checkpoint.py
+++ /dev/null
@@ -1,500 +0,0 @@
-# Copyright (c) Open-MMLab. All rights reserved.
-import io
-import os
-import os.path as osp
-import pkgutil
-import time
-import warnings
-from collections import OrderedDict
-from importlib import import_module
-from tempfile import TemporaryDirectory
-
-import torch
-import torchvision
-from torch.optim import Optimizer
-from torch.utils import model_zoo
-from torch.nn import functional as F
-
-import mmcv
-from mmcv.fileio import FileClient
-from mmcv.fileio import load as load_file
-from mmcv.parallel import is_module_wrapper
-from mmcv.utils import mkdir_or_exist
-from mmcv.runner import get_dist_info
-
-ENV_MMCV_HOME = 'MMCV_HOME'
-ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
-DEFAULT_CACHE_DIR = '~/.cache'
-
-
-def _get_mmcv_home():
- mmcv_home = os.path.expanduser(
- os.getenv(
- ENV_MMCV_HOME,
- os.path.join(
- os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
-
- mkdir_or_exist(mmcv_home)
- return mmcv_home
-
-
-def load_state_dict(module, state_dict, strict=False, logger=None):
- """Load state_dict to a module.
-
- This method is modified from :meth:`torch.nn.Module.load_state_dict`.
- Default value for ``strict`` is set to ``False`` and the message for
- param mismatch will be shown even if strict is False.
-
- Args:
- module (Module): Module that receives the state_dict.
- state_dict (OrderedDict): Weights.
- strict (bool): whether to strictly enforce that the keys
- in :attr:`state_dict` match the keys returned by this module's
- :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
- logger (:obj:`logging.Logger`, optional): Logger to log the error
- message. If not specified, print function will be used.
- """
- unexpected_keys = []
- all_missing_keys = []
- err_msg = []
-
- metadata = getattr(state_dict, '_metadata', None)
- state_dict = state_dict.copy()
- if metadata is not None:
- state_dict._metadata = metadata
-
- # use _load_from_state_dict to enable checkpoint version control
- def load(module, prefix=''):
- # recursively check parallel module in case that the model has a
- # complicated structure, e.g., nn.Module(nn.Module(DDP))
- if is_module_wrapper(module):
- module = module.module
- local_metadata = {} if metadata is None else metadata.get(
- prefix[:-1], {})
- module._load_from_state_dict(state_dict, prefix, local_metadata, True,
- all_missing_keys, unexpected_keys,
- err_msg)
- for name, child in module._modules.items():
- if child is not None:
- load(child, prefix + name + '.')
-
- load(module)
- load = None # break load->load reference cycle
-
- # ignore "num_batches_tracked" of BN layers
- missing_keys = [
- key for key in all_missing_keys if 'num_batches_tracked' not in key
- ]
-
- if unexpected_keys:
- err_msg.append('unexpected key in source '
- f'state_dict: {", ".join(unexpected_keys)}\n')
- if missing_keys:
- err_msg.append(
- f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
-
- rank, _ = get_dist_info()
- if len(err_msg) > 0 and rank == 0:
- err_msg.insert(
- 0, 'The model and loaded state dict do not match exactly\n')
- err_msg = '\n'.join(err_msg)
- if strict:
- raise RuntimeError(err_msg)
- elif logger is not None:
- logger.warning(err_msg)
- else:
- print(err_msg)
-
-
-def load_url_dist(url, model_dir=None):
-    """In a distributed setting, this function only downloads the checkpoint at
-    local rank 0."""
- rank, world_size = get_dist_info()
- rank = int(os.environ.get('LOCAL_RANK', rank))
- if rank == 0:
- checkpoint = model_zoo.load_url(url, model_dir=model_dir)
- if world_size > 1:
- torch.distributed.barrier()
- if rank > 0:
- checkpoint = model_zoo.load_url(url, model_dir=model_dir)
- return checkpoint
-
-
-def load_pavimodel_dist(model_path, map_location=None):
-    """In a distributed setting, this function only downloads the checkpoint at
-    local rank 0."""
- try:
- from pavi import modelcloud
- except ImportError:
- raise ImportError(
- 'Please install pavi to load checkpoint from modelcloud.')
- rank, world_size = get_dist_info()
- rank = int(os.environ.get('LOCAL_RANK', rank))
- if rank == 0:
- model = modelcloud.get(model_path)
- with TemporaryDirectory() as tmp_dir:
- downloaded_file = osp.join(tmp_dir, model.name)
- model.download(downloaded_file)
- checkpoint = torch.load(downloaded_file, map_location=map_location)
- if world_size > 1:
- torch.distributed.barrier()
- if rank > 0:
- model = modelcloud.get(model_path)
- with TemporaryDirectory() as tmp_dir:
- downloaded_file = osp.join(tmp_dir, model.name)
- model.download(downloaded_file)
- checkpoint = torch.load(
- downloaded_file, map_location=map_location)
- return checkpoint
-
-
-def load_fileclient_dist(filename, backend, map_location):
-    """In a distributed setting, this function only downloads the checkpoint at
-    local rank 0."""
- rank, world_size = get_dist_info()
- rank = int(os.environ.get('LOCAL_RANK', rank))
- allowed_backends = ['ceph']
- if backend not in allowed_backends:
- raise ValueError(f'Load from Backend {backend} is not supported.')
- if rank == 0:
- fileclient = FileClient(backend=backend)
- buffer = io.BytesIO(fileclient.get(filename))
- checkpoint = torch.load(buffer, map_location=map_location)
- if world_size > 1:
- torch.distributed.barrier()
- if rank > 0:
- fileclient = FileClient(backend=backend)
- buffer = io.BytesIO(fileclient.get(filename))
- checkpoint = torch.load(buffer, map_location=map_location)
- return checkpoint
-
-
-def get_torchvision_models():
- model_urls = dict()
- for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
- if ispkg:
- continue
- _zoo = import_module(f'torchvision.models.{name}')
- if hasattr(_zoo, 'model_urls'):
- _urls = getattr(_zoo, 'model_urls')
- model_urls.update(_urls)
- return model_urls
-
-
-def get_external_models():
- mmcv_home = _get_mmcv_home()
- default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
- default_urls = load_file(default_json_path)
- assert isinstance(default_urls, dict)
- external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
- if osp.exists(external_json_path):
- external_urls = load_file(external_json_path)
- assert isinstance(external_urls, dict)
- default_urls.update(external_urls)
-
- return default_urls
-
-
-def get_mmcls_models():
- mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
- mmcls_urls = load_file(mmcls_json_path)
-
- return mmcls_urls
-
-
-def get_deprecated_model_names():
- deprecate_json_path = osp.join(mmcv.__path__[0],
- 'model_zoo/deprecated.json')
- deprecate_urls = load_file(deprecate_json_path)
- assert isinstance(deprecate_urls, dict)
-
- return deprecate_urls
-
-
-def _process_mmcls_checkpoint(checkpoint):
- state_dict = checkpoint['state_dict']
- new_state_dict = OrderedDict()
- for k, v in state_dict.items():
- if k.startswith('backbone.'):
- new_state_dict[k[9:]] = v
- new_checkpoint = dict(state_dict=new_state_dict)
-
- return new_checkpoint
-
-
-def _load_checkpoint(filename, map_location=None):
- """Load checkpoint from somewhere (modelzoo, file, url).
-
- Args:
- filename (str): Accept local filepath, URL, ``torchvision://xxx``,
- ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
- details.
- map_location (str | None): Same as :func:`torch.load`. Default: None.
-
- Returns:
- dict | OrderedDict: The loaded checkpoint. It can be either an
- OrderedDict storing model weights or a dict containing other
- information, which depends on the checkpoint.
- """
- if filename.startswith('modelzoo://'):
- warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
- 'use "torchvision://" instead')
- model_urls = get_torchvision_models()
- model_name = filename[11:]
- checkpoint = load_url_dist(model_urls[model_name])
- elif filename.startswith('torchvision://'):
- model_urls = get_torchvision_models()
- model_name = filename[14:]
- checkpoint = load_url_dist(model_urls[model_name])
- elif filename.startswith('open-mmlab://'):
- model_urls = get_external_models()
- model_name = filename[13:]
- deprecated_urls = get_deprecated_model_names()
- if model_name in deprecated_urls:
- warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
- f'of open-mmlab://{deprecated_urls[model_name]}')
- model_name = deprecated_urls[model_name]
- model_url = model_urls[model_name]
- # check if is url
- if model_url.startswith(('http://', 'https://')):
- checkpoint = load_url_dist(model_url)
- else:
- filename = osp.join(_get_mmcv_home(), model_url)
- if not osp.isfile(filename):
- raise IOError(f'{filename} is not a checkpoint file')
- checkpoint = torch.load(filename, map_location=map_location)
- elif filename.startswith('mmcls://'):
- model_urls = get_mmcls_models()
- model_name = filename[8:]
- checkpoint = load_url_dist(model_urls[model_name])
- checkpoint = _process_mmcls_checkpoint(checkpoint)
- elif filename.startswith(('http://', 'https://')):
- checkpoint = load_url_dist(filename)
- elif filename.startswith('pavi://'):
- model_path = filename[7:]
- checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
- elif filename.startswith('s3://'):
- checkpoint = load_fileclient_dist(
- filename, backend='ceph', map_location=map_location)
- else:
- if not osp.isfile(filename):
- raise IOError(f'{filename} is not a checkpoint file')
- checkpoint = torch.load(filename, map_location=map_location)
- return checkpoint
-
-
-def load_checkpoint(model,
- filename,
- map_location='cpu',
- strict=False,
- logger=None):
- """Load checkpoint from a file or URI.
-
- Args:
- model (Module): Module to load checkpoint.
- filename (str): Accept local filepath, URL, ``torchvision://xxx``,
- ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
- details.
- map_location (str): Same as :func:`torch.load`.
- strict (bool): Whether to allow different params for the model and
- checkpoint.
- logger (:mod:`logging.Logger` or None): The logger for error message.
-
- Returns:
- dict or OrderedDict: The loaded checkpoint.
- """
- checkpoint = _load_checkpoint(filename, map_location)
- # OrderedDict is a subclass of dict
- if not isinstance(checkpoint, dict):
- raise RuntimeError(
- f'No state_dict found in checkpoint file {filename}')
- # get state_dict from checkpoint
- if 'state_dict' in checkpoint:
- state_dict = checkpoint['state_dict']
- elif 'model' in checkpoint:
- state_dict = checkpoint['model']
- else:
- state_dict = checkpoint
- # strip prefix of state_dict
- if list(state_dict.keys())[0].startswith('module.'):
- state_dict = {k[7:]: v for k, v in state_dict.items()}
-
- # for MoBY, load model of online branch
- if sorted(list(state_dict.keys()))[0].startswith('encoder'):
- state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
-
- # reshape absolute position embedding
- if state_dict.get('absolute_pos_embed') is not None:
- absolute_pos_embed = state_dict['absolute_pos_embed']
- N1, L, C1 = absolute_pos_embed.size()
- N2, C2, H, W = model.absolute_pos_embed.size()
- if N1 != N2 or C1 != C2 or L != H*W:
- logger.warning("Error in loading absolute_pos_embed, pass")
- else:
- state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
-
- # interpolate position bias table if needed
- relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
- for table_key in relative_position_bias_table_keys:
- table_pretrained = state_dict[table_key]
- table_current = model.state_dict()[table_key]
- L1, nH1 = table_pretrained.size()
- L2, nH2 = table_current.size()
- if nH1 != nH2:
- logger.warning(f"Error in loading {table_key}, pass")
- else:
- if L1 != L2:
- S1 = int(L1 ** 0.5)
- S2 = int(L2 ** 0.5)
- table_pretrained_resized = F.interpolate(
- table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
- size=(S2, S2), mode='bicubic')
- state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
-
- # load state_dict
- load_state_dict(model, state_dict, strict, logger)
- return checkpoint
-
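-
-# Usage sketch (illustrative only; the model variable and local path below are
-# hypothetical, but the URI schemes match the ones documented above):
-#   model = MyModel()                                     # any nn.Module
-#   load_checkpoint(model, 'torchvision://resnet50')      # torchvision model zoo
-#   load_checkpoint(model, '/path/to/ckpt.pth', map_location='cpu', strict=False)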
-
-def weights_to_cpu(state_dict):
- """Copy a model state_dict to cpu.
-
- Args:
- state_dict (OrderedDict): Model weights on GPU.
-
- Returns:
-        OrderedDict: Model weights on CPU.
- """
- state_dict_cpu = OrderedDict()
- for key, val in state_dict.items():
- state_dict_cpu[key] = val.cpu()
- return state_dict_cpu
-
-
-def _save_to_state_dict(module, destination, prefix, keep_vars):
- """Saves module state to `destination` dictionary.
-
- This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
-
- Args:
- module (nn.Module): The module to generate state_dict.
- destination (dict): A dict where state will be stored.
- prefix (str): The prefix for parameters and buffers used in this
- module.
- """
- for name, param in module._parameters.items():
- if param is not None:
- destination[prefix + name] = param if keep_vars else param.detach()
- for name, buf in module._buffers.items():
- # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
- if buf is not None:
- destination[prefix + name] = buf if keep_vars else buf.detach()
-
-
-def get_state_dict(module, destination=None, prefix='', keep_vars=False):
- """Returns a dictionary containing a whole state of the module.
-
- Both parameters and persistent buffers (e.g. running averages) are
- included. Keys are corresponding parameter and buffer names.
-
- This method is modified from :meth:`torch.nn.Module.state_dict` to
- recursively check parallel module in case that the model has a complicated
- structure, e.g., nn.Module(nn.Module(DDP)).
-
- Args:
- module (nn.Module): The module to generate state_dict.
- destination (OrderedDict): Returned dict for the state of the
- module.
- prefix (str): Prefix of the key.
- keep_vars (bool): Whether to keep the variable property of the
- parameters. Default: False.
-
- Returns:
- dict: A dictionary containing a whole state of the module.
- """
- # recursively check parallel module in case that the model has a
- # complicated structure, e.g., nn.Module(nn.Module(DDP))
- if is_module_wrapper(module):
- module = module.module
-
- # below is the same as torch.nn.Module.state_dict()
- if destination is None:
- destination = OrderedDict()
- destination._metadata = OrderedDict()
- destination._metadata[prefix[:-1]] = local_metadata = dict(
- version=module._version)
- _save_to_state_dict(module, destination, prefix, keep_vars)
- for name, child in module._modules.items():
- if child is not None:
- get_state_dict(
- child, destination, prefix + name + '.', keep_vars=keep_vars)
- for hook in module._state_dict_hooks.values():
- hook_result = hook(module, destination, prefix, local_metadata)
- if hook_result is not None:
- destination = hook_result
- return destination
-
-
-def save_checkpoint(model, filename, optimizer=None, meta=None):
- """Save checkpoint to file.
-
- The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
- ``optimizer``. By default ``meta`` will contain version and time info.
-
- Args:
- model (Module): Module whose params are to be saved.
- filename (str): Checkpoint filename.
- optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
- meta (dict, optional): Metadata to be saved in checkpoint.
- """
- if meta is None:
- meta = {}
- elif not isinstance(meta, dict):
- raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
- meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
-
- if is_module_wrapper(model):
- model = model.module
-
- if hasattr(model, 'CLASSES') and model.CLASSES is not None:
- # save class name to the meta
- meta.update(CLASSES=model.CLASSES)
-
- checkpoint = {
- 'meta': meta,
- 'state_dict': weights_to_cpu(get_state_dict(model))
- }
- # save optimizer state dict in the checkpoint
- if isinstance(optimizer, Optimizer):
- checkpoint['optimizer'] = optimizer.state_dict()
- elif isinstance(optimizer, dict):
- checkpoint['optimizer'] = {}
- for name, optim in optimizer.items():
- checkpoint['optimizer'][name] = optim.state_dict()
-
- if filename.startswith('pavi://'):
- try:
- from pavi import modelcloud
- from pavi.exception import NodeNotFoundError
- except ImportError:
- raise ImportError(
- 'Please install pavi to load checkpoint from modelcloud.')
- model_path = filename[7:]
- root = modelcloud.Folder()
- model_dir, model_name = osp.split(model_path)
- try:
- model = modelcloud.get(model_dir)
- except NodeNotFoundError:
- model = root.create_training_model(model_dir)
- with TemporaryDirectory() as tmp_dir:
- checkpoint_file = osp.join(tmp_dir, model_name)
- with open(checkpoint_file, 'wb') as f:
- torch.save(checkpoint, f)
- f.flush()
- model.create_file(checkpoint_file, name=model_name)
- else:
- mmcv.mkdir_or_exist(osp.dirname(filename))
- # immediately flush buffer
- with open(filename, 'wb') as f:
- torch.save(checkpoint, f)
- f.flush()
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fcn_unet_s5-d16.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fcn_unet_s5-d16.py
deleted file mode 100644
index a33e7972877f902d0e7d18401ca675e3e4e60a18..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fcn_unet_s5-d16.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained=None,
- backbone=dict(
- type='UNet',
- in_channels=3,
- base_channels=64,
- num_stages=5,
- strides=(1, 1, 1, 1, 1),
- enc_num_convs=(2, 2, 2, 2, 2),
- dec_num_convs=(2, 2, 2, 2),
- downsamples=(True, True, True, True),
- enc_dilations=(1, 1, 1, 1, 1),
- dec_dilations=(1, 1, 1, 1),
- with_cp=False,
- conv_cfg=None,
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'),
- upsample_cfg=dict(type='InterpConv'),
- norm_eval=False),
- decode_head=dict(
- type='FCNHead',
- in_channels=64,
- in_index=4,
- channels=64,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=2,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=128,
- in_index=3,
- channels=64,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=2,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='slide', crop_size=256, stride=170))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py
deleted file mode 100644
index e4bda3eded693bfd44a8c86ced7ae6ee9963c583..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3plus_r50-d8.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/losses/accuracy.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/losses/accuracy.py
deleted file mode 100644
index c0fd2e7e74a0f721c4a814c09d6e453e5956bb38..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/losses/accuracy.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import torch.nn as nn
-
-
-def accuracy(pred, target, topk=1, thresh=None):
- """Calculate accuracy according to the prediction and target.
-
- Args:
- pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
-        target (torch.Tensor): The target of each prediction, shape (N, ...)
-        topk (int | tuple[int], optional): If the predictions in ``topk``
-            match the target, the predictions will be regarded as
-            correct ones. Defaults to 1.
-        thresh (float, optional): If not None, predictions with scores under
-            this threshold are considered incorrect. Defaults to None.
-
- Returns:
- float | tuple[float]: If the input ``topk`` is a single integer,
- the function will return a single float as accuracy. If
- ``topk`` is a tuple containing multiple integers, the
- function will return a tuple containing accuracies of
- each ``topk`` number.
- """
- assert isinstance(topk, (int, tuple))
- if isinstance(topk, int):
- topk = (topk, )
- return_single = True
- else:
- return_single = False
-
- maxk = max(topk)
- if pred.size(0) == 0:
- accu = [pred.new_tensor(0.) for i in range(len(topk))]
- return accu[0] if return_single else accu
- assert pred.ndim == target.ndim + 1
- assert pred.size(0) == target.size(0)
- assert maxk <= pred.size(1), \
- f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
- pred_value, pred_label = pred.topk(maxk, dim=1)
- # transpose to shape (maxk, N, ...)
- pred_label = pred_label.transpose(0, 1)
- correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
- if thresh is not None:
- # Only prediction values larger than thresh are counted as correct
- correct = correct & (pred_value > thresh).t()
- res = []
- for k in topk:
- correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
- res.append(correct_k.mul_(100.0 / target.numel()))
- return res[0] if return_single else res
-
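-# Minimal usage sketch (illustrative only, with made-up tensors; not part of the
-# original module):
-#   import torch
-#   logits = torch.randn(8, 10)                         # (N, num_class)
-#   labels = torch.randint(0, 10, (8,))                 # (N,)
-#   top1, top5 = accuracy(logits, labels, topk=(1, 5))  # accuracies in percent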
-
-class Accuracy(nn.Module):
- """Accuracy calculation module."""
-
- def __init__(self, topk=(1, ), thresh=None):
- """Module to calculate the accuracy.
-
- Args:
- topk (tuple, optional): The criterion used to calculate the
- accuracy. Defaults to (1,).
- thresh (float, optional): If not None, predictions with scores
-                under this threshold are considered incorrect. Defaults to None.
- """
- super().__init__()
- self.topk = topk
- self.thresh = thresh
-
- def forward(self, pred, target):
- """Forward function to calculate accuracy.
-
- Args:
- pred (torch.Tensor): Prediction of models.
- target (torch.Tensor): Target for each prediction.
-
- Returns:
- tuple[float]: The accuracies under different topk criterions.
- """
- return accuracy(pred, target, self.topk, self.thresh)
diff --git a/spaces/GrantC/learning_goals_bloom/app.py b/spaces/GrantC/learning_goals_bloom/app.py
deleted file mode 100644
index e91d4bd9877d2208d72ca1bf5ccb2c7201cd32fe..0000000000000000000000000000000000000000
--- a/spaces/GrantC/learning_goals_bloom/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import gradio as gr
-import requests
-import os
-
-API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
-HF_TOKEN = os.environ["HF_TOKEN"]
-headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
-def get_results(prompt):
- json_ = {"inputs": prompt,
- "parameters":
- {
- "top_p": 0.9,
- "typical_p":0.2,
- "temperature": 0.8,
- "max_new_tokens": 250,
- "return_full_text": True
- }, "options":
- {
- "use_cache": True,
- "wait_for_model":True
- },}
- try:
- response = requests.post(API_URL, headers=headers, json=json_)
- output = response.json()
- output_tmp = output[0]['generated_text']
-    except Exception:
- output_tmp = "Not able to work"
- return output_tmp
-
-
-def learn_goals(topic, text):
- prompt = "Topic: " + topic + '\n'
- prompt = prompt + text + '\n'
- prompt = prompt + """Some examples of learning goals from above are:
- 1. The student will be able to"""
-
- return get_results(prompt)
-
-
-topic = gr.Textbox(lines = 1, placeholder="Topic")
-text = gr.Textbox(lines = 5, placeholder="Text")
-
-iface = gr.Interface(fn=learn_goals, inputs=[topic, text], outputs="text")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Grezz/generate_human_motion/pyrender/examples/example.py b/spaces/Grezz/generate_human_motion/pyrender/examples/example.py
deleted file mode 100644
index 599a4850a5899cdeb1a76db1c5cf1c91c263cd41..0000000000000000000000000000000000000000
--- a/spaces/Grezz/generate_human_motion/pyrender/examples/example.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""Examples of using pyrender for viewing and offscreen rendering.
-"""
-import pyglet
-pyglet.options['shadow_window'] = False
-import os
-import numpy as np
-import trimesh
-
-from pyrender import PerspectiveCamera,\
- DirectionalLight, SpotLight, PointLight,\
- MetallicRoughnessMaterial,\
- Primitive, Mesh, Node, Scene,\
- Viewer, OffscreenRenderer, RenderFlags
-
-#==============================================================================
-# Mesh creation
-#==============================================================================
-
-#------------------------------------------------------------------------------
-# Creating textured meshes from trimeshes
-#------------------------------------------------------------------------------
-
-# Fuze trimesh
-fuze_trimesh = trimesh.load('./models/fuze.obj')
-fuze_mesh = Mesh.from_trimesh(fuze_trimesh)
-
-# Drill trimesh
-drill_trimesh = trimesh.load('./models/drill.obj')
-drill_mesh = Mesh.from_trimesh(drill_trimesh)
-drill_pose = np.eye(4)
-drill_pose[0,3] = 0.1
-drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])
-
-# Wood trimesh
-wood_trimesh = trimesh.load('./models/wood.obj')
-wood_mesh = Mesh.from_trimesh(wood_trimesh)
-
-# Water bottle trimesh
-bottle_gltf = trimesh.load('./models/WaterBottle.glb')
-bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
-bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
-bottle_pose = np.array([
- [1.0, 0.0, 0.0, 0.1],
- [0.0, 0.0, -1.0, -0.16],
- [0.0, 1.0, 0.0, 0.13],
- [0.0, 0.0, 0.0, 1.0],
-])
-
-#------------------------------------------------------------------------------
-# Creating meshes with per-vertex colors
-#------------------------------------------------------------------------------
-boxv_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
-boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
-boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
-boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
-
-#------------------------------------------------------------------------------
-# Creating meshes with per-face colors
-#------------------------------------------------------------------------------
-boxf_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
-boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
-boxf_trimesh.visual.face_colors = boxf_face_colors
-boxf_mesh = Mesh.from_trimesh(boxf_trimesh, smooth=False)
-
-#------------------------------------------------------------------------------
-# Creating meshes from point clouds
-#------------------------------------------------------------------------------
-points = trimesh.creation.icosphere(radius=0.05).vertices
-point_colors = np.random.uniform(size=points.shape)
-points_mesh = Mesh.from_points(points, colors=point_colors)
-
-#==============================================================================
-# Light creation
-#==============================================================================
-
-direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
-spot_l = SpotLight(color=np.ones(3), intensity=10.0,
- innerConeAngle=np.pi/16, outerConeAngle=np.pi/6)
-point_l = PointLight(color=np.ones(3), intensity=10.0)
-
-#==============================================================================
-# Camera creation
-#==============================================================================
-
-cam = PerspectiveCamera(yfov=(np.pi / 3.0))
-cam_pose = np.array([
- [0.0, -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
- [1.0, 0.0, 0.0, 0.0],
- [0.0, np.sqrt(2)/2, np.sqrt(2)/2, 0.4],
- [0.0, 0.0, 0.0, 1.0]
-])
-
-#==============================================================================
-# Scene creation
-#==============================================================================
-
-scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
-
-#==============================================================================
-# Adding objects to the scene
-#==============================================================================
-
-#------------------------------------------------------------------------------
-# By manually creating nodes
-#------------------------------------------------------------------------------
-fuze_node = Node(mesh=fuze_mesh, translation=np.array([0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2])]))
-scene.add_node(fuze_node)
-boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
-scene.add_node(boxv_node)
-boxf_node = Node(mesh=boxf_mesh, translation=np.array([-0.1, -0.10, 0.05]))
-scene.add_node(boxf_node)
-
-#------------------------------------------------------------------------------
-# By using the add() utility function
-#------------------------------------------------------------------------------
-drill_node = scene.add(drill_mesh, pose=drill_pose)
-bottle_node = scene.add(bottle_mesh, pose=bottle_pose)
-wood_node = scene.add(wood_mesh)
-direc_l_node = scene.add(direc_l, pose=cam_pose)
-spot_l_node = scene.add(spot_l, pose=cam_pose)
-
-#==============================================================================
-# Using the viewer with a default camera
-#==============================================================================
-
-v = Viewer(scene, shadows=True)
-
-#==============================================================================
-# Using the viewer with a pre-specified camera
-#==============================================================================
-cam_node = scene.add(cam, pose=cam_pose)
-v = Viewer(scene, central_node=drill_node)
-
-#==============================================================================
-# Rendering offscreen from that camera
-#==============================================================================
-
-r = OffscreenRenderer(viewport_width=640*2, viewport_height=480*2)
-color, depth = r.render(scene)
-
-import matplotlib.pyplot as plt
-plt.figure()
-plt.imshow(color)
-plt.show()
-
-#==============================================================================
-# Segmask rendering
-#==============================================================================
-
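-# Assign each mesh node a distinct integer value so the segmentation render distinguishes objects.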
-nm = {node: 20*(i + 1) for i, node in enumerate(scene.mesh_nodes)}
-seg = r.render(scene, RenderFlags.SEG, nm)[0]
-plt.figure()
-plt.imshow(seg)
-plt.show()
-
-r.delete()
-
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/taskonomy/__init__.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/taskonomy/__init__.py
deleted file mode 100644
index 625719fab6ac4260fc85d153d7c1c49a6f3016ba..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/taskonomy/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .taskonomy_dataset import TaskonomyDataset
\ No newline at end of file
diff --git a/spaces/Hallucinate/demo/k_diffusion/__init__.py b/spaces/Hallucinate/demo/k_diffusion/__init__.py
deleted file mode 100644
index 5de9decab9fef99f2dd152f16b82b5806508ffdf..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/k_diffusion/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import augmentation, config, evaluation, external, gns, layers, models, sampling, utils
-from .layers import Denoiser
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py
deleted file mode 100644
index 23869ebcd0c438f36e310c8ccddd3b5c07a71182..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq.search import Search
-
-
-class NoisyChannelBeamSearch(Search):
-
- def __init__(self, tgt_dict):
- super().__init__(tgt_dict)
- self.fw_scores_buf = None
- self.lm_scores_buf = None
-
- def _init_buffers(self, t):
- # super()._init_buffers(t)
- if self.fw_scores_buf is None:
- self.scores_buf = t.new()
- self.indices_buf = torch.LongTensor().to(device=t.device)
- self.beams_buf = torch.LongTensor().to(device=t.device)
- self.fw_scores_buf = t.new()
- self.lm_scores_buf = t.new()
-
- def combine_fw_bw(self, combine_method, fw_cum, bw, step):
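-        # "noisy_channel": normalise the cumulative forward score by length before adding the backward score.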
- if combine_method == "noisy_channel":
- fw_norm = fw_cum.div(step + 1)
- lprobs = bw + fw_norm
- elif combine_method == "lm_only":
- lprobs = bw + fw_cum
-
- return lprobs
-
- def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
- self._init_buffers(fw_lprobs)
- bsz, beam_size, vocab_size = fw_lprobs.size()
-
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam
- fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
- bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
- # nothing to add since we are at the first step
- fw_lprobs_cum = fw_lprobs
-
- else:
- # make probs contain cumulative scores for each hypothesis
- raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
- fw_lprobs_cum = (fw_lprobs.add(raw_scores))
-
- combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)
-
- # choose the top k according to the combined noisy channel model score
- torch.topk(
- combined_lprobs.view(bsz, -1),
- k=min(
- # Take the best 2 x beam_size predictions. We'll choose the first
- # beam_size of these which don't predict eos to continue with.
- beam_size * 2,
- combined_lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
- ),
- out=(self.scores_buf, self.indices_buf),
- )
- # save corresponding fw and lm scores
- self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
- self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)
- # Project back into relative indices and beams
- self.beams_buf = self.indices_buf // vocab_size
- self.indices_buf.fmod_(vocab_size)
- return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/tasks/mm_tasks/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/tasks/mm_tasks/__init__.py
deleted file mode 100644
index 0190ffc87bc9463f20928d57501133839dc988d7..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/tasks/mm_tasks/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .caption import CaptionTask
diff --git a/spaces/Hise/rvc-hololive-models/vc_infer_pipeline.py b/spaces/Hise/rvc-hololive-models/vc_infer_pipeline.py
deleted file mode 100644
index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000
--- a/spaces/Hise/rvc-hololive-models/vc_infer_pipeline.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import numpy as np, parselmouth, torch, pdb
-from time import time as ttime
-import torch.nn.functional as F
-from config import x_pad, x_query, x_center, x_max
-import scipy.signal as signal
-import pyworld, os, traceback, faiss
-from scipy import signal
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-
-class VC(object):
- def __init__(self, tgt_sr, device, is_half):
-        self.sr = 16000  # HuBERT input sample rate
-        self.window = 160  # samples per frame (10 ms at 16 kHz)
-        self.t_pad = self.sr * x_pad  # padding (in samples) before and after each chunk
-        self.t_pad_tgt = tgt_sr * x_pad
-        self.t_pad2 = self.t_pad * 2
-        self.t_query = self.sr * x_query  # search window around each candidate cut point
-        self.t_center = self.sr * x_center  # spacing between candidate cut points
-        self.t_max = self.sr * x_max  # duration threshold below which no cut-point search is done
- self.device = device
- self.is_half = is_half
-
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.sr,
- f0_ceil=f0_max,
- f0_floor=f0_min,
- frame_period=10,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
- f0 = signal.medfilt(f0, 3)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # number of f0 points per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(int)
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9, # layer 9
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
- if (
-            index is not None
-            and big_npy is not None
- and index_rate != 0
- ):
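-            # Retrieve nearest neighbours from the faiss index and blend them into the features, weighted by index_rate.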
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
- _, I = index.search(npy, 1)
- npy = big_npy[I.squeeze()]
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
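-        # Double the feature rate along time to match the 160-sample (10 ms) synthesis hop.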
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
-        if pitch is not None and pitchf is not None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
-            if pitch is not None and pitchf is not None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- f0_file=None,
- ):
- if (
- file_big_npy != ""
- and file_index != ""
-            and os.path.exists(file_big_npy)
-            and os.path.exists(file_index)
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = np.load(file_big_npy)
-            except Exception:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- print("Feature retrieval library doesn't exist or ratio is 0")
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
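-        # For long inputs, look for low-energy positions near regular intervals so the audio can be converted in chunks.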
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
-        if hasattr(f0_file, "name"):
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
-            except Exception:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/label_distribution.py b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/label_distribution.py
deleted file mode 100644
index 7fd7f39e45e74935f2c0fe634d44a9ea86e2b9a2..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/label_distribution.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import gradio as gr
-
-from widgets.widget_base import Widget
-from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
-import utils
-
-logs = utils.prepare_logging(__file__)
-
-
-class LabelDistribution(Widget):
- def __init__(self):
- self.label_dist_plot = gr.Plot(render=False, visible=False)
- self.label_dist_no_label_text = gr.Markdown(
- value="No labels were found in the dataset", render=False, visible=False
- )
- self.label_dist_accordion = gr.Accordion(render=False, label="", open=False)
-
- def render(self):
- with gr.TabItem(label="Label Distribution"):
- gr.Markdown(
- "Use this widget to see how balanced the labels in your dataset are."
- )
- self.label_dist_plot.render()
- self.label_dist_no_label_text.render()
-
- def update(self, dstats: dmt_cls):
- logs.info(f"FIGS labels: {bool(dstats.fig_labels)}")
- if dstats.fig_labels:
- output = {
- self.label_dist_plot: gr.Plot.update(
- value=dstats.fig_labels, visible=True
- ),
- self.label_dist_no_label_text: gr.Markdown.update(visible=False),
- }
- else:
- output = {
- self.label_dist_plot: gr.Plot.update(visible=False),
- self.label_dist_no_label_text: gr.Markdown.update(visible=True),
- }
- return output
-
- @property
- def output_components(self):
- return [self.label_dist_plot, self.label_dist_no_label_text]
-
- def add_events(self, state: gr.State):
- pass
diff --git a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py b/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py
deleted file mode 100644
index 72b92a341dcd1b82035af72b8a6b4edc65783ecc..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-from collections import defaultdict
-import numpy as np
-from misc.bleu_utils import sentence_bleu
-import json
-import warnings
-
-
-def get_args():
- import argparse
-
- parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2")
- parser.add_argument('--asr-transcript', type=str,
- help='Path to the transcript file.')
- parser.add_argument('--prompts-description', type=str,
- help='Path to the ground-truth continuation')
- parser.add_argument('--manifest', type=str, required=True)
- parser.add_argument('--take-shortest', type=int, default=1000)
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- # NLTK produces warnings
- warnings.filterwarnings("ignore")
-
- args = get_args()
-
- with open(args.prompts_description, 'r') as fin:
- original_continuations = json.loads(fin.read())
-
- sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
- assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
-
- sequence2length.sort(key=lambda x: x[1])
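-    # Keep only the N shortest ground-truth continuations (--take-shortest, default 1000) for evaluation.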
- to_take = set(v[0] for v in sequence2length[:args.take_shortest])
-
- with open(args.manifest, 'r') as fin:
- fin.readline()
-
- linenum2file = dict([
- (i, l.split("__")[0]) for (i, l) in enumerate(fin)
- ])
-
- max_files = max(linenum2file.keys())
- continuations = defaultdict(list)
-
- mean_length_after = 0
- n_examples = 0
-
- with open(args.asr_transcript, 'r') as fin:
- for line in fin:
- n_examples += 1
- line = line.split()
- sequence_id = int(line[-1].split('-')[1][:-1])
-
- assert sequence_id <= max_files
-
- sequence_name = linenum2file[sequence_id]
-
- continuations[sequence_name].append(line[:-1])
- mean_length_after += len(line)
-
- mean_length_after /= n_examples
- print(f'Mean length of continuations, in words: {mean_length_after}')
- metric_values = []
-
- mean_ground_truth_words = 0
- n_examples = 0
- n_candidates = 0
-
- for k, candidates in continuations.items():
- if k not in to_take:
- continue
-
- n_examples += 1
-
- ground_truth = original_continuations[k][1].split()
- n_candidates += len(candidates)
- bleu = sentence_bleu(candidates, ground_truth, weights=(
- 0.5, 0.5), no_length_penalty=True, averaging_mode="geometric")
- mean_ground_truth_words += len(ground_truth)
-
- metric_values.append(bleu)
-
- n = len(metric_values)
- print(
- f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}')
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/model_parallel/models/roberta/model.py b/spaces/ICML2022/OFA/fairseq/fairseq/model_parallel/models/roberta/model.py
deleted file mode 100644
index 77a80ef72057219110b34678a38705549910edd3..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/model_parallel/models/roberta/model.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-RoBERTa: A Robustly Optimized BERT Pretraining Approach.
-"""
-
-import logging
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.roberta import (
- roberta_base_architecture,
- roberta_prenorm_architecture,
- RobertaEncoder,
- RobertaModel,
-)
-from fairseq.modules import LayerNorm
-
-
-try:
- from fairseq.model_parallel.megatron.mpu import (
- copy_to_model_parallel_region,
- gather_from_model_parallel_region,
- ColumnParallelLinear,
- VocabParallelEmbedding,
- )
-
- has_megatron_submodule = True
-except (ImportError, ModuleNotFoundError):
- has_megatron_submodule = False
-
-logger = logging.getLogger(__name__)
-
-
-@register_model("model_parallel_roberta")
-class ModelParallelRobertaModel(RobertaModel):
- def __init__(self, args, encoder):
- super().__init__(args, encoder)
-
- self.classification_heads = nn.ModuleDict()
-
- @staticmethod
- def add_args(parser):
- RobertaModel.add_args(parser)
- parser.add_argument(
- "--no-final-layer-norm",
- action="store_true",
- help=(
- "don't add final layernorm (only applicable when "
- "--encoder-normalize-before=True"
- ),
- )
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present
- base_architecture(args)
-
- task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
- task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
-
- if not hasattr(args, "max_positions"):
- args.max_positions = args.tokens_per_sample
-
- if getattr(args, "untie_weights_roberta", False):
- raise NotImplementedError(
- "--untie-weights-roberta is not supported in model parallel mode"
- )
-
- encoder = ModelParallelRobertaEncoder(args, task.source_dictionary)
- return cls(args, encoder)
-
- def forward(
- self,
- src_tokens,
- features_only=False,
- return_all_hiddens=False,
- classification_head_name=None,
- **kwargs
- ):
- if classification_head_name is not None:
- features_only = True
-
- x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
-
- if classification_head_name is not None:
- x = self.classification_heads[classification_head_name](x)
- return x, extra
-
- def register_classification_head(
- self, name, num_classes=None, inner_dim=None, **kwargs
- ):
- """Register a classification head."""
- if name in self.classification_heads:
- prev_num_classes = self.classification_heads[name].out_proj.out_features
- prev_inner_dim = self.classification_heads[name].dense.out_features
- if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
- logger.warning(
- 're-registering head "{}" with num_classes {} (prev: {}) '
- "and inner_dim {} (prev: {})".format(
- name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
- )
- )
- self.classification_heads[name] = ModelParallelRobertaClassificationHead(
- self.args.encoder_embed_dim,
- inner_dim or self.args.encoder_embed_dim,
- num_classes,
- self.args.pooler_activation_fn,
- self.args.pooler_dropout,
- )
-
-
-class ModelParallelRobertaLMHead(nn.Module):
- """Head for masked language modeling."""
-
- def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
- super().__init__()
- self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True)
- self.activation_fn = utils.get_activation_fn(activation_fn)
- self.layer_norm = LayerNorm(embed_dim)
-
- if weight is None:
- weight = nn.Linear(embed_dim, output_dim, bias=False).weight
- self.weight = weight
- self.bias = nn.Parameter(torch.zeros(output_dim))
-
- def forward(self, features, masked_tokens=None, **kwargs):
- # Only project the unmasked tokens while training,
- # saves both memory and computation
- if masked_tokens is not None:
- features = features[masked_tokens, :]
-
- x = self.dense(features)
- x = self.activation_fn(x)
- x = self.layer_norm(x)
-
- x = copy_to_model_parallel_region(x)
- # project back to size of vocabulary with bias
- x = F.linear(x, self.weight)
- x = gather_from_model_parallel_region(x).contiguous()
- x = x + self.bias
- return x
-
-
-class ModelParallelRobertaClassificationHead(nn.Module):
- """Head for sentence-level classification tasks."""
-
- def __init__(
- self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout
- ):
- super().__init__()
- self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True)
- self.activation_fn = utils.get_activation_fn(activation_fn)
- self.dropout = nn.Dropout(p=pooler_dropout)
- self.out_proj = nn.Linear(inner_dim, num_classes)
-
- def forward(self, features, **kwargs):
-        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
- x = self.dropout(x)
- x = self.dense(x)
- x = self.activation_fn(x)
- x = self.dropout(x)
- x = self.out_proj(x)
- return x
-
-
-class ModelParallelRobertaEncoder(RobertaEncoder):
- """RoBERTa encoder."""
-
- def __init__(self, args, dictionary):
- super().__init__(args, dictionary)
- assert not self.args.untie_weights_roberta
-
- def build_embedding(self, vocab_size, embedding_dim, padding_idx):
- return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx)
-
- def build_encoder(self, args, dictionary, embed_tokens):
- return ModelParallelTransformerEncoder(args, dictionary, embed_tokens)
-
- def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
- return ModelParallelRobertaLMHead(embed_dim, output_dim, activation_fn, weight)
-
-
-@register_model_architecture("model_parallel_roberta", "model_parallel_roberta")
-def base_architecture(args):
- args.no_final_layer_norm = getattr(args, "no_final_layer_norm", False)
- # model parallel RoBERTa defaults to "Pre-LN" formulation
- roberta_prenorm_architecture(args)
-
-
-# earlier versions of model parallel RoBERTa removed the final layer norm
-@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_v1")
-def model_parallel_roberta_v1_architecture(args):
- args.no_final_layer_norm = getattr(args, "no_final_layer_norm", True)
- base_architecture(args)
-
-
-@register_model_architecture(
- "model_parallel_roberta", "model_parallel_roberta_postnorm"
-)
-def model_parallel_roberta_postnorm_architecture(args):
- # the original BERT/RoBERTa uses the "Post-LN" formulation
- roberta_base_architecture(args)
-
-
-@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_base")
-def model_parallel_roberta_base_architecture(args):
- base_architecture(args)
-
-
-@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_large")
-def model_parallel_roberta_large_architecture(args):
- args.encoder_layers = getattr(args, "encoder_layers", 24)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
- base_architecture(args)
diff --git a/spaces/ICML2022/resefa/models/stylegan_generator.py b/spaces/ICML2022/resefa/models/stylegan_generator.py
deleted file mode 100644
index c0034b34a5b72bfe6b305a9f6ff8d772b391c4f5..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/resefa/models/stylegan_generator.py
+++ /dev/null
@@ -1,999 +0,0 @@
-# python3.7
-"""Contains the implementation of generator described in StyleGAN.
-
-Paper: https://arxiv.org/pdf/1812.04948.pdf
-
-Official TensorFlow implementation: https://github.com/NVlabs/stylegan
-"""
-
-import numpy as np
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.cuda.amp import autocast
-
-from .utils.ops import all_gather
-
-__all__ = ['StyleGANGenerator']
-
-# Resolutions allowed.
-_RESOLUTIONS_ALLOWED = [8, 16, 32, 64, 128, 256, 512, 1024]
-
-# Fused-scale options allowed.
-_FUSED_SCALE_ALLOWED = [True, False, 'auto']
-
-# pylint: disable=missing-function-docstring
-
-class StyleGANGenerator(nn.Module):
- """Defines the generator network in StyleGAN.
-
- NOTE: The synthesized images are with `RGB` channel order and pixel range
- [-1, 1].
-
- Settings for the mapping network:
-
- (1) z_dim: Dimension of the input latent space, Z. (default: 512)
- (2) w_dim: Dimension of the output latent space, W. (default: 512)
- (3) repeat_w: Repeat w-code for different layers. (default: True)
- (4) normalize_z: Whether to normalize the z-code. (default: True)
- (5) mapping_layers: Number of layers of the mapping network. (default: 8)
- (6) mapping_fmaps: Number of hidden channels of the mapping network.
- (default: 512)
- (7) mapping_use_wscale: Whether to use weight scaling for the mapping
- network. (default: True)
- (8) mapping_wscale_gain: The factor to control weight scaling for the
- mapping network (default: sqrt(2.0))
- (9) mapping_lr_mul: Learning rate multiplier for the mapping network.
- (default: 0.01)
-
- Settings for conditional generation:
-
- (1) label_dim: Dimension of the additional label for conditional generation.
- In one-hot conditioning case, it is equal to the number of classes. If
- set to 0, conditioning training will be disabled. (default: 0)
- (2) embedding_dim: Dimension of the embedding space, if needed.
- (default: 512)
-
- Settings for the synthesis network:
-
- (1) resolution: The resolution of the output image. (default: -1)
- (2) init_res: The initial resolution to start with convolution. (default: 4)
- (3) image_channels: Number of channels of the output image. (default: 3)
- (4) final_tanh: Whether to use `tanh` to control the final pixel range.
- (default: False)
- (5) fused_scale: The strategy of fusing `upsample` and `conv2d` as one
- operator. `True` means blocks from all resolutions will fuse. `False`
- means blocks from all resolutions will not fuse. `auto` means blocks
- from resolutions higher than (or equal to) `fused_scale_res` will fuse.
- (default: `auto`)
- (6) fused_scale_res: Minimum resolution to fuse `conv2d` and `downsample`
- as one operator. This field only takes effect if `fused_scale` is set
- as `auto`. (default: 128)
- (7) use_wscale: Whether to use weight scaling. (default: True)
- (8) wscale_gain: The factor to control weight scaling. (default: sqrt(2.0))
- (9) lr_mul: Learning rate multiplier for the synthesis network.
- (default: 1.0)
- (10) noise_type: Type of noise added to the convolutional results at each
- layer. (default: `spatial`)
- (11) fmaps_base: Factor to control number of feature maps for each layer.
- (default: 16 << 10)
- (12) fmaps_max: Maximum number of feature maps in each layer. (default: 512)
- (13) filter_kernel: Kernel used for filtering (e.g., downsampling).
- (default: (1, 2, 1))
- (14) eps: A small value to avoid divide overflow. (default: 1e-8)
-
- Runtime settings:
-
- (1) w_moving_decay: Decay factor for updating `w_avg`, which is used for
- training only. Set `None` to disable. (default: None)
- (2) sync_w_avg: Synchronizing the stats of `w_avg` across replicas. If set
- as `True`, the stats will be more accurate, yet the speed maybe a little
- bit slower. (default: False)
- (3) style_mixing_prob: Probability to perform style mixing as a training
- regularization. Set `None` to disable. (default: None)
- (4) trunc_psi: Truncation psi, set `None` to disable. (default: None)
- (5) trunc_layers: Number of layers to perform truncation. (default: None)
- (6) noise_mode: Mode of the layer-wise noise. Support `none`, `random`,
- `const`. (default: `const`)
- (7) enable_amp: Whether to enable automatic mixed precision training.
- (default: False)
- """
-
- def __init__(self,
- # Settings for mapping network.
- z_dim=512,
- w_dim=512,
- repeat_w=True,
- normalize_z=True,
- mapping_layers=8,
- mapping_fmaps=512,
- mapping_use_wscale=True,
- mapping_wscale_gain=np.sqrt(2.0),
- mapping_lr_mul=0.01,
- # Settings for conditional generation.
- label_dim=0,
- embedding_dim=512,
- # Settings for synthesis network.
- resolution=-1,
- init_res=4,
- image_channels=3,
- final_tanh=False,
- fused_scale='auto',
- fused_scale_res=128,
- use_wscale=True,
- wscale_gain=np.sqrt(2.0),
- lr_mul=1.0,
- noise_type='spatial',
- fmaps_base=16 << 10,
- fmaps_max=512,
- filter_kernel=(1, 2, 1),
- eps=1e-8):
- """Initializes with basic settings.
-
- Raises:
- ValueError: If the `resolution` is not supported, or `fused_scale`
- is not supported.
- """
- super().__init__()
-
- if resolution not in _RESOLUTIONS_ALLOWED:
- raise ValueError(f'Invalid resolution: `{resolution}`!\n'
- f'Resolutions allowed: {_RESOLUTIONS_ALLOWED}.')
- if fused_scale not in _FUSED_SCALE_ALLOWED:
- raise ValueError(f'Invalid fused-scale option: `{fused_scale}`!\n'
- f'Options allowed: {_FUSED_SCALE_ALLOWED}.')
-
- self.z_dim = z_dim
- self.w_dim = w_dim
- self.repeat_w = repeat_w
- self.normalize_z = normalize_z
- self.mapping_layers = mapping_layers
- self.mapping_fmaps = mapping_fmaps
- self.mapping_use_wscale = mapping_use_wscale
- self.mapping_wscale_gain = mapping_wscale_gain
- self.mapping_lr_mul = mapping_lr_mul
-
- self.label_dim = label_dim
- self.embedding_dim = embedding_dim
-
- self.resolution = resolution
- self.init_res = init_res
- self.image_channels = image_channels
- self.final_tanh = final_tanh
- self.fused_scale = fused_scale
- self.fused_scale_res = fused_scale_res
- self.use_wscale = use_wscale
- self.wscale_gain = wscale_gain
- self.lr_mul = lr_mul
- self.noise_type = noise_type.lower()
- self.fmaps_base = fmaps_base
- self.fmaps_max = fmaps_max
- self.filter_kernel = filter_kernel
- self.eps = eps
-
- # Dimension of latent space, which is convenient for sampling.
- self.latent_dim = (z_dim,)
-
- # Number of synthesis (convolutional) layers.
- self.num_layers = int(np.log2(resolution // init_res * 2)) * 2
-
- self.mapping = MappingNetwork(input_dim=z_dim,
- output_dim=w_dim,
- num_outputs=self.num_layers,
- repeat_output=repeat_w,
- normalize_input=normalize_z,
- num_layers=mapping_layers,
- hidden_dim=mapping_fmaps,
- use_wscale=mapping_use_wscale,
- wscale_gain=mapping_wscale_gain,
- lr_mul=mapping_lr_mul,
- label_dim=label_dim,
- embedding_dim=embedding_dim,
- eps=eps)
-
- # This is used for truncation trick.
- if self.repeat_w:
- self.register_buffer('w_avg', torch.zeros(w_dim))
- else:
- self.register_buffer('w_avg', torch.zeros(self.num_layers * w_dim))
-
- self.synthesis = SynthesisNetwork(resolution=resolution,
- init_res=init_res,
- w_dim=w_dim,
- image_channels=image_channels,
- final_tanh=final_tanh,
- fused_scale=fused_scale,
- fused_scale_res=fused_scale_res,
- use_wscale=use_wscale,
- wscale_gain=wscale_gain,
- lr_mul=lr_mul,
- noise_type=noise_type,
- fmaps_base=fmaps_base,
- fmaps_max=fmaps_max,
- filter_kernel=filter_kernel,
- eps=eps)
-
- self.pth_to_tf_var_mapping = {'w_avg': 'dlatent_avg'}
- for key, val in self.mapping.pth_to_tf_var_mapping.items():
- self.pth_to_tf_var_mapping[f'mapping.{key}'] = val
- for key, val in self.synthesis.pth_to_tf_var_mapping.items():
- self.pth_to_tf_var_mapping[f'synthesis.{key}'] = val
-
- def set_space_of_latent(self, space_of_latent):
- """Sets the space to which the latent code belong.
-
- See `SynthesisNetwork` for more details.
- """
- self.synthesis.set_space_of_latent(space_of_latent)
-
- def forward(self,
- z,
- label=None,
- lod=None,
- w_moving_decay=None,
- sync_w_avg=False,
- style_mixing_prob=None,
- trunc_psi=None,
- trunc_layers=None,
- noise_mode='const',
- enable_amp=False):
- mapping_results = self.mapping(z, label)
-
- w = mapping_results['w']
- if self.training and w_moving_decay is not None:
- if sync_w_avg:
- batch_w_avg = all_gather(w.detach()).mean(dim=0)
- else:
- batch_w_avg = w.detach().mean(dim=0)
- self.w_avg.copy_(batch_w_avg.lerp(self.w_avg, w_moving_decay))
-
- wp = mapping_results.pop('wp')
- if self.training and style_mixing_prob is not None:
- if np.random.uniform() < style_mixing_prob:
- new_z = torch.randn_like(z)
- new_wp = self.mapping(new_z, label)['wp']
- lod = self.synthesis.lod.item() if lod is None else lod
- current_layers = self.num_layers - int(lod) * 2
- mixing_cutoff = np.random.randint(1, current_layers)
- wp[:, mixing_cutoff:] = new_wp[:, mixing_cutoff:]
-
- if not self.training:
- trunc_psi = 1.0 if trunc_psi is None else trunc_psi
- trunc_layers = 0 if trunc_layers is None else trunc_layers
- if trunc_psi < 1.0 and trunc_layers > 0:
- w_avg = self.w_avg.reshape(1, -1, self.w_dim)[:, :trunc_layers]
- wp[:, :trunc_layers] = w_avg.lerp(
- wp[:, :trunc_layers], trunc_psi)
-
- with autocast(enabled=enable_amp):
- synthesis_results = self.synthesis(wp,
- lod=lod,
- noise_mode=noise_mode)
-
- return {**mapping_results, **synthesis_results}
-
-
-class MappingNetwork(nn.Module):
- """Implements the latent space mapping module.
-
- Basically, this module executes several dense layers in sequence, and the
- label embedding if needed.
- """
-
- def __init__(self,
- input_dim,
- output_dim,
- num_outputs,
- repeat_output,
- normalize_input,
- num_layers,
- hidden_dim,
- use_wscale,
- wscale_gain,
- lr_mul,
- label_dim,
- embedding_dim,
- eps):
- super().__init__()
-
- self.input_dim = input_dim
- self.output_dim = output_dim
- self.num_outputs = num_outputs
- self.repeat_output = repeat_output
- self.normalize_input = normalize_input
- self.num_layers = num_layers
- self.hidden_dim = hidden_dim
- self.use_wscale = use_wscale
- self.wscale_gain = wscale_gain
- self.lr_mul = lr_mul
- self.label_dim = label_dim
- self.embedding_dim = embedding_dim
- self.eps = eps
-
- self.pth_to_tf_var_mapping = {}
-
- if normalize_input:
- self.norm = PixelNormLayer(dim=1, eps=eps)
-
- if self.label_dim > 0:
- input_dim = input_dim + embedding_dim
- self.embedding = nn.Parameter(
- torch.randn(label_dim, embedding_dim))
- self.pth_to_tf_var_mapping['embedding'] = 'LabelConcat/weight'
-
- if num_outputs is not None and not repeat_output:
- output_dim = output_dim * num_outputs
- for i in range(num_layers):
- in_channels = (input_dim if i == 0 else hidden_dim)
- out_channels = (output_dim if i == (num_layers - 1) else hidden_dim)
- self.add_module(f'dense{i}',
- DenseLayer(in_channels=in_channels,
- out_channels=out_channels,
- add_bias=True,
- use_wscale=use_wscale,
- wscale_gain=wscale_gain,
- lr_mul=lr_mul,
- activation_type='lrelu'))
- self.pth_to_tf_var_mapping[f'dense{i}.weight'] = f'Dense{i}/weight'
- self.pth_to_tf_var_mapping[f'dense{i}.bias'] = f'Dense{i}/bias'
-
- def forward(self, z, label=None):
- if z.ndim != 2 or z.shape[1] != self.input_dim:
- raise ValueError(f'Input latent code should be with shape '
- f'[batch_size, input_dim], where '
- f'`input_dim` equals to {self.input_dim}!\n'
- f'But `{z.shape}` is received!')
-
- if self.label_dim > 0:
- if label is None:
- raise ValueError(f'Model requires an additional label '
- f'(with dimension {self.label_dim}) as input, '
- f'but no label is received!')
- if label.ndim != 2 or label.shape != (z.shape[0], self.label_dim):
- raise ValueError(f'Input label should be with shape '
- f'[batch_size, label_dim], where '
- f'`batch_size` equals to that of '
- f'latent codes ({z.shape[0]}) and '
- f'`label_dim` equals to {self.label_dim}!\n'
- f'But `{label.shape}` is received!')
- label = label.to(dtype=torch.float32)
- embedding = torch.matmul(label, self.embedding)
- z = torch.cat((z, embedding), dim=1)
-
- if self.normalize_input:
- w = self.norm(z)
- else:
- w = z
-
- for i in range(self.num_layers):
- w = getattr(self, f'dense{i}')(w)
-
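-        # Broadcast or reshape w into per-layer codes wp of shape [batch, num_outputs, output_dim].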
- wp = None
- if self.num_outputs is not None:
- if self.repeat_output:
- wp = w.unsqueeze(1).repeat((1, self.num_outputs, 1))
- else:
- wp = w.reshape(-1, self.num_outputs, self.output_dim)
-
- results = {
- 'z': z,
- 'label': label,
- 'w': w,
- 'wp': wp,
- }
- if self.label_dim > 0:
- results['embedding'] = embedding
- return results
-
-
-class SynthesisNetwork(nn.Module):
- """Implements the image synthesis module.
-
- Basically, this module executes several convolutional layers in sequence.
- """
-
- def __init__(self,
- resolution,
- init_res,
- w_dim,
- image_channels,
- final_tanh,
- fused_scale,
- fused_scale_res,
- use_wscale,
- wscale_gain,
- lr_mul,
- noise_type,
- fmaps_base,
- fmaps_max,
- filter_kernel,
- eps):
- super().__init__()
-
- self.init_res = init_res
- self.init_res_log2 = int(np.log2(init_res))
- self.resolution = resolution
- self.final_res_log2 = int(np.log2(resolution))
- self.w_dim = w_dim
- self.image_channels = image_channels
- self.final_tanh = final_tanh
- self.fused_scale = fused_scale
- self.fused_scale_res = fused_scale_res
- self.use_wscale = use_wscale
- self.wscale_gain = wscale_gain
- self.lr_mul = lr_mul
- self.noise_type = noise_type.lower()
- self.fmaps_base = fmaps_base
- self.fmaps_max = fmaps_max
- self.eps = eps
-
- self.num_layers = (self.final_res_log2 - self.init_res_log2 + 1) * 2
-
- # Level-of-details (used for progressive training).
- self.register_buffer('lod', torch.zeros(()))
- self.pth_to_tf_var_mapping = {'lod': 'lod'}
-
- for res_log2 in range(self.init_res_log2, self.final_res_log2 + 1):
- res = 2 ** res_log2
- in_channels = self.get_nf(res // 2)
- out_channels = self.get_nf(res)
- block_idx = res_log2 - self.init_res_log2
-
- # First layer (kernel 3x3) with upsampling
- layer_name = f'layer{2 * block_idx}'
- if res == self.init_res:
- self.add_module(layer_name,
- ModulateConvLayer(in_channels=0,
- out_channels=out_channels,
- resolution=res,
- w_dim=w_dim,
- kernel_size=None,
- add_bias=True,
- scale_factor=None,
- fused_scale=None,
- filter_kernel=None,
- use_wscale=use_wscale,
- wscale_gain=wscale_gain,
- lr_mul=lr_mul,
- noise_type=noise_type,
- activation_type='lrelu',
- use_style=True,
- eps=eps))
- tf_layer_name = 'Const'
- self.pth_to_tf_var_mapping[f'{layer_name}.const'] = (
- f'{res}x{res}/{tf_layer_name}/const')
- else:
- self.add_module(
- layer_name,
- ModulateConvLayer(in_channels=in_channels,
- out_channels=out_channels,
- resolution=res,
- w_dim=w_dim,
- kernel_size=3,
- add_bias=True,
- scale_factor=2,
- fused_scale=(res >= fused_scale_res
- if fused_scale == 'auto'
- else fused_scale),
- filter_kernel=filter_kernel,
- use_wscale=use_wscale,
- wscale_gain=wscale_gain,
- lr_mul=lr_mul,
- noise_type=noise_type,
- activation_type='lrelu',
- use_style=True,
- eps=eps))
- tf_layer_name = 'Conv0_up'
- self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = (
- f'{res}x{res}/{tf_layer_name}/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = (
- f'{res}x{res}/{tf_layer_name}/bias')
- self.pth_to_tf_var_mapping[f'{layer_name}.style.weight'] = (
- f'{res}x{res}/{tf_layer_name}/StyleMod/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.style.bias'] = (
- f'{res}x{res}/{tf_layer_name}/StyleMod/bias')
- self.pth_to_tf_var_mapping[f'{layer_name}.noise_strength'] = (
- f'{res}x{res}/{tf_layer_name}/Noise/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.noise'] = (
- f'noise{2 * block_idx}')
-
- # Second layer (kernel 3x3) without upsampling.
- layer_name = f'layer{2 * block_idx + 1}'
- self.add_module(layer_name,
- ModulateConvLayer(in_channels=out_channels,
- out_channels=out_channels,
- resolution=res,
- w_dim=w_dim,
- kernel_size=3,
- add_bias=True,
- scale_factor=1,
- fused_scale=False,
- filter_kernel=None,
- use_wscale=use_wscale,
- wscale_gain=wscale_gain,
- lr_mul=lr_mul,
- noise_type=noise_type,
- activation_type='lrelu',
- use_style=True,
- eps=eps))
- tf_layer_name = 'Conv' if res == self.init_res else 'Conv1'
- self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = (
- f'{res}x{res}/{tf_layer_name}/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = (
- f'{res}x{res}/{tf_layer_name}/bias')
- self.pth_to_tf_var_mapping[f'{layer_name}.style.weight'] = (
- f'{res}x{res}/{tf_layer_name}/StyleMod/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.style.bias'] = (
- f'{res}x{res}/{tf_layer_name}/StyleMod/bias')
- self.pth_to_tf_var_mapping[f'{layer_name}.noise_strength'] = (
- f'{res}x{res}/{tf_layer_name}/Noise/weight')
- self.pth_to_tf_var_mapping[f'{layer_name}.noise'] = (
- f'noise{2 * block_idx + 1}')
-
- # Output convolution layer for each resolution.
- self.add_module(f'output{block_idx}',
- ModulateConvLayer(in_channels=out_channels,
- out_channels=image_channels,
- resolution=res,
- w_dim=w_dim,
- kernel_size=1,
- add_bias=True,
- scale_factor=1,
- fused_scale=False,
- filter_kernel=None,
- use_wscale=use_wscale,
- wscale_gain=1.0,
- lr_mul=lr_mul,
- noise_type='none',
- activation_type='linear',
- use_style=False,
- eps=eps))
- self.pth_to_tf_var_mapping[f'output{block_idx}.weight'] = (
- f'ToRGB_lod{self.final_res_log2 - res_log2}/weight')
- self.pth_to_tf_var_mapping[f'output{block_idx}.bias'] = (
- f'ToRGB_lod{self.final_res_log2 - res_log2}/bias')
-
- def get_nf(self, res):
- """Gets number of feature maps according to the given resolution."""
- return min(self.fmaps_base // res, self.fmaps_max)
-
- def set_space_of_latent(self, space_of_latent):
- """Sets the space to which the latent code belong.
-
- This function is particularly used for choosing how to inject the latent
- code into the convolutional layers. The original generator will take a
- W-Space code and apply it for style modulation after an affine
- transformation. But, sometimes, it may need to directly feed an already
- affine-transformed code into the convolutional layer, e.g., when
- training an encoder for GAN inversion. We term the transformed space as
- Style Space (or Y-Space). This function is designed to tell the
- convolutional layers how to use the input code.
-
- Args:
- space_of_latent: The space to which the latent code belong. Case
- insensitive. Support `W` and `Y`.
- """
- space_of_latent = space_of_latent.upper()
- for module in self.modules():
- if isinstance(module, ModulateConvLayer) and module.use_style:
- setattr(module, 'space_of_latent', space_of_latent)
-
- def forward(self, wp, lod=None, noise_mode='const'):
- lod = self.lod.item() if lod is None else lod
- if lod + self.init_res_log2 > self.final_res_log2:
- raise ValueError(f'Maximum level-of-details (lod) is '
- f'{self.final_res_log2 - self.init_res_log2}, '
- f'but `{lod}` is received!')
-
- results = {'wp': wp}
- x = None
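-        # Progressive growing: larger lod values stop synthesis at a lower resolution and upsample the output.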
- for res_log2 in range(self.init_res_log2, self.final_res_log2 + 1):
- current_lod = self.final_res_log2 - res_log2
- block_idx = res_log2 - self.init_res_log2
- if lod < current_lod + 1:
- layer = getattr(self, f'layer{2 * block_idx}')
- x, style = layer(x, wp[:, 2 * block_idx], noise_mode)
- results[f'style{2 * block_idx}'] = style
- layer = getattr(self, f'layer{2 * block_idx + 1}')
- x, style = layer(x, wp[:, 2 * block_idx + 1], noise_mode)
- results[f'style{2 * block_idx + 1}'] = style
- if current_lod - 1 < lod <= current_lod:
- image = getattr(self, f'output{block_idx}')(x)
- elif current_lod < lod < current_lod + 1:
- alpha = np.ceil(lod) - lod
- temp = getattr(self, f'output{block_idx}')(x)
- image = F.interpolate(image, scale_factor=2, mode='nearest')
- image = temp * alpha + image * (1 - alpha)
- elif lod >= current_lod + 1:
- image = F.interpolate(image, scale_factor=2, mode='nearest')
-
- if self.final_tanh:
- image = torch.tanh(image)
- results['image'] = image
- return results
-
-
-class PixelNormLayer(nn.Module):
- """Implements pixel-wise feature vector normalization layer."""
-
- def __init__(self, dim, eps):
- super().__init__()
- self.dim = dim
- self.eps = eps
-
- def extra_repr(self):
- return f'dim={self.dim}, epsilon={self.eps}'
-
- def forward(self, x):
- scale = (x.square().mean(dim=self.dim, keepdim=True) + self.eps).rsqrt()
- return x * scale
-
-
-class Blur(torch.autograd.Function):
- """Defines blur operation with customized gradient computation."""
-
- @staticmethod
- def forward(ctx, x, kernel):
- assert kernel.shape[2] == 3 and kernel.shape[3] == 3
- ctx.save_for_backward(kernel)
- y = F.conv2d(input=x,
- weight=kernel,
- bias=None,
- stride=1,
- padding=1,
- groups=x.shape[1])
- return y
-
- @staticmethod
- def backward(ctx, dy):
- kernel, = ctx.saved_tensors
- dx = F.conv2d(input=dy,
- weight=kernel.flip((2, 3)),
- bias=None,
- stride=1,
- padding=1,
- groups=dy.shape[1])
- return dx, None, None
-
-
-class ModulateConvLayer(nn.Module):
- """Implements the convolutional layer with style modulation."""
-
- def __init__(self,
- in_channels,
- out_channels,
- resolution,
- w_dim,
- kernel_size,
- add_bias,
- scale_factor,
- fused_scale,
- filter_kernel,
- use_wscale,
- wscale_gain,
- lr_mul,
- noise_type,
- activation_type,
- use_style,
- eps):
- """Initializes with layer settings.
-
- Args:
- in_channels: Number of channels of the input tensor.
- out_channels: Number of channels of the output tensor.
- resolution: Resolution of the output tensor.
- w_dim: Dimension of W space for style modulation.
- kernel_size: Size of the convolutional kernels.
- add_bias: Whether to add bias onto the convolutional result.
- scale_factor: Scale factor for upsampling.
- fused_scale: Whether to fuse `upsample` and `conv2d` as one
- operator, using transpose convolution.
- filter_kernel: Kernel used for filtering.
- use_wscale: Whether to use weight scaling.
- wscale_gain: Gain factor for weight scaling.
- lr_mul: Learning multiplier for both weight and bias.
- noise_type: Type of noise added to the feature map after the
- convolution (if needed). Support `none`, `spatial` and
- `channel`.
- activation_type: Type of activation.
- use_style: Whether to apply style modulation.
- eps: A small value to avoid divide overflow.
- """
- super().__init__()
-
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.resolution = resolution
- self.w_dim = w_dim
- self.kernel_size = kernel_size
- self.add_bias = add_bias
- self.scale_factor = scale_factor
- self.fused_scale = fused_scale
- self.filter_kernel = filter_kernel
- self.use_wscale = use_wscale
- self.wscale_gain = wscale_gain
- self.lr_mul = lr_mul
- self.noise_type = noise_type.lower()
- self.activation_type = activation_type
- self.use_style = use_style
- self.eps = eps
-
- # Set up noise.
- if self.noise_type == 'none':
- pass
- elif self.noise_type == 'spatial':
- self.register_buffer(
- 'noise', torch.randn(1, 1, resolution, resolution))
- self.noise_strength = nn.Parameter(
- torch.zeros(1, out_channels, 1, 1))
- elif self.noise_type == 'channel':
- self.register_buffer(
- 'noise', torch.randn(1, out_channels, 1, 1))
- self.noise_strength = nn.Parameter(
- torch.zeros(1, 1, resolution, resolution))
- else:
- raise NotImplementedError(f'Not implemented noise type: '
- f'`{noise_type}`!')
-
- # Set up bias.
- if add_bias:
- self.bias = nn.Parameter(torch.zeros(out_channels))
- self.bscale = lr_mul
- else:
- self.bias = None
-
- # Set up activation.
- assert activation_type in ['linear', 'relu', 'lrelu']
-
- # Set up style.
- if use_style:
- self.space_of_latent = 'W'
- self.style = DenseLayer(in_channels=w_dim,
- out_channels=out_channels * 2,
- add_bias=True,
- use_wscale=use_wscale,
- wscale_gain=1.0,
- lr_mul=1.0,
- activation_type='linear')
-
- if in_channels == 0: # First layer.
- self.const = nn.Parameter(
- torch.ones(1, out_channels, resolution, resolution))
- return
-
- # Set up weight.
- weight_shape = (out_channels, in_channels, kernel_size, kernel_size)
- fan_in = kernel_size * kernel_size * in_channels
- wscale = wscale_gain / np.sqrt(fan_in)
- if use_wscale:
- self.weight = nn.Parameter(torch.randn(*weight_shape) / lr_mul)
- self.wscale = wscale * lr_mul
- else:
- self.weight = nn.Parameter(
- torch.randn(*weight_shape) * wscale / lr_mul)
- self.wscale = lr_mul
-
- # Set up upsampling filter (if needed).
- if scale_factor > 1:
- assert filter_kernel is not None
- kernel = np.array(filter_kernel, dtype=np.float32).reshape(1, -1)
- kernel = kernel.T.dot(kernel)
- kernel = kernel / np.sum(kernel)
- kernel = kernel[np.newaxis, np.newaxis]
- self.register_buffer('filter', torch.from_numpy(kernel))
-
- if scale_factor > 1 and fused_scale: # use transpose convolution.
- self.stride = scale_factor
- else:
- self.stride = 1
- self.padding = kernel_size // 2
-
- def extra_repr(self):
- return (f'in_ch={self.in_channels}, '
- f'out_ch={self.out_channels}, '
- f'ksize={self.kernel_size}, '
- f'wscale_gain={self.wscale_gain:.3f}, '
- f'bias={self.add_bias}, '
- f'lr_mul={self.lr_mul:.3f}, '
- f'upsample={self.scale_factor}, '
- f'fused_scale={self.fused_scale}, '
- f'upsample_filter={self.filter_kernel}, '
- f'noise_type={self.noise_type}, '
- f'act={self.activation_type}, '
- f'use_style={self.use_style}')
-
- def forward_style(self, w):
- """Gets style code from the given input.
-
- More specifically, if the input is from W-Space, it will be projected by
- an affine transformation. If it is from the Style Space (Y-Space), no
- operation is required.
-
-        NOTE: For codes from Y-Space, we use slicing to make sure the dimension
-        is correct, in case the code was padded before being fed into this layer.
- """
- space_of_latent = self.space_of_latent.upper()
- if space_of_latent == 'W':
- if w.ndim != 2 or w.shape[1] != self.w_dim:
- raise ValueError(f'The input tensor should be with shape '
- f'[batch_size, w_dim], where '
- f'`w_dim` equals to {self.w_dim}!\n'
- f'But `{w.shape}` is received!')
- style = self.style(w)
- elif space_of_latent == 'Y':
- if w.ndim != 2 or w.shape[1] < self.out_channels * 2:
- raise ValueError(f'The input tensor should be with shape '
- f'[batch_size, y_dim], where '
- f'`y_dim` equals to {self.out_channels * 2}!\n'
- f'But `{w.shape}` is received!')
- style = w[:, :self.out_channels * 2]
- else:
- raise NotImplementedError(f'Not implemented `space_of_latent`: '
- f'`{space_of_latent}`!')
- return style
-
- def forward(self, x, w=None, noise_mode='const'):
- if self.in_channels == 0:
- assert x is None
- x = self.const.repeat(w.shape[0], 1, 1, 1)
- else:
- weight = self.weight
- if self.wscale != 1.0:
- weight = weight * self.wscale
-
- if self.scale_factor > 1 and self.fused_scale:
- weight = F.pad(weight, (1, 1, 1, 1, 0, 0, 0, 0), 'constant', 0)
- weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] +
- weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1])
- x = F.conv_transpose2d(x,
- weight=weight.transpose(0, 1),
- bias=None,
- stride=self.stride,
- padding=self.padding)
- else:
- if self.scale_factor > 1:
- up = self.scale_factor
- x = F.interpolate(x, scale_factor=up, mode='nearest')
- x = F.conv2d(x,
- weight=weight,
- bias=None,
- stride=self.stride,
- padding=self.padding)
-
- if self.scale_factor > 1:
- # Disable `autocast` for customized autograd function.
- # Please check reference:
- # https://pytorch.org/docs/stable/notes/amp_examples.html#autocast-and-custom-autograd-functions
- with autocast(enabled=False):
- f = self.filter.repeat(self.out_channels, 1, 1, 1)
- x = Blur.apply(x.float(), f) # Always use FP32.
-
- # Prepare noise.
- noise_mode = noise_mode.lower()
- if self.noise_type != 'none' and noise_mode != 'none':
- if noise_mode == 'random':
- noise = torch.randn(
- (x.shape[0], *self.noise.shape[1:]), device=x.device)
- elif noise_mode == 'const':
- noise = self.noise
- else:
- raise ValueError(f'Unknown noise mode `{noise_mode}`!')
- x = x + noise * self.noise_strength
-
- if self.bias is not None:
- bias = self.bias
- if self.bscale != 1.0:
- bias = bias * self.bscale
- x = x + bias.reshape(1, self.out_channels, 1, 1)
-
- if self.activation_type == 'linear':
- pass
- elif self.activation_type == 'relu':
- x = F.relu(x, inplace=True)
- elif self.activation_type == 'lrelu':
- x = F.leaky_relu(x, negative_slope=0.2, inplace=True)
- else:
- raise NotImplementedError(f'Not implemented activation type '
- f'`{self.activation_type}`!')
-
- if not self.use_style:
- return x
-
- # Instance normalization.
- x = x - x.mean(dim=(2, 3), keepdim=True)
- scale = (x.square().mean(dim=(2, 3), keepdim=True) + self.eps).rsqrt()
- x = x * scale
- # Style modulation.
- style = self.forward_style(w)
- style_split = style.unsqueeze(2).unsqueeze(3).chunk(2, dim=1)
- x = x * (style_split[0] + 1) + style_split[1]
-
- return x, style
-
-
-class DenseLayer(nn.Module):
- """Implements the dense layer."""
-
- def __init__(self,
- in_channels,
- out_channels,
- add_bias,
- use_wscale,
- wscale_gain,
- lr_mul,
- activation_type):
- """Initializes with layer settings.
-
- Args:
- in_channels: Number of channels of the input tensor.
- out_channels: Number of channels of the output tensor.
- add_bias: Whether to add bias onto the fully-connected result.
- use_wscale: Whether to use weight scaling.
- wscale_gain: Gain factor for weight scaling.
-            lr_mul: Learning rate multiplier for both weight and bias.
- activation_type: Type of activation.
- """
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.add_bias = add_bias
- self.use_wscale = use_wscale
- self.wscale_gain = wscale_gain
- self.lr_mul = lr_mul
- self.activation_type = activation_type
-
- weight_shape = (out_channels, in_channels)
- wscale = wscale_gain / np.sqrt(in_channels)
- if use_wscale:
- self.weight = nn.Parameter(torch.randn(*weight_shape) / lr_mul)
- self.wscale = wscale * lr_mul
- else:
- self.weight = nn.Parameter(
- torch.randn(*weight_shape) * wscale / lr_mul)
- self.wscale = lr_mul
-
- if add_bias:
- self.bias = nn.Parameter(torch.zeros(out_channels))
- self.bscale = lr_mul
- else:
- self.bias = None
-
- assert activation_type in ['linear', 'relu', 'lrelu']
-
- def extra_repr(self):
- return (f'in_ch={self.in_channels}, '
- f'out_ch={self.out_channels}, '
- f'wscale_gain={self.wscale_gain:.3f}, '
- f'bias={self.add_bias}, '
- f'lr_mul={self.lr_mul:.3f}, '
- f'act={self.activation_type}')
-
- def forward(self, x):
- if x.ndim != 2:
- x = x.flatten(start_dim=1)
-
- weight = self.weight
- if self.wscale != 1.0:
- weight = weight * self.wscale
- bias = None
- if self.bias is not None:
- bias = self.bias
- if self.bscale != 1.0:
- bias = bias * self.bscale
-
- x = F.linear(x, weight=weight, bias=bias)
-
- if self.activation_type == 'linear':
- pass
- elif self.activation_type == 'relu':
- x = F.relu(x, inplace=True)
- elif self.activation_type == 'lrelu':
- x = F.leaky_relu(x, negative_slope=0.2, inplace=True)
- else:
- raise NotImplementedError(f'Not implemented activation type '
- f'`{self.activation_type}`!')
-
- return x
-
-# pylint: enable=missing-function-docstring
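Note on the `use_wscale` branches in the deleted layers above: weights are stored at unit variance and rescaled at run time by `wscale_gain / sqrt(fan_in)` (the StyleGAN "equalized learning rate" trick). A minimal standalone sketch of that rescaling, using plain PyTorch and no dependency on the deleted module:

```python
import numpy as np
import torch
import torch.nn.functional as F

in_channels, out_channels = 512, 256
wscale_gain, lr_mul = 1.0, 1.0

# Runtime scale, as computed in DenseLayer.__init__ above.
wscale = wscale_gain / np.sqrt(in_channels)

raw_weight = torch.randn(out_channels, in_channels) / lr_mul   # stored parameter
effective_weight = raw_weight * (wscale * lr_mul)               # applied in forward()

x = torch.randn(4, in_channels)
y = F.linear(x, effective_weight)
print(y.shape)                        # torch.Size([4, 256])
print(effective_weight.std().item())  # roughly 1 / sqrt(512), i.e. He-style init scale
```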
diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.cpp b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.cpp
deleted file mode 100644
index c1f2c50c82909bbd5492c163d634af77a3ba1781..0000000000000000000000000000000000000000
--- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-#include "MsDeformAttn/ms_deform_attn.h"
-
-namespace groundingdino {
-
-#ifdef WITH_CUDA
-extern int get_cudart_version();
-#endif
-
-std::string get_cuda_version() {
-#ifdef WITH_CUDA
- std::ostringstream oss;
-
- // copied from
- // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
- auto printCudaStyleVersion = [&](int v) {
- oss << (v / 1000) << "." << (v / 10 % 100);
- if (v % 10 != 0) {
- oss << "." << (v % 10);
- }
- };
- printCudaStyleVersion(get_cudart_version());
- return oss.str();
-#else
- return std::string("not available");
-#endif
-}
-
-// similar to
-// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
-std::string get_compiler_version() {
- std::ostringstream ss;
-#if defined(__GNUC__)
-#ifndef __clang__
- { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
-#endif
-#endif
-
-#if defined(__clang_major__)
- {
- ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
- << __clang_patchlevel__;
- }
-#endif
-
-#if defined(_MSC_VER)
- { ss << "MSVC " << _MSC_FULL_VER; }
-#endif
- return ss.str();
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
- m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
-}
-
-} // namespace groundingdino
\ No newline at end of file
diff --git a/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/__init__.py b/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/__init__.py
deleted file mode 100644
index 34383d83f5e76bc801f31b20e5651e383be348b6..0000000000000000000000000000000000000000
--- a/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .build_sam import (
- build_sam,
- build_sam_vit_h,
- build_sam_vit_l,
- build_sam_vit_b,
- sam_model_registry,
-)
-from .predictor import SamPredictor
-from .automatic_mask_generator import SamAutomaticMaskGenerator
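The deleted `__init__.py` above only re-exports the public SAM entry points. A usage sketch of those exports follows; the checkpoint and image paths are placeholders:

```python
import cv2
import numpy as np
from segment_anything import sam_model_registry, SamPredictor, SamAutomaticMaskGenerator

# Placeholder checkpoint and image paths.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)
masks, scores, logits = predictor.predict(
    point_coords=np.array([[500, 375]]),
    point_labels=np.array([1]),
    multimask_output=True,
)

# Fully automatic mask proposals over the whole image.
mask_generator = SamAutomaticMaskGenerator(sam)
auto_masks = mask_generator.generate(image)
```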
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/data/prefetch_dataloader.py b/spaces/Iceclear/StableSR/StableSR/basicsr/data/prefetch_dataloader.py
deleted file mode 100644
index 332abd32fcb004e6892d12dc69848a4454e3c503..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/data/prefetch_dataloader.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import queue as Queue
-import threading
-import torch
-from torch.utils.data import DataLoader
-
-
-class PrefetchGenerator(threading.Thread):
- """A general prefetch generator.
-
- Reference: https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
-
- Args:
- generator: Python generator.
-        num_prefetch_queue (int): Size of the prefetch queue.
- """
-
- def __init__(self, generator, num_prefetch_queue):
- threading.Thread.__init__(self)
- self.queue = Queue.Queue(num_prefetch_queue)
- self.generator = generator
- self.daemon = True
- self.start()
-
- def run(self):
- for item in self.generator:
- self.queue.put(item)
- self.queue.put(None)
-
- def __next__(self):
- next_item = self.queue.get()
- if next_item is None:
- raise StopIteration
- return next_item
-
- def __iter__(self):
- return self
-
-
-class PrefetchDataLoader(DataLoader):
- """Prefetch version of dataloader.
-
- Reference: https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
-
- TODO:
- Need to test on single gpu and ddp (multi-gpu). There is a known issue in
- ddp.
-
- Args:
-        num_prefetch_queue (int): Size of the prefetch queue.
- kwargs (dict): Other arguments for dataloader.
- """
-
- def __init__(self, num_prefetch_queue, **kwargs):
- self.num_prefetch_queue = num_prefetch_queue
- super(PrefetchDataLoader, self).__init__(**kwargs)
-
- def __iter__(self):
- return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)
-
-
-class CPUPrefetcher():
- """CPU prefetcher.
-
- Args:
- loader: Dataloader.
- """
-
- def __init__(self, loader):
- self.ori_loader = loader
- self.loader = iter(loader)
-
- def next(self):
- try:
- return next(self.loader)
- except StopIteration:
- return None
-
- def reset(self):
- self.loader = iter(self.ori_loader)
-
-
-class CUDAPrefetcher():
- """CUDA prefetcher.
-
- Reference: https://github.com/NVIDIA/apex/issues/304#
-
- It may consume more GPU memory.
-
- Args:
- loader: Dataloader.
- opt (dict): Options.
- """
-
- def __init__(self, loader, opt):
- self.ori_loader = loader
- self.loader = iter(loader)
- self.opt = opt
- self.stream = torch.cuda.Stream()
- self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
- self.preload()
-
- def preload(self):
- try:
- self.batch = next(self.loader) # self.batch is a dict
- except StopIteration:
- self.batch = None
- return None
- # put tensors to gpu
- with torch.cuda.stream(self.stream):
- for k, v in self.batch.items():
- if torch.is_tensor(v):
- self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)
-
- def next(self):
- torch.cuda.current_stream().wait_stream(self.stream)
- batch = self.batch
- self.preload()
- return batch
-
- def reset(self):
- self.loader = iter(self.ori_loader)
- self.preload()
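A rough sketch of how the prefetchers defined above are normally driven in a training loop; the toy dataset and the `opt` dict are assumptions, and the import path assumes the usual BasicSR package layout:

```python
import torch
from torch.utils.data import DataLoader, Dataset

from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher


class ToyPairs(Dataset):
    """Yields dict samples, matching what CUDAPrefetcher.preload expects."""
    def __len__(self):
        return 64

    def __getitem__(self, idx):
        return {'lq': torch.randn(3, 32, 32), 'gt': torch.randn(3, 32, 32)}


loader = DataLoader(ToyPairs(), batch_size=8)
opt = {'num_gpu': 1 if torch.cuda.is_available() else 0}
prefetcher = CUDAPrefetcher(loader, opt) if opt['num_gpu'] else CPUPrefetcher(loader)

batch = prefetcher.next()
while batch is not None:
    # ... forward / backward on batch['lq'], batch['gt'] ...
    batch = prefetcher.next()
prefetcher.reset()  # rewind for the next epoch
```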
diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/__init__.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/__init__.py
deleted file mode 100644
index 11e5586c347c3071a9d1aca0425d112f45402e85..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/text/__init__.py
+++ /dev/null
@@ -1,60 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      text: string to convert to a sequence
-      symbols: list of valid symbols; characters not in this list are skipped
-      cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = []
- symbol_to_id = {s: i for i, s in enumerate(symbols)}
- clean_text = _clean_text(text, cleaner_names)
- print(clean_text)
- print(f" length:{len(clean_text)}")
- for symbol in clean_text:
- if symbol not in symbol_to_id.keys():
- continue
- symbol_id = symbol_to_id[symbol]
- sequence += [symbol_id]
- print(f" length:{len(sequence)}")
- return sequence
-
-
-def cleaned_text_to_sequence(cleaned_text, symbols):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      cleaned_text: already-cleaned string to convert to a sequence
-      symbols: list of valid symbols; characters not in this list are skipped
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = [symbol_to_id[symbol] for symbol in cleaned_text if symbol in symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
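A hedged usage sketch of the front-end above, assuming the rest of the `text` package (`text.symbols`, `text.cleaners`) is importable; the symbol list and cleaner name below are placeholders, since the real ones belong to the fine-tuned model:

```python
from text import text_to_sequence

toy_symbols = ['_', ' ', ',', '.', 'a', 'e', 'h', 'i', 'k', 'n', 'o']  # placeholder symbol set
seq = text_to_sequence("konnichiha", toy_symbols, ["basic_cleaners"])   # placeholder cleaner name
print(seq)  # integer IDs; symbols missing from the list are silently skipped
```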
diff --git a/spaces/IlyaGusev/saiga_mistral_7b_gguf/app.py b/spaces/IlyaGusev/saiga_mistral_7b_gguf/app.py
deleted file mode 100644
index 25d5f5ed7101a119fd1dc034ff2acb2f81cc3e12..0000000000000000000000000000000000000000
--- a/spaces/IlyaGusev/saiga_mistral_7b_gguf/app.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import gradio as gr
-
-import copy
-import random
-import os
-import requests
-import time
-import sys
-
-from huggingface_hub import snapshot_download
-from llama_cpp import Llama
-
-
-SYSTEM_PROMPT = "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им."
-SYSTEM_TOKEN = 1587
-USER_TOKEN = 2188
-BOT_TOKEN = 12435
-LINEBREAK_TOKEN = 13
-
-
-ROLE_TOKENS = {
- "user": USER_TOKEN,
- "bot": BOT_TOKEN,
- "system": SYSTEM_TOKEN
-}
-
-
-def get_message_tokens(model, role, content):
- message_tokens = model.tokenize(content.encode("utf-8"))
- message_tokens.insert(1, ROLE_TOKENS[role])
- message_tokens.insert(2, LINEBREAK_TOKEN)
- message_tokens.append(model.token_eos())
- return message_tokens
-
-
-def get_system_tokens(model):
- system_message = {"role": "system", "content": SYSTEM_PROMPT}
- return get_message_tokens(model, **system_message)
-
-
-repo_name = "IlyaGusev/saiga_mistral_7b_gguf"
-model_name = "model-q4_K.gguf"
-
-snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
-
-model = Llama(
- model_path=model_name,
- n_ctx=2000,
- n_parts=1,
-)
-
-max_new_tokens = 1500
-
-def user(message, history):
- new_history = history + [[message, None]]
- return "", new_history
-
-
-def bot(
- history,
- system_prompt,
- top_p,
- top_k,
- temp
-):
- tokens = get_system_tokens(model)[:]
- tokens.append(LINEBREAK_TOKEN)
-
- for user_message, bot_message in history[:-1]:
- message_tokens = get_message_tokens(model=model, role="user", content=user_message)
- tokens.extend(message_tokens)
- if bot_message:
- message_tokens = get_message_tokens(model=model, role="bot", content=bot_message)
- tokens.extend(message_tokens)
-
- last_user_message = history[-1][0]
- message_tokens = get_message_tokens(model=model, role="user", content=last_user_message)
- tokens.extend(message_tokens)
-
- role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
- tokens.extend(role_tokens)
- generator = model.generate(
- tokens,
- top_k=top_k,
- top_p=top_p,
- temp=temp
- )
-
- partial_text = ""
- for i, token in enumerate(generator):
- if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
- break
- partial_text += model.detokenize([token]).decode("utf-8", "ignore")
- history[-1][1] = partial_text
- yield history
-
-
-with gr.Blocks(
- theme=gr.themes.Soft()
-) as demo:
-    favicon = ''  # original inline <img> tag omitted
- gr.Markdown(
- f"""{favicon}Saiga Mistral 7B GGUF Q4_K
-
- This is a demo of a **Russian**-speaking Mistral-based model. If you are interested in other languages, please check other models, such as [MPT-7B-Chat](https://huggingface.co/spaces/mosaicml/mpt-7b-chat).
-
- Это демонстрационная версия [квантованной Сайги/Мистраль с 7 миллиардами параметров](https://huggingface.co/IlyaGusev/saiga_mistral_7b_gguf), работающая на CPU.
-
- Сайга — это разговорная языковая модель, дообученная на корпусах, сгенерированных ChatGPT, таких как [ru_turbo_alpaca](https://huggingface.co/datasets/IlyaGusev/ru_turbo_alpaca), [ru_turbo_saiga](https://huggingface.co/datasets/IlyaGusev/ru_turbo_saiga) и [gpt_roleplay_realm](https://huggingface.co/datasets/IlyaGusev/gpt_roleplay_realm).
- """
- )
- with gr.Row():
- with gr.Column(scale=5):
- system_prompt = gr.Textbox(label="Системный промпт", placeholder="", value=SYSTEM_PROMPT, interactive=False)
- chatbot = gr.Chatbot(label="Диалог").style(height=400)
- with gr.Column(min_width=80, scale=1):
- with gr.Tab(label="Параметры генерации"):
- top_p = gr.Slider(
- minimum=0.0,
- maximum=1.0,
- value=0.9,
- step=0.05,
- interactive=True,
- label="Top-p",
- )
- top_k = gr.Slider(
- minimum=10,
- maximum=100,
- value=30,
- step=5,
- interactive=True,
- label="Top-k",
- )
- temp = gr.Slider(
- minimum=0.0,
- maximum=2.0,
- value=0.01,
- step=0.01,
- interactive=True,
- label="Температура"
- )
- with gr.Row():
- with gr.Column():
- msg = gr.Textbox(
- label="Отправить сообщение",
- placeholder="Отправить сообщение",
- show_label=False,
- ).style(container=False)
- with gr.Column():
- with gr.Row():
- submit = gr.Button("Отправить")
- stop = gr.Button("Остановить")
- clear = gr.Button("Очистить")
- with gr.Row():
- gr.Markdown(
- """ПРЕДУПРЕЖДЕНИЕ: Модель может генерировать фактически или этически некорректные тексты. Мы не несём за это ответственность."""
- )
-
- # Pressing Enter
- submit_event = msg.submit(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).success(
- fn=bot,
- inputs=[
- chatbot,
- system_prompt,
- top_p,
- top_k,
- temp
- ],
- outputs=chatbot,
- queue=True,
- )
-
- # Pressing the button
- submit_click_event = submit.click(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).success(
- fn=bot,
- inputs=[
- chatbot,
- system_prompt,
- top_p,
- top_k,
- temp
- ],
- outputs=chatbot,
- queue=True,
- )
-
- # Stop generation
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[submit_event, submit_click_event],
- queue=False,
- )
-
- # Clear history
- clear.click(lambda: None, None, chatbot, queue=False)
-
-demo.queue(max_size=128, concurrency_count=1)
-demo.launch()
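Outside Gradio, the same helpers can drive a single-turn generation. A minimal sketch reusing the functions and constants defined in the app above (the sampling values mirror the UI defaults):

```python
# Build [BOS, role, \n, content, EOS] blocks as get_message_tokens does, then open the bot turn.
tokens = get_system_tokens(model) + [LINEBREAK_TOKEN]
tokens += get_message_tokens(model, role="user", content="Привет! Кто ты?")
tokens += [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]

answer = ""
for i, token in enumerate(model.generate(tokens, top_k=30, top_p=0.9, temp=0.01)):
    if token == model.token_eos() or i >= max_new_tokens:
        break
    answer += model.detokenize([token]).decode("utf-8", "ignore")
print(answer)
```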
diff --git a/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
deleted file mode 100644
index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000
--- a/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import parselmouth
-import numpy as np
-
-
-class PMF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
-        Interpolate the F0 sequence (fill in unvoiced, zero-valued frames).
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
-                ip_data[i] = data[i]  # there may be an unnecessary copy here
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def compute_f0(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0
-
- def compute_f0_uv(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0, uv
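Illustrative use of the predictor above; the audio path is a placeholder and the import path follows this repository's layout:

```python
import librosa
from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

wav, sr = librosa.load("speech.wav", sr=44100)  # placeholder file
predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)
print(f0.shape, uv.shape)  # one value per hop-length frame; uv flags originally-voiced frames
```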
diff --git a/spaces/JanDalhuysen/ChatPDF/README.md b/spaces/JanDalhuysen/ChatPDF/README.md
deleted file mode 100644
index 6e4b73a2f20840987415c25cb266f98011e14476..0000000000000000000000000000000000000000
--- a/spaces/JanDalhuysen/ChatPDF/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChatPDF
-emoji: 💻
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: zouguojun/chatPDF
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/ui/input.tsx b/spaces/Jeff2323/ai-comic-factory/src/components/ui/input.tsx
deleted file mode 100644
index 0757ddebdca3800bbd4a46fe1c2c17dff86c5e2f..0000000000000000000000000000000000000000
--- a/spaces/Jeff2323/ai-comic-factory/src/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from "react"
-
-import { cn } from "@/lib/utils"
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    return (
-      <input
-        type={type}
-        className={cn(className)}
-        ref={ref}
-        {...props}
-      />
-    )
- }
-)
-Input.displayName = "Input"
-
-export { Input }
diff --git a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_gopro.py b/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_gopro.py
deleted file mode 100644
index 25f26af6d57a8fe4fbc98247f1878eb96d601dd8..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_gopro.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# --------------------------------------------------------
-# InstructDiffusion
-# Based on instruct-pix2pix (https://github.com/timothybrooks/instruct-pix2pix)
-# Modified by Chen Li (edward82@stu.xjtu.edu.cn)
-# --------------------------------------------------------
-
-import os
-import numpy as np
-from torch.utils.data import Dataset
-import torch
-from PIL import Image
-import torchvision.transforms.functional as TF
-from pdb import set_trace as stx
-import random
-import cv2
-from PIL import Image
-import torchvision
-
-
-def is_image_file(filename):
- return any(filename.endswith(extension) for extension in ['jpeg', 'JPEG', 'jpg', 'png', 'JPG', 'PNG', 'gif'])
-
-
-class GoPro(Dataset):
- def __init__(self, path, split="train", size=256, interpolation="pil_lanczos",
- flip_prob=0.5, sample_weight=1.0, instruct=False):
- super(GoPro, self).__init__()
-
- inp_files = sorted(os.listdir(os.path.join(path, split, 'input')))
- tar_files = sorted(os.listdir(os.path.join(path, split, 'target')))
-
- self.inp_filenames = [os.path.join(path, split, 'input', x) for x in inp_files if is_image_file(x)]
- self.tar_filenames = [os.path.join(path, split, 'target', x) for x in tar_files if is_image_file(x)]
-
- self.size = size
- self.flip_prob = flip_prob
- self.sample_weight = sample_weight
- self.instruct = instruct
- self.sizex = len(self.tar_filenames) # get the size of target
-
- self.interpolation = {
- "cv_nearest": cv2.INTER_NEAREST,
- "cv_bilinear": cv2.INTER_LINEAR,
- "cv_bicubic": cv2.INTER_CUBIC,
- "cv_area": cv2.INTER_AREA,
- "cv_lanczos": cv2.INTER_LANCZOS4,
- "pil_nearest": Image.NEAREST,
- "pil_bilinear": Image.BILINEAR,
- "pil_bicubic": Image.BICUBIC,
- "pil_box": Image.BOX,
- "pil_hamming": Image.HAMMING,
- "pil_lanczos": Image.LANCZOS,
- }[interpolation]
-
- prompt_path='dataset/prompt/prompt_deblur.txt'
- self.prompt_list=[]
- with open(prompt_path) as f:
- line=f.readline()
- while line:
- line=line.strip('\n')
- self.prompt_list.append(line)
- line=f.readline()
-
- print(f"GoPro has {len(self)} samples!!")
-
- def __len__(self):
- return int(self.sizex * self.sample_weight)
-
- def __getitem__(self, index):
- if self.sample_weight >= 1:
- index_ = index % self.sizex
- else:
- index_ = int(index / self.sample_weight) + random.randint(0, int(1 / self.sample_weight) - 1)
-
- inp_path = self.inp_filenames[index_]
- tar_path = self.tar_filenames[index_]
-
- inp_img = Image.open(inp_path)
- tar_img = Image.open(tar_path)
-
- width, height = inp_img.size
- tar_width, tar_height = tar_img.size
- assert tar_width == width and tar_height == height, "Input and target image mismatch"
- aspect_ratio = float(width) / float(height)
- if width < height:
- new_width = self.size
- new_height = int(self.size / aspect_ratio)
- else:
- new_height = self.size
- new_width = int(self.size * aspect_ratio)
- inp_img = inp_img.resize((new_width, new_height), self.interpolation)
- tar_img = tar_img.resize((new_width, new_height), self.interpolation)
-
- inp_img = np.array(inp_img).astype(np.float32).transpose(2, 0, 1)
- inp_img_tensor = torch.tensor((inp_img / 127.5 - 1.0).astype(np.float32))
- tar_img = np.array(tar_img).astype(np.float32).transpose(2, 0, 1)
- tar_img_tensor = torch.tensor((tar_img / 127.5 - 1.0).astype(np.float32))
- crop = torchvision.transforms.RandomCrop(self.size)
- flip = torchvision.transforms.RandomHorizontalFlip(float(self.flip_prob))
- image_0, image_1 = flip(crop(torch.cat((inp_img_tensor, tar_img_tensor)))).chunk(2)
-
- prompt = random.choice(self.prompt_list)
- if self.instruct:
- prompt = "Image Deblurring: " + prompt
-
- return dict(edited=image_1, edit=dict(c_concat=image_0, c_crossattn=prompt))
\ No newline at end of file
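A hypothetical wiring of the dataset above into a DataLoader; the data root is assumed to follow the `input`/`target` layout read in `__init__`, and the prompt file path is hard-coded there:

```python
from torch.utils.data import DataLoader

from dataset.low_level.lowlevel_gopro import GoPro

dataset = GoPro(path="data/GoPro", split="train", size=256, instruct=True)  # placeholder root
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

sample = next(iter(loader))
print(sample["edited"].shape)             # target crops, e.g. (4, 3, 256, 256)
print(sample["edit"]["c_concat"].shape)   # blurred input crops
print(sample["edit"]["c_crossattn"][:2])  # sampled deblurring instructions
```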
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/audio.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/audio.py
deleted file mode 100644
index 5c087eea5d23d7e23ea0ef277ea4b92e9f4f2d55..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/audio.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from scipy.ndimage import binary_dilation
-from encoder.params_data import *
-from pathlib import Path
-from typing import Optional, Union
-from warnings import warn
-import numpy as np
-import librosa
-import struct
-
-try:
- import webrtcvad
-except ImportError:
-    warn("Unable to import 'webrtcvad'. This package enables noise removal and is recommended.")
-    webrtcvad = None
-
-int16_max = (2 ** 15) - 1
-
-
-def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],
- source_sr: Optional[int] = None,
- normalize: Optional[bool] = True,
- trim_silence: Optional[bool] = True):
- """
- Applies the preprocessing operations used in training the Speaker Encoder to a waveform
- either on disk or in memory. The waveform will be resampled to match the data hyperparameters.
-
- :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not
-    just .wav), or the waveform as a numpy array of floats.
- :param source_sr: if passing an audio waveform, the sampling rate of the waveform before
- preprocessing. After preprocessing, the waveform's sampling rate will match the data
- hyperparameters. If passing a filepath, the sampling rate will be automatically detected and
- this argument will be ignored.
- """
- # Load the wav from disk if needed
- if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):
- wav, source_sr = librosa.load(str(fpath_or_wav), sr=None)
- else:
- wav = fpath_or_wav
-
- # Resample the wav if needed
- if source_sr is not None and source_sr != sampling_rate:
- wav = librosa.resample(wav, source_sr, sampling_rate)
-
- # Apply the preprocessing: normalize volume and shorten long silences
- if normalize:
- wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)
- if webrtcvad and trim_silence:
- wav = trim_long_silences(wav)
-
- return wav
-
-
-def wav_to_mel_spectrogram(wav):
- """
- Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform.
-    Note: this is not a log-mel spectrogram.
- """
- frames = librosa.feature.melspectrogram(
- y=wav,
- sr=sampling_rate,
- n_fft=int(sampling_rate * mel_window_length / 1000),
- hop_length=int(sampling_rate * mel_window_step / 1000),
- n_mels=mel_n_channels
- )
- return frames.astype(np.float32).T
-
-
-def trim_long_silences(wav):
- """
- Ensures that segments without voice in the waveform remain no longer than a
- threshold determined by the VAD parameters in params.py.
-
- :param wav: the raw waveform as a numpy array of floats
- :return: the same waveform with silences trimmed away (length <= original wav length)
- """
- # Compute the voice detection window size
- samples_per_window = (vad_window_length * sampling_rate) // 1000
-
- # Trim the end of the audio to have a multiple of the window size
- wav = wav[:len(wav) - (len(wav) % samples_per_window)]
-
- # Convert the float waveform to 16-bit mono PCM
- pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
-
- # Perform voice activation detection
- voice_flags = []
- vad = webrtcvad.Vad(mode=3)
- for window_start in range(0, len(wav), samples_per_window):
- window_end = window_start + samples_per_window
- voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
- sample_rate=sampling_rate))
- voice_flags = np.array(voice_flags)
-
- # Smooth the voice detection with a moving average
- def moving_average(array, width):
- array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
- ret = np.cumsum(array_padded, dtype=float)
- ret[width:] = ret[width:] - ret[:-width]
- return ret[width - 1:] / width
-
- audio_mask = moving_average(voice_flags, vad_moving_average_width)
-    audio_mask = np.round(audio_mask).astype(bool)
-
- # Dilate the voiced regions
- audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
- audio_mask = np.repeat(audio_mask, samples_per_window)
-
-    return wav[audio_mask]
-
-
-def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False):
- if increase_only and decrease_only:
- raise ValueError("Both increase only and decrease only are set")
- dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2))
- if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only):
- return wav
- return wav * (10 ** (dBFS_change / 20))
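Typical call sequence for this module; hyperparameters such as `sampling_rate` and the mel settings come from `encoder.params_data` via the star import at the top, and the file path below is a placeholder:

```python
from encoder.audio import preprocess_wav, wav_to_mel_spectrogram

wav = preprocess_wav("speaker_utterance.wav")  # load, resample, normalize volume, trim silence
mel = wav_to_mel_spectrogram(wav)              # (n_frames, mel_n_channels) float32 array
print(wav.shape, mel.shape)
```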
diff --git a/spaces/KyanChen/RSPrompter/mmdet/apis/det_inferencer.py b/spaces/KyanChen/RSPrompter/mmdet/apis/det_inferencer.py
deleted file mode 100644
index da4ad17128346de3c8ff268e899f989ddd8b99e9..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/apis/det_inferencer.py
+++ /dev/null
@@ -1,590 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import os.path as osp
-import warnings
-from typing import Dict, Iterable, List, Optional, Sequence, Union
-
-import mmcv
-import mmengine
-import numpy as np
-import torch.nn as nn
-from mmengine.dataset import Compose
-from mmengine.fileio import (get_file_backend, isdir, join_path,
- list_dir_or_file)
-from mmengine.infer.infer import BaseInferencer, ModelType
-from mmengine.model.utils import revert_sync_batchnorm
-from mmengine.registry import init_default_scope
-from mmengine.runner.checkpoint import _load_checkpoint_to_model
-from mmengine.visualization import Visualizer
-from rich.progress import track
-
-from mmdet.evaluation import INSTANCE_OFFSET
-from mmdet.registry import DATASETS
-from mmdet.structures import DetDataSample
-from mmdet.structures.mask import encode_mask_results, mask2bbox
-from mmdet.utils import ConfigType
-from ..evaluation import get_classes
-
-try:
- from panopticapi.evaluation import VOID
- from panopticapi.utils import id2rgb
-except ImportError:
- id2rgb = None
- VOID = None
-
-InputType = Union[str, np.ndarray]
-InputsType = Union[InputType, Sequence[InputType]]
-PredType = List[DetDataSample]
-ImgType = Union[np.ndarray, Sequence[np.ndarray]]
-
-IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
- '.tiff', '.webp')
-
-
-class DetInferencer(BaseInferencer):
- """Object Detection Inferencer.
-
- Args:
- model (str, optional): Path to the config file or the model name
- defined in metafile. For example, it could be
- "rtmdet-s" or 'rtmdet_s_8xb32-300e_coco' or
- "configs/rtmdet/rtmdet_s_8xb32-300e_coco.py".
- If model is not specified, user must provide the
- `weights` saved by MMEngine which contains the config string.
- Defaults to None.
- weights (str, optional): Path to the checkpoint. If it is not specified
- and model is a model name of metafile, the weights will be loaded
- from metafile. Defaults to None.
- device (str, optional): Device to run inference. If None, the available
- device will be automatically used. Defaults to None.
- scope (str, optional): The scope of the model. Defaults to mmdet.
- palette (str): Color palette used for visualization. The order of
- priority is palette -> config -> checkpoint. Defaults to 'none'.
- """
-
- preprocess_kwargs: set = set()
- forward_kwargs: set = set()
- visualize_kwargs: set = {
- 'return_vis',
- 'show',
- 'wait_time',
- 'draw_pred',
- 'pred_score_thr',
- 'img_out_dir',
- 'no_save_vis',
- }
- postprocess_kwargs: set = {
- 'print_result',
- 'pred_out_dir',
- 'return_datasample',
- 'no_save_pred',
- }
-
- def __init__(self,
- model: Optional[Union[ModelType, str]] = None,
- weights: Optional[str] = None,
- device: Optional[str] = None,
- scope: Optional[str] = 'mmdet',
- palette: str = 'none') -> None:
- # A global counter tracking the number of images processed, for
- # naming of the output images
- self.num_visualized_imgs = 0
- self.num_predicted_imgs = 0
- self.palette = palette
- init_default_scope(scope)
- super().__init__(
- model=model, weights=weights, device=device, scope=scope)
- self.model = revert_sync_batchnorm(self.model)
-
- def _load_weights_to_model(self, model: nn.Module,
- checkpoint: Optional[dict],
- cfg: Optional[ConfigType]) -> None:
- """Loading model weights and meta information from cfg and checkpoint.
-
- Args:
- model (nn.Module): Model to load weights and meta information.
- checkpoint (dict, optional): The loaded checkpoint.
- cfg (Config or ConfigDict, optional): The loaded config.
- """
-
- if checkpoint is not None:
- _load_checkpoint_to_model(model, checkpoint)
- checkpoint_meta = checkpoint.get('meta', {})
- # save the dataset_meta in the model for convenience
- if 'dataset_meta' in checkpoint_meta:
- # mmdet 3.x, all keys should be lowercase
- model.dataset_meta = {
- k.lower(): v
- for k, v in checkpoint_meta['dataset_meta'].items()
- }
- elif 'CLASSES' in checkpoint_meta:
- # < mmdet 3.x
- classes = checkpoint_meta['CLASSES']
- model.dataset_meta = {'classes': classes}
- else:
- warnings.warn(
- 'dataset_meta or class names are not saved in the '
- 'checkpoint\'s meta data, use COCO classes by default.')
- model.dataset_meta = {'classes': get_classes('coco')}
- else:
- warnings.warn('Checkpoint is not loaded, and the inference '
- 'result is calculated by the randomly initialized '
- 'model!')
- warnings.warn('weights is None, use COCO classes by default.')
- model.dataset_meta = {'classes': get_classes('coco')}
-
- # Priority: args.palette -> config -> checkpoint
- if self.palette != 'none':
- model.dataset_meta['palette'] = self.palette
- else:
- test_dataset_cfg = copy.deepcopy(cfg.test_dataloader.dataset)
- # lazy init. We only need the metainfo.
- test_dataset_cfg['lazy_init'] = True
- metainfo = DATASETS.build(test_dataset_cfg).metainfo
- cfg_palette = metainfo.get('palette', None)
- if cfg_palette is not None:
- model.dataset_meta['palette'] = cfg_palette
- else:
- if 'palette' not in model.dataset_meta:
- warnings.warn(
- 'palette does not exist, random is used by default. '
- 'You can also set the palette to customize.')
- model.dataset_meta['palette'] = 'random'
-
- def _init_pipeline(self, cfg: ConfigType) -> Compose:
- """Initialize the test pipeline."""
- pipeline_cfg = cfg.test_dataloader.dataset.pipeline
-
- # For inference, the key of ``img_id`` is not used.
- if 'meta_keys' in pipeline_cfg[-1]:
- pipeline_cfg[-1]['meta_keys'] = tuple(
- meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
- if meta_key != 'img_id')
-
- load_img_idx = self._get_transform_idx(pipeline_cfg,
- 'LoadImageFromFile')
- if load_img_idx == -1:
- raise ValueError(
- 'LoadImageFromFile is not found in the test pipeline')
- pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
- return Compose(pipeline_cfg)
-
- def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int:
- """Returns the index of the transform in a pipeline.
-
- If the transform is not found, returns -1.
- """
- for i, transform in enumerate(pipeline_cfg):
- if transform['type'] == name:
- return i
- return -1
-
- def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:
- """Initialize visualizers.
-
- Args:
- cfg (ConfigType): Config containing the visualizer information.
-
- Returns:
- Visualizer or None: Visualizer initialized with config.
- """
- visualizer = super()._init_visualizer(cfg)
- visualizer.dataset_meta = self.model.dataset_meta
- return visualizer
-
- def _inputs_to_list(self, inputs: InputsType) -> list:
- """Preprocess the inputs to a list.
-
- Preprocess inputs to a list according to its type:
-
- - list or tuple: return inputs
- - str:
- - Directory path: return all files in the directory
- - other cases: return a list containing the string. The string
- could be a path to file, a url or other types of string according
- to the task.
-
- Args:
- inputs (InputsType): Inputs for the inferencer.
-
- Returns:
- list: List of input for the :meth:`preprocess`.
- """
- if isinstance(inputs, str):
- backend = get_file_backend(inputs)
- if hasattr(backend, 'isdir') and isdir(inputs):
- # Backends like HttpsBackend do not implement `isdir`, so only
- # those backends that implement `isdir` could accept the inputs
- # as a directory
- filename_list = list_dir_or_file(
- inputs, list_dir=False, suffix=IMG_EXTENSIONS)
- inputs = [
- join_path(inputs, filename) for filename in filename_list
- ]
-
- if not isinstance(inputs, (list, tuple)):
- inputs = [inputs]
-
- return list(inputs)
-
- def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):
- """Process the inputs into a model-feedable format.
-
- Customize your preprocess by overriding this method. Preprocess should
- return an iterable object, of which each item will be used as the
- input of ``model.test_step``.
-
- ``BaseInferencer.preprocess`` will return an iterable chunked data,
- which will be used in __call__ like this:
-
- .. code-block:: python
-
- def __call__(self, inputs, batch_size=1, **kwargs):
- chunked_data = self.preprocess(inputs, batch_size, **kwargs)
- for batch in chunked_data:
- preds = self.forward(batch, **kwargs)
-
- Args:
- inputs (InputsType): Inputs given by user.
- batch_size (int): batch size. Defaults to 1.
-
- Yields:
- Any: Data processed by the ``pipeline`` and ``collate_fn``.
- """
- chunked_data = self._get_chunk_data(inputs, batch_size)
- yield from map(self.collate_fn, chunked_data)
-
- def _get_chunk_data(self, inputs: Iterable, chunk_size: int):
- """Get batch data from inputs.
-
- Args:
- inputs (Iterable): An iterable dataset.
- chunk_size (int): Equivalent to batch size.
-
- Yields:
- list: batch data.
- """
- inputs_iter = iter(inputs)
- while True:
- try:
- chunk_data = []
- for _ in range(chunk_size):
- inputs_ = next(inputs_iter)
- chunk_data.append((inputs_, self.pipeline(inputs_)))
- yield chunk_data
- except StopIteration:
- if chunk_data:
- yield chunk_data
- break
-
-    # TODO: Video and webcam inputs are currently not supported. Inference may
-    # consume too much memory if the input folder contains many images;
-    # this will be optimized later.
- def __call__(self,
- inputs: InputsType,
- batch_size: int = 1,
- return_vis: bool = False,
- show: bool = False,
- wait_time: int = 0,
- no_save_vis: bool = False,
- draw_pred: bool = True,
- pred_score_thr: float = 0.3,
- return_datasample: bool = False,
- print_result: bool = False,
- no_save_pred: bool = True,
- out_dir: str = '',
- **kwargs) -> dict:
- """Call the inferencer.
-
- Args:
- inputs (InputsType): Inputs for the inferencer.
- batch_size (int): Inference batch size. Defaults to 1.
- show (bool): Whether to display the visualization results in a
- popup window. Defaults to False.
- wait_time (float): The interval of show (s). Defaults to 0.
- no_save_vis (bool): Whether to force not to save prediction
- vis results. Defaults to False.
- draw_pred (bool): Whether to draw predicted bounding boxes.
- Defaults to True.
- pred_score_thr (float): Minimum score of bboxes to draw.
- Defaults to 0.3.
- return_datasample (bool): Whether to return results as
- :obj:`DetDataSample`. Defaults to False.
- print_result (bool): Whether to print the inference result w/o
- visualization to the console. Defaults to False.
- no_save_pred (bool): Whether to force not to save prediction
- results. Defaults to True.
-            out_dir (str): Directory to save the inference results or
- visualization. If left as empty, no file will be saved.
- Defaults to ''.
-
- **kwargs: Other keyword arguments passed to :meth:`preprocess`,
- :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
- Each key in kwargs should be in the corresponding set of
- ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
- and ``postprocess_kwargs``.
-
- Returns:
- dict: Inference and visualization results.
- """
- (
- preprocess_kwargs,
- forward_kwargs,
- visualize_kwargs,
- postprocess_kwargs,
- ) = self._dispatch_kwargs(**kwargs)
-
- ori_inputs = self._inputs_to_list(inputs)
- inputs = self.preprocess(
- ori_inputs, batch_size=batch_size, **preprocess_kwargs)
-
- results_dict = {'predictions': [], 'visualization': []}
- for ori_inputs, data in track(inputs, description='Inference'):
- preds = self.forward(data, **forward_kwargs)
- visualization = self.visualize(
- ori_inputs,
- preds,
- return_vis=return_vis,
- show=show,
- wait_time=wait_time,
- draw_pred=draw_pred,
- pred_score_thr=pred_score_thr,
- no_save_vis=no_save_vis,
- img_out_dir=out_dir,
- **visualize_kwargs)
- results = self.postprocess(
- preds,
- visualization,
- return_datasample=return_datasample,
- print_result=print_result,
- no_save_pred=no_save_pred,
- pred_out_dir=out_dir,
- **postprocess_kwargs)
- results_dict['predictions'].extend(results['predictions'])
- if results['visualization'] is not None:
- results_dict['visualization'].extend(results['visualization'])
- return results_dict
-
- def visualize(self,
- inputs: InputsType,
- preds: PredType,
- return_vis: bool = False,
- show: bool = False,
- wait_time: int = 0,
- draw_pred: bool = True,
- pred_score_thr: float = 0.3,
- no_save_vis: bool = False,
- img_out_dir: str = '',
- **kwargs) -> Union[List[np.ndarray], None]:
- """Visualize predictions.
-
- Args:
- inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
- preds (List[:obj:`DetDataSample`]): Predictions of the model.
- return_vis (bool): Whether to return the visualization result.
- Defaults to False.
- show (bool): Whether to display the image in a popup window.
- Defaults to False.
- wait_time (float): The interval of show (s). Defaults to 0.
- draw_pred (bool): Whether to draw predicted bounding boxes.
- Defaults to True.
- pred_score_thr (float): Minimum score of bboxes to draw.
- Defaults to 0.3.
- no_save_vis (bool): Whether to force not to save prediction
- vis results. Defaults to False.
- img_out_dir (str): Output directory of visualization results.
- If left as empty, no file will be saved. Defaults to ''.
-
- Returns:
- List[np.ndarray] or None: Returns visualization results only if
- applicable.
- """
- if no_save_vis is True:
- img_out_dir = ''
-
- if not show and img_out_dir == '' and not return_vis:
- return None
-
- if self.visualizer is None:
-            raise ValueError('Visualization needs the "visualizer" term '
-                             'defined in the config, but got None.')
-
- results = []
-
- for single_input, pred in zip(inputs, preds):
- if isinstance(single_input, str):
- img_bytes = mmengine.fileio.get(single_input)
- img = mmcv.imfrombytes(img_bytes)
- img = img[:, :, ::-1]
- img_name = osp.basename(single_input)
- elif isinstance(single_input, np.ndarray):
- img = single_input.copy()
- img_num = str(self.num_visualized_imgs).zfill(8)
- img_name = f'{img_num}.jpg'
- else:
- raise ValueError('Unsupported input type: '
- f'{type(single_input)}')
-
- out_file = osp.join(img_out_dir, 'vis',
- img_name) if img_out_dir != '' else None
-
- self.visualizer.add_datasample(
- img_name,
- img,
- pred,
- show=show,
- wait_time=wait_time,
- draw_gt=False,
- draw_pred=draw_pred,
- pred_score_thr=pred_score_thr,
- out_file=out_file,
- )
- results.append(self.visualizer.get_image())
- self.num_visualized_imgs += 1
-
- return results
-
- def postprocess(
- self,
- preds: PredType,
- visualization: Optional[List[np.ndarray]] = None,
- return_datasample: bool = False,
- print_result: bool = False,
- no_save_pred: bool = False,
- pred_out_dir: str = '',
- **kwargs,
- ) -> Dict:
- """Process the predictions and visualization results from ``forward``
- and ``visualize``.
-
- This method should be responsible for the following tasks:
-
- 1. Convert datasamples into a json-serializable dict if needed.
- 2. Pack the predictions and visualization results and return them.
- 3. Dump or log the predictions.
-
- Args:
- preds (List[:obj:`DetDataSample`]): Predictions of the model.
- visualization (Optional[np.ndarray]): Visualized predictions.
- return_datasample (bool): Whether to use Datasample to store
- inference results. If False, dict will be used.
- print_result (bool): Whether to print the inference result w/o
- visualization to the console. Defaults to False.
- no_save_pred (bool): Whether to force not to save prediction
- results. Defaults to False.
- pred_out_dir: Dir to save the inference results w/o
- visualization. If left as empty, no file will be saved.
- Defaults to ''.
-
- Returns:
- dict: Inference and visualization results with key ``predictions``
- and ``visualization``.
-
- - ``visualization`` (Any): Returned by :meth:`visualize`.
- - ``predictions`` (dict or DataSample): Returned by
- :meth:`forward` and processed in :meth:`postprocess`.
- If ``return_datasample=False``, it usually should be a
- json-serializable dict containing only basic data elements such
- as strings and numbers.
- """
- if no_save_pred is True:
- pred_out_dir = ''
-
- result_dict = {}
- results = preds
- if not return_datasample:
- results = []
- for pred in preds:
- result = self.pred2dict(pred, pred_out_dir)
- results.append(result)
- elif pred_out_dir != '':
- warnings.warn('Currently does not support saving datasample '
- 'when return_datasample is set to True. '
- 'Prediction results are not saved!')
- # Add img to the results after printing and dumping
- result_dict['predictions'] = results
- if print_result:
- print(result_dict)
- result_dict['visualization'] = visualization
- return result_dict
-
- # TODO: The data format and fields saved in json need further discussion.
- # Maybe should include model name, timestamp, filename, image info etc.
- def pred2dict(self,
- data_sample: DetDataSample,
- pred_out_dir: str = '') -> Dict:
- """Extract elements necessary to represent a prediction into a
- dictionary.
-
- It's better to contain only basic data elements such as strings and
- numbers in order to guarantee it's json-serializable.
-
- Args:
- data_sample (:obj:`DetDataSample`): Predictions of the model.
- pred_out_dir: Dir to save the inference results w/o
- visualization. If left as empty, no file will be saved.
- Defaults to ''.
-
- Returns:
- dict: Prediction results.
- """
- is_save_pred = True
- if pred_out_dir == '':
- is_save_pred = False
-
- if is_save_pred and 'img_path' in data_sample:
- img_path = osp.basename(data_sample.img_path)
- img_path = osp.splitext(img_path)[0]
- out_img_path = osp.join(pred_out_dir, 'preds',
- img_path + '_panoptic_seg.png')
- out_json_path = osp.join(pred_out_dir, 'preds', img_path + '.json')
- elif is_save_pred:
- out_img_path = osp.join(
- pred_out_dir, 'preds',
- f'{self.num_predicted_imgs}_panoptic_seg.png')
- out_json_path = osp.join(pred_out_dir, 'preds',
- f'{self.num_predicted_imgs}.json')
- self.num_predicted_imgs += 1
-
- result = {}
- if 'pred_instances' in data_sample:
- masks = data_sample.pred_instances.get('masks')
- pred_instances = data_sample.pred_instances.numpy()
- result = {
- 'bboxes': pred_instances.bboxes.tolist(),
- 'labels': pred_instances.labels.tolist(),
- 'scores': pred_instances.scores.tolist()
- }
- if masks is not None:
- if pred_instances.bboxes.sum() == 0:
- # Fake bbox, such as the SOLO.
- bboxes = mask2bbox(masks.cpu()).numpy().tolist()
- result['bboxes'] = bboxes
- encode_masks = encode_mask_results(pred_instances.masks)
- for encode_mask in encode_masks:
- if isinstance(encode_mask['counts'], bytes):
- encode_mask['counts'] = encode_mask['counts'].decode()
- result['masks'] = encode_masks
-
- if 'pred_panoptic_seg' in data_sample:
- if VOID is None:
- raise RuntimeError(
- 'panopticapi is not installed, please install it by: '
- 'pip install git+https://github.com/cocodataset/'
- 'panopticapi.git.')
-
- pan = data_sample.pred_panoptic_seg.sem_seg.cpu().numpy()[0]
- pan[pan % INSTANCE_OFFSET == len(
- self.model.dataset_meta['classes'])] = VOID
- pan = id2rgb(pan).astype(np.uint8)
-
- if is_save_pred:
- mmcv.imwrite(pan[:, :, ::-1], out_img_path)
- result['panoptic_seg_path'] = out_img_path
- else:
- result['panoptic_seg'] = pan
-
- if is_save_pred:
- mmengine.dump(result, out_json_path)
-
- return result
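End to end, the inferencer above is meant to be used roughly as in the sketch below; the model alias comes from the class docstring and the image path is a placeholder:

```python
from mmdet.apis import DetInferencer

inferencer = DetInferencer(model='rtmdet_s_8xb32-300e_coco')
results = inferencer('demo.jpg', out_dir='outputs/', no_save_pred=False, pred_score_thr=0.3)

pred = results['predictions'][0]
print(pred['labels'][:5], pred['scores'][:5])
```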
diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/pler/semseg_sam_pler.py b/spaces/KyanChen/RSPrompter/mmpl/models/pler/semseg_sam_pler.py
deleted file mode 100644
index 9343ec39a70bde288cf0831197b44766017b15fe..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpl/models/pler/semseg_sam_pler.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import torch
-from mmengine.structures import InstanceData, PixelData
-from typing import List
-
-from torch import Tensor
-
-from mmpl.registry import MODELS
-from mmseg.models.utils import resize
-from mmseg.structures import SegDataSample
-from mmseg.utils import SampleList, OptSampleList
-from .base_pler import BasePLer
-import torch.nn.functional as F
-from modules.sam import sam_model_registry
-
-
-@MODELS.register_module()
-class SemSegSAMPLer(BasePLer):
- def __init__(self,
- backbone,
- adaphead=None,
- decode_head=None,
- need_train_names=None,
- align_corners=False,
- train_cfg=None,
- test_cfg=None,
- *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.save_hyperparameters()
- self.need_train_names = need_train_names
- self.align_corners = align_corners
-
- backbone_type = backbone.pop('type')
- delete_submodel = backbone.pop('delete_submodel', [])
- self.backbone = sam_model_registry[backbone_type](**backbone)
- for submodel in delete_submodel:
- delattr(self.backbone, submodel)
-
- if adaphead is not None:
- self.adaphead = MODELS.build(adaphead)
-
- decode_head_ = decode_head.deepcopy()
- decode_head_.update(train_cfg=train_cfg)
- decode_head_.update(test_cfg=test_cfg)
- self.decode_head = MODELS.build(decode_head_)
-
- self.num_classes = self.decode_head.num_classes
-
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
-
- def setup(self, stage: str) -> None:
- if self.need_train_names is not None:
- self._set_grad(self.need_train_names, noneed_train_names=[])
-
- def init_weights(self):
- import ipdb; ipdb.set_trace()
- pass
-
- def train(self, mode=True):
- if self.need_train_names is not None:
- return self._set_train_module(mode, self.need_train_names)
- else:
- super().train(mode)
- return self
-
- def extract_feat(self, batch_inputs):
- x0, x1 = self.adaphead(batch_inputs, self.backbone.image_encoder)
- return x0, x1
-
- def validation_step(self, batch, batch_idx):
- data = self.data_preprocessor(batch, False)
- batch_inputs = data['inputs']
- batch_data_samples = data['data_samples']
-
- if batch_data_samples is not None:
- batch_img_metas = [
- data_sample.metainfo for data_sample in batch_data_samples
- ]
- else:
- batch_img_metas = [
- dict(
- ori_shape=batch_inputs.shape[2:],
- img_shape=batch_inputs.shape[2:],
- pad_shape=batch_inputs.shape[2:],
- padding_size=[0, 0, 0, 0])
- ] * batch_inputs.shape[0]
-
- x = self.extract_feat(batch_inputs)
- seg_logits = self.decode_head.predict(x, batch_img_metas, self.test_cfg)
-
- results = self.postprocess_result(seg_logits, batch_data_samples)
-
- preds = []
- targets = []
- for data_sample in results:
- pred_label = data_sample.pred_sem_seg.data.squeeze()
- label = data_sample.gt_sem_seg.data.squeeze().to(pred_label)
-
- preds.append(pred_label)
- targets.append(label)
- preds = torch.stack(preds, dim=0)
- targets = torch.stack(targets, dim=0)
- self.val_evaluator.update(preds, targets)
-
- def training_step(self, batch, batch_idx):
- # import ipdb; ipdb.set_trace()
- data = self.data_preprocessor(batch, True)
- batch_inputs = data['inputs']
- batch_data_samples = data['data_samples']
- x = self.extract_feat(batch_inputs)
- losses = self.decode_head.loss(x, batch_data_samples)
- # import ipdb; ipdb.set_trace()
- parsed_losses, log_vars = self.parse_losses(losses)
- log_vars = {f'train_{k}': v for k, v in log_vars.items()}
- log_vars['loss'] = parsed_losses
- self.log_dict(log_vars, prog_bar=True)
- return log_vars
-
- def on_before_optimizer_step(self, optimizer) -> None:
- self.log_grad(module=self.adaphead)
-
- def postprocess_result(self,
- seg_logits: Tensor,
- data_samples: OptSampleList = None) -> SampleList:
- """ Convert results list to `SegDataSample`.
- Args:
-            seg_logits (Tensor): The raw segmentation logits produced by the
-                model for each input image.
- data_samples (list[:obj:`SegDataSample`]): The seg data samples.
- It usually includes information such as `metainfo` and
- `gt_sem_seg`. Default to None.
- Returns:
- list[:obj:`SegDataSample`]: Segmentation results of the
- input images. Each SegDataSample usually contain:
-
- - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation.
- - ``seg_logits``(PixelData): Predicted logits of semantic
- segmentation before normalization.
- """
- batch_size, C, H, W = seg_logits.shape
-
- if data_samples is None:
- data_samples = [SegDataSample() for _ in range(batch_size)]
- only_prediction = True
- else:
- only_prediction = False
-
- for i in range(batch_size):
- if not only_prediction:
- img_meta = data_samples[i].metainfo
- # remove padding area
- if 'img_padding_size' not in img_meta:
- padding_size = img_meta.get('padding_size', [0] * 4)
- else:
- padding_size = img_meta['img_padding_size']
- padding_left, padding_right, padding_top, padding_bottom =\
- padding_size
- # i_seg_logits shape is 1, C, H, W after remove padding
- i_seg_logits = seg_logits[i:i + 1, :,
- padding_top:H - padding_bottom,
- padding_left:W - padding_right]
-
- flip = img_meta.get('flip', None)
- if flip:
- flip_direction = img_meta.get('flip_direction', None)
- assert flip_direction in ['horizontal', 'vertical']
- if flip_direction == 'horizontal':
- i_seg_logits = i_seg_logits.flip(dims=(3, ))
- else:
- i_seg_logits = i_seg_logits.flip(dims=(2, ))
-
- # resize as original shape
- i_seg_logits = resize(
- i_seg_logits,
- size=img_meta['ori_shape'],
- mode='bilinear',
- align_corners=self.align_corners,
- warning=False).squeeze(0)
- else:
- i_seg_logits = seg_logits[i]
-
- if C > 1:
- i_seg_pred = i_seg_logits.argmax(dim=0, keepdim=True)
- else:
- i_seg_pred = (i_seg_logits >
- self.decode_head.threshold).to(i_seg_logits)
- data_samples[i].set_data({
- 'seg_logits':
- PixelData(**{'data': i_seg_logits}),
- 'pred_sem_seg':
- PixelData(**{'data': i_seg_pred})
- })
-
- return data_samples
-
-
-
-
diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/caltech101.py b/spaces/KyanChen/RSPrompter/mmpretrain/datasets/caltech101.py
deleted file mode 100644
index 71e5de85ff3bbf73c387a071f47113b46be36e2a..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/caltech101.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List
-
-from mmengine import get_file_backend, list_from_file
-
-from mmpretrain.registry import DATASETS
-from .base_dataset import BaseDataset
-from .categories import CALTECH101_CATEGORIES
-
-
-@DATASETS.register_module()
-class Caltech101(BaseDataset):
- """The Caltech101 Dataset.
-
- Support the `Caltech101 `_ Dataset.
- After downloading and decompressing the archive, the dataset directory structure is as follows.
-
- Caltech101 dataset directory: ::
-
- caltech-101
- ├── 101_ObjectCategories
- │ ├── class_x
- │ │ ├── xx1.jpg
- │ │ ├── xx2.jpg
- │ │ └── ...
- │ ├── class_y
- │ │ ├── yy1.jpg
- │ │ ├── yy2.jpg
- │ │ └── ...
- │ └── ...
- ├── Annotations
- │ ├── class_x
- │ │ ├── xx1.mat
- │ │ └── ...
- │ └── ...
- ├── meta
- │ ├── train.txt
- │ └── test.txt
- └── ....
-
- Please note that since there is no official split into training and
- test sets, you can use the train.txt and test.txt provided by us or
- create your own annotation files. Here is the download
- `link `_
- for the annotations.
-
- Args:
- data_root (str): The root directory for the Caltech101 dataset.
- split (str, optional): The dataset split, supports "train" and "test".
- Default to "train".
-
- Examples:
- >>> from mmpretrain.datasets import Caltech101
- >>> train_dataset = Caltech101(data_root='data/caltech-101', split='train')
- >>> train_dataset
- Dataset Caltech101
- Number of samples: 3060
- Number of categories: 102
- Root of dataset: data/caltech-101
- >>> test_dataset = Caltech101(data_root='data/caltech-101', split='test')
- >>> test_dataset
- Dataset Caltech101
- Number of samples: 6728
- Number of categories: 102
- Root of dataset: data/caltech-101
- """ # noqa: E501
-
- METAINFO = {'classes': CALTECH101_CATEGORIES}
-
- def __init__(self, data_root: str, split: str = 'train', **kwargs):
-
- splits = ['train', 'test']
- assert split in splits, \
- f"The split must be one of {splits}, but get '{split}'"
- self.split = split
-
- self.backend = get_file_backend(data_root, enable_singleton=True)
-
- if split == 'train':
- ann_file = self.backend.join_path('meta', 'train.txt')
- else:
- ann_file = self.backend.join_path('meta', 'test.txt')
-
- data_prefix = '101_ObjectCategories'
- test_mode = split == 'test'
-
- super(Caltech101, self).__init__(
- ann_file=ann_file,
- data_root=data_root,
- data_prefix=data_prefix,
- test_mode=test_mode,
- **kwargs)
-
- def load_data_list(self):
- """Load images and ground truth labels."""
-
- pairs = list_from_file(self.ann_file)
- data_list = []
-
- for pair in pairs:
- path, gt_label = pair.split()
- img_path = self.backend.join_path(self.img_prefix, path)
- info = dict(img_path=img_path, gt_label=int(gt_label))
- data_list.append(info)
-
- return data_list
-
- def extra_repr(self) -> List[str]:
- """The extra repr information of the dataset."""
- body = [
- f'Root of dataset: \t{self.data_root}',
- ]
- return body
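The docstring above mentions the `meta/train.txt` / `meta/test.txt` annotation files without showing their layout. As a hedged illustration (the entries below are invented examples, not part of the dataset), each line is an image path relative to `101_ObjectCategories` followed by an integer class label, which is exactly what `load_data_list` splits apart:

```python
# Hypothetical sketch of the "<relative/path> <int label>" annotation format
# parsed by Caltech101.load_data_list; the two entries are made-up examples.
from pathlib import Path

ann = Path("meta_example_train.txt")
ann.write_text(
    "accordion/image_0001.jpg 0\n"
    "airplanes/image_0004.jpg 1\n"
)

data_list = []
for pair in ann.read_text().splitlines():
    path, gt_label = pair.split()  # same split used by load_data_list
    data_list.append(dict(
        img_path=f"101_ObjectCategories/{path}",
        gt_label=int(gt_label),
    ))
print(data_list)
```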
diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/class_num_check_hook.py b/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/class_num_check_hook.py
deleted file mode 100644
index 38170d6604810c575aa5c2c9435c0b75cfa761b2..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/class_num_check_hook.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved
-from mmengine.hooks import Hook
-from mmengine.utils import is_seq_of
-
-from mmpretrain.registry import HOOKS
-
-
-@HOOKS.register_module()
-class ClassNumCheckHook(Hook):
- """Class Number Check HOOK."""
-
- def _check_head(self, runner, dataset):
- """Check whether the `num_classes` in head matches the length of
- `CLASSES` in `dataset`.
-
- Args:
- runner (obj:`Runner`): runner object.
- dataset (obj: `BaseDataset`): the dataset to check.
- """
- model = runner.model
- if dataset.CLASSES is None:
- runner.logger.warning(
- f'Please set class information in `metainfo` '
- f'in the {dataset.__class__.__name__} and '
- f'check if it is consistent with the `num_classes` '
- f'of head')
- else:
- assert is_seq_of(dataset.CLASSES, str), \
- (f'Class information in `metainfo` in '
- f'{dataset.__class__.__name__} should be a tuple of str.')
- for _, module in model.named_modules():
- if hasattr(module, 'num_classes'):
- assert module.num_classes == len(dataset.CLASSES), \
- (f'The `num_classes` ({module.num_classes}) in '
- f'{module.__class__.__name__} of '
- f'{model.__class__.__name__} does not match '
- f'the length of class information in `metainfo` '
- f'({len(dataset.CLASSES)}) in '
- f'{dataset.__class__.__name__}')
-
- def before_train(self, runner):
- """Check whether the training dataset is compatible with head.
-
- Args:
- runner (obj: `IterBasedRunner`): Iter based Runner.
- """
- self._check_head(runner, runner.train_dataloader.dataset)
-
- def before_val(self, runner):
- """Check whether the validation dataset is compatible with head.
-
- Args:
- runner (obj:`IterBasedRunner`): Iter based Runner.
- """
- self._check_head(runner, runner.val_dataloader.dataset)
-
- def before_test(self, runner):
- """Check whether the test dataset is compatible with head.
-
- Args:
- runner (obj:`IterBasedRunner`): Iter based Runner.
- """
- self._check_head(runner, runner.test_dataloader.dataset)
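As a hedged sketch of how a hook like this is usually switched on (the `custom_hooks` key is an assumption about the downstream MMEngine config file, not something stated in this module):

```python
# Hypothetical config fragment: the type string must match the name that
# @HOOKS.register_module() registered above.
custom_hooks = [
    dict(type='ClassNumCheckHook'),
]
```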
diff --git a/spaces/Lamai/LAMAIGPT/tests/unit/test_commands.py b/spaces/Lamai/LAMAIGPT/tests/unit/test_commands.py
deleted file mode 100644
index ecbac9b73bd9ad872931d77e144dd853b3d8ef64..0000000000000000000000000000000000000000
--- a/spaces/Lamai/LAMAIGPT/tests/unit/test_commands.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Unit tests for the commands module"""
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import execute_command, list_agents, start_agent
-
-
-@pytest.mark.integration_test
-def test_make_agent() -> None:
- """Test the make_agent command"""
- with patch("openai.ChatCompletion.create") as mock:
- obj = MagicMock()
- obj.response.choices[0].messages[0].content = "Test message"
- mock.return_value = obj
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat" == agents
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat\n1: write" == agents
diff --git a/spaces/LaynzKunz/Model-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/LaynzKunz/Model-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
deleted file mode 100644
index b412ba2814e114ca7bb00b6fd6ef217f63d788a3..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/Model-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import pyworld
-import numpy as np
-
-
-class HarvestF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
- Interpolate the F0 contour over unvoiced frames.
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
- ip_data[i] = data[i]  # this copy may be unnecessary
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def resize_f0(self, x, target_len):
- source = np.array(x)
- source[source < 0.001] = np.nan
- target = np.interp(
- np.arange(0, len(source) * target_len, len(source)) / target_len,
- np.arange(0, len(source)),
- source,
- )
- res = np.nan_to_num(target)
- return res
-
- def compute_f0(self, wav, p_len=None):
- if p_len is None:
- p_len = wav.shape[0] // self.hop_length
- f0, t = pyworld.harvest(
- wav.astype(np.double),
- fs=self.sampling_rate,
- f0_ceil=self.f0_max,
- f0_floor=self.f0_min,
- frame_period=1000 * self.hop_length / self.sampling_rate,
- )
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
- return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
- def compute_f0_uv(self, wav, p_len=None):
- if p_len is None:
- p_len = wav.shape[0] // self.hop_length
- f0, t = pyworld.harvest(
- wav.astype(np.double),
- fs=self.sampling_rate,
- f0_floor=self.f0_min,
- f0_ceil=self.f0_max,
- frame_period=1000 * self.hop_length / self.sampling_rate,
- )
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
- return self.interpolate_f0(self.resize_f0(f0, p_len))
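A minimal usage sketch for the predictor above. It is not part of the original repository; the synthetic 220 Hz sine input is an assumption made so the snippet is self-contained, and it assumes the repository root is on `PYTHONPATH` so the import resolves.

```python
# Hypothetical usage of HarvestF0Predictor on a synthetic test tone.
import numpy as np
from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor

sr = 44100
t = np.arange(sr) / sr                       # one second of audio
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)    # 220 Hz sine wave

predictor = HarvestF0Predictor(hop_length=512, sampling_rate=sr)
f0 = predictor.compute_f0(wav)               # per-frame F0, unvoiced frames interpolated
f0_uv, vuv = predictor.compute_f0_uv(wav)    # F0 plus voiced/unvoiced flags
print(f0.shape, float(np.median(f0[f0 > 0])))  # roughly 220 for voiced frames
```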
diff --git a/spaces/LuxOAI/ChatGpt-Web/docs/vercel-cn.md b/spaces/LuxOAI/ChatGpt-Web/docs/vercel-cn.md
deleted file mode 100644
index c492296944737156572ea8df8b43b866b3e695bf..0000000000000000000000000000000000000000
--- a/spaces/LuxOAI/ChatGpt-Web/docs/vercel-cn.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Vercel Usage Guide
-
-## How to Create a New Project
-After forking this project on GitHub, you need to create a brand-new project on Vercel and redeploy it. Follow the steps below.
-
-
-1. Open the Vercel dashboard home page;
-2. Click Add New;
-3. Select Project.
-
-
-1. Under Import Git Repository, search for chatgpt-next-web;
-2. Select your newly forked project and click Import.
-
-
-1. On the project configuration page, open Environment Variables to configure the environment variables;
-2. Add environment variables named OPENAI_API_KEY and CODE;
-3. Fill in the corresponding values;
-4. Click Add to confirm each environment variable;
-5. Make sure you have added OPENAI_API_KEY, otherwise the app cannot be used;
-6. Click Deploy to finish creating the project, then wait about 5 minutes for the deployment to complete.
-
-## How to Add a Custom Domain
-[TODO]
-
-## How to Change Environment Variables
-
-1. Go to the project console in Vercel and click the Settings button at the top;
-2. Click Environment Variables in the left sidebar;
-3. Click the button on the right of an existing entry;
-4. Select Edit, make the change, and save.
-
-⚠️ Note: every time you change an environment variable, you need to [redeploy the project](#how-to-redeploy) for the change to take effect!
-
-## How to Redeploy
-
-1. Go to the project console in Vercel and click the Deployments button at the top;
-2. Click the button on the right of the topmost entry in the list;
-3. Click Redeploy to redeploy the project.
\ No newline at end of file
diff --git a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/mocapjs.js b/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/mocapjs.js
deleted file mode 100644
index aafbcbb28c371234a6aebf369b6eca43bd000048..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/mocapjs.js
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*!
- * The MIT License (MIT)
- *
- * Copyright (c) 2016 Omid Alemi
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-/******/ (function(modules) { // webpackBootstrap
-/******/ // The module cache
-/******/ var installedModules = {};
-/******/
-/******/ // The require function
-/******/ function __webpack_require__(moduleId) {
-/******/
-/******/ // Check if module is in cache
-/******/ if(installedModules[moduleId])
-/******/ return installedModules[moduleId].exports;
-/******/
-/******/ // Create a new module (and put it into the cache)
-/******/ var module = installedModules[moduleId] = {
-/******/ exports: {},
-/******/ id: moduleId,
-/******/ loaded: false
-/******/ };
-/******/
-/******/ // Execute the module function
-/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
-/******/
-/******/ // Flag the module as loaded
-/******/ module.loaded = true;
-/******/
-/******/ // Return the exports of the module
-/******/ return module.exports;
-/******/ }
-/******/
-/******/
-/******/ // expose the modules object (__webpack_modules__)
-/******/ __webpack_require__.m = modules;
-/******/
-/******/ // expose the module cache
-/******/ __webpack_require__.c = installedModules;
-/******/
-/******/ // __webpack_public_path__
-/******/ __webpack_require__.p = "";
-/******/
-/******/ // Load entry module and return exports
-/******/ return __webpack_require__(0);
-/******/ })
-/************************************************************************/
-/******/ ([
-/* 0 */
-/***/ function(module, exports, __webpack_require__) {
-
- BVHCharacter = __webpack_require__(1);
- C3DCharacter = __webpack_require__(5);
- MocapParsers = __webpack_require__(2);
-
-/***/ },
-/* 1 */
-/***/ function(module, exports, __webpack_require__) {
-
- var parsers = __webpack_require__(2);
-
- var BVHCharacter = BVHCharacter || {};
-
-
- BVHCharacter = function(n, jm, bm, jg, bg) {
- this.name = n;
-
- this.jointMaterial = jm;
- this.boneMaterial = bm;
- this.makeJointGeometryFCN = jg;
- this.makeBoneGeometryFCN = bg;
-
- this.bvh = [];
- this.skeleton = new THREE.Group();
-
- this.skelScale = 1;
- this.jointMeshes = [];
- this.boneMeshes = [];
- this.rootMeshes = [];
-
- this.originPosition = new THREE.Vector3(0, 0, 0);
-
- this.ready = false;
- this.frameTime = 1 / 30;
- this.frameCount = 0;
- this.animIndex = 0;
- this.animStartTimeRef = 0;
- this.animOffset = 0;
- this.playing = true;
-
- this.debug = true;
- this.useWorker = true;
-
- this.webSocket = [];
- this.streamProtocol = "BVHStream";
- this.keepStreamedFrames = true;
- this.isStreaming = false;
-
- var self = this;
-
- //
-
- this.log = function(m) {
- if (self.debug)
- console.log(self.name + ": " + m.toString());
- };
-
- this.loadFromURL = function(url, callback) {
- self.log("Loading the mocap file ...");
- //Pace.start();
- reader = new parsers.bvhParser(this.name + "READER");
- this.url = url;
- reader.load(url, self.createSkel, self.fillFrames);
-
- this.callb = callback;
- };
-
- this.fillFrames = function() {
- // self.log("Ready!");
- self.ready = true;
- self.playing = true;
-
- if (self.callb)
- self.callb();
- }
-
- this.createSkel = function(data) {
- self.bvh = data;
- self.frameCount = data.frameCount;
- self.frameTime = data.frameTime;
-
- self.log("Mocap file loaded.");
-
- self.log("Creating the WebGL Joints.");
- self.buildSkelJoints(self.bvh.getSkeleton(), 0);
-
- self.log("Creating the WebGL Bones.");
- self.buildSkelBones(self.jointMeshes[0]);
-
- self.skeleton.add(self.jointMeshes[0]);
- self.setSkeletonScale(self.skelScale);
- self.setSkelUp();
- };
-
-
- // Beginning of the Stream Code
- this.onHeaderReceived = function(data) {
- self.log("Loading the mocap header (skeleton) from the stream...");
- headerReader = new parsers.bvhStreamParser();
- headerReader.readHeader(data, self.createSkel);
-
- if (self.callb)
- self.callb();
-
- Pace.stop();
- }
-
- this.onDataChunckReceived = function(rawFrames) {
- var aa = [];
-
- for (f = 1; f < rawFrames.length; f++) {
- var parts = rawFrames[f].trim().split(" ");
- for (var j = 0; j < parts.length; j++)
- parts[j] = +parts[j];
- aa.push(parts);
- }
- diff = self.bvh.fillFrameArray(aa);
- self.frameCount = self.bvh.frameArray.length;
-
-
- if (!self.playing) {
- self.animStartTimeRef = Date.now();
- // self.animOffset -= rawFrames.length;
- }
- /*
- // else
- // self.animOffset = self.animIndex;
- if (diff > 0)
- self.animOffset -= rawFrames.length + 1;
- // self.animIndex -= rawFrames.length; //math.max(0,math.min(rawFrames.length, self.bvh.bufferSize));
- */
- self.fillFrames();
- Pace.stop();
- }
-
- this.loadFromStream = function(url, callback) {
- self.log("Connecting to the stream server...");
- self.isStreaming = true;
- this.callb = callback;
- self.webSocket = new WebSocket(url);
-
- self.webSocket.onerror = function(event) {
- self.log("Error connecting to the stream server " + event.origin);
- };
-
- self.webSocket.onopen = function(event) {
- self.log("Connected to the stream server " + event.origin);
- Pace.stop();
- };
-
- self.webSocket.onmessage = function(event) {
- // I'm not doing much type or content checking here. Let's just trust the sender for now!
- // Protocol for header:
- // $HEADER$
- // BVH...
- // Protocol for data chunk with id#:
- // $FRAMES$id#$
-
- var messageLines = event.data.split('\n');
-
- // self.log("Received somthing!");
- // self.log("The first line is : " + messageLines[0]);
-
- if (messageLines.length < 1)
- return;
-
- if (messageLines[0] == "$HEADER$") {
- self.onHeaderReceived(event.data);
-
- } else if (messageLines[0].startsWith("$FRAMES$")) {
- chunckID = parseInt(messageLines[0].split("$")[2]);
- self.onDataChunckReceived(messageLines, chunckID);
- }
- };
-
- };
-
- this.requestFrames = function(i) {
- self.webSocket.send("$GETFRAMES" + i + "$");
- }
-
- // End of the Stream Code
-
- this.setOriginPosition = function(x, y, z) {
- self.originPosition.set(x, y, z);
- };
-
- this.setSkeletonScale = function(s) {
- self.rootMeshes.forEach(function(c) {
- c.scale.set(s, s, s);
- });
- self.jointMeshes[0].scale.set(s, s, s);
- self.jointMeshes[0].position.multiplyScalar(s);
- };
-
- this.buildSkelJoints = function(joint, parent) {
- var jointMesh = new THREE.Mesh(self.makeJointGeometryFCN(joint.name, self.skelScale), self.jointMaterial);
- jointMesh.bvhIndex = joint.jointIndex;
- jointMesh.offsetVec = new THREE.Vector3(joint.offset[0], joint.offset[1], joint.offset[2]);
- jointMesh.name = joint.name;
- jointMesh.jointparent = parent;
- var a, b, c;
- if (!joint.isEndSite()) {
- a = joint.channelNames[joint.channelNames.length - 3][0];
- b = joint.channelNames[joint.channelNames.length - 2][0];
- c = joint.channelNames[joint.channelNames.length - 1][0];
- }
- jointMesh.rotOrder = a + b + c;
- self.jointMeshes.push(jointMesh);
-
- jointMesh.position.set(jointMesh.offsetVec.x, jointMesh.offsetVec.y, jointMesh.offsetVec.z);
-
- // var axisHelper = new THREE.AxisHelper( 10 / self.skelScale );
- // jointMesh.add( axisHelper );
-
-
- joint.children.forEach(function(child) {
- jointMesh.add(self.buildSkelJoints(child, 1));
- });
-
- return jointMesh;
- };
-
- this.buildSkelBones = function(rootJointMesh) {
- rootJointMesh.traverse(function(childJointMesh) {
- if (childJointMesh.parent !== null)
- {
- if (typeof childJointMesh.bvhIndex === "undefined")
- return;
- // move origin (.translate)
- // rotate
- // translate (offset + position)
- h = math.abs(childJointMesh.offsetVec.length());
- var bgeometry = self.makeBoneGeometryFCN(childJointMesh.parent.name, childJointMesh.name, h, self.skelScale);
-
- //BEGIN - Universal
- if (childJointMesh.offsetVec.y !== 0)
- // bgeometry.translate(0, Math.sign(childJointMesh.offsetVec.y) * h / 2, 0);
- bgeometry.translate(0, -h/2, 0);
- else
- bgeometry.translate(0, -h / 2, 0);
-
-
- dx = Math.atan2(childJointMesh.offsetVec.z,childJointMesh.offsetVec.y);
- dy = Math.atan2(childJointMesh.offsetVec.x,childJointMesh.offsetVec.z);
- dz = Math.atan2(childJointMesh.offsetVec.x,childJointMesh.offsetVec.y);
-
-
- osx = math.sign(childJointMesh.offsetVec.x) === 0 ? 0: math.sign(childJointMesh.offsetVec.x);
- osy = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
- osz = math.sign(childJointMesh.offsetVec.z) === 0 ? 0: math.sign(childJointMesh.offsetVec.z);
-
- osxy = math.sign(childJointMesh.offsetVec.x) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
- osyx = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.x);
- osyz = math.sign(childJointMesh.offsetVec.y) === 0 ? 0: math.sign(childJointMesh.offsetVec.z);
- oszy = math.sign(childJointMesh.offsetVec.z) === 0 ? 0: math.sign(childJointMesh.offsetVec.y);
-
-
- if (osz <0)
- bgeometry.rotateZ(1*(math.pi-dz));
- else if (osz === 0)
- bgeometry.rotateZ(1*(math.pi-dz));
- // console.log();
- else if (osz > 0)
- bgeometry.rotateZ(1*(2*math.pi-dz));
-
-
- if (oszy >0)
- bgeometry.rotateX(-1 *(2*math.pi-dx));
- else if (childJointMesh.offsetVec.z === 0)
- // bgeometry.rotateX(-1*(math.pi-dx));
- console.log();
- else if (oszy < 0)
- bgeometry.rotateX(-1*(2*math.pi-dx));
-
- // bgeometry.rotateY(math.pi-dy);
-
- //END - Universal
-
- var boneMesh = new THREE.Mesh(bgeometry, self.boneMaterial);
-
- boneMesh.joint = childJointMesh.parent;
- boneMesh.name = childJointMesh.parent.name + " > " + childJointMesh.name;
-
- childJointMesh.parent.add(boneMesh);
- self.boneMeshes.push(boneMesh);
- }
- });
- };
-
- this.animFrame = function(frame) {
- var torad = Math.PI / 180;
-
- if (frame >= self.frameCount) {
- self.playing = false;
- return;
- }
-
-
- this.jointMeshes[0].traverse(function(joint) {
-
- if (typeof joint.bvhIndex === "undefined") {
- return;
- }
-
-
- var bj = self.bvh.jointArray[joint.bvhIndex];
- var offsetVec = joint.offsetVec;
-
- var thisEuler = [];
-
-
- thisEuler = new THREE.Euler(
- (bj.channels[frame][bj.rotationIndex.x] * torad),
- (bj.channels[frame][bj.rotationIndex.y] * torad),
- (bj.channels[frame][bj.rotationIndex.z] * torad), joint.rotOrder);
-
-
- joint.localRotMat = new THREE.Matrix4();
- joint.localRotMat.makeRotationFromEuler(thisEuler);
- joint.rotation.setFromRotationMatrix(joint.localRotMat);
-
- if (joint.jointparent !== 0) {
- // joint.position.set(offsetVec.x, offsetVec.y, offsetVec.z);
- } else { // root
- joint.position.set(
- bj.channels[frame][bj.positionIndex.x] * self.skelScale + self.originPosition.x,
- bj.channels[frame][bj.positionIndex.y] * self.skelScale + self.originPosition.y,
- bj.channels[frame][bj.positionIndex.z] * self.skelScale + self.originPosition.z);
- }
- });
-
- if (self.isStreaming) {
- self.bvh.consumeFrames(frame);
- self.frameCount = self.bvh.frameArray.length;
- // console.log(self.frameCount);
- if (self.frameCount <= 0)
- self.playing = false;
-
- self.animOffset = 0; // self.animOffset - frame;
- self.animStartTimeRef = Date.now();
- }
- };
-
- this.setSkelUp = function() {
- this.jointMeshes[0].traverse(function(joint) {
- if (typeof joint.bvhIndex === "undefined")
- return;
-
- var bj = self.bvh.jointArray[joint.bvhIndex];
-
- var offsetVec = joint.offsetVec;
- var torad = Math.PI / 180;
- var thisEuler = [];
-
- thisEuler = new THREE.Euler(0, 0, 0, joint.rotOrder);
-
- joint.localRotMat = new THREE.Matrix4();
- joint.localRotMat.makeRotationFromEuler(thisEuler);
- joint.rotation.setFromRotationMatrix(joint.localRotMat);
-
- if (joint.jointparent !== 0) {
- // joint.position.set(offsetVec.x, offsetVec.y, offsetVec.z);
- } else { // root
- joint.position.set(self.originPosition.x, self.originPosition.y, self.originPosition.z);
- }
- });
- };
- };
-
-
- module.exports = BVHCharacter;
-
-/***/ },
-/* 2 */
-/***/ function(module, exports, __webpack_require__) {
-
- module.exports ={
- bvhParser: __webpack_require__(3),
- bvhStreamParser: __webpack_require__(4)
- };
-
-/***/ },
-/* 3 */
-/***/ function(module, exports) {
-
- // By Ankit
- var BVHReader = function () {
- this.load = function (url, callbackHeader, callbackFrameArray) {
- $.get(url, function (str) {
-
- var dataReturn = parse(str);
-
-
- var jointStack = dataReturn[0];
- var jointMap = dataReturn[1];
- var jointArray = dataReturn[2];
- var connectivityMatrix = dataReturn[3]
- _bvh = new BVHReader.BVH.Skeleton(jointStack[0], jointMap, jointArray, dataReturn[3], dataReturn[4], dataReturn[5], []);
-
- if (callbackHeader)
- callbackHeader(_bvh,'BVH');
- console.log("Blah");
- _bvh.fillFrameArray(dataReturn[6]);
-
- if (callbackFrameArray)
- callbackFrameArray();
-
- });
- };
-
- function parse(str) {
- var lines = str.split('\n');
- var jointStack = [];
- var jointMap = {};
- var jointArray = [];
- var connectivityMatrix = [];
- var frameCount, frameTime, frameArray = [];
- var i = 0;
- //parse structure
- for (i = 1; i < lines.length; i++) {
- if (!parseLine(lines[i], jointStack, jointMap, jointArray, connectivityMatrix)) {
- break;
- }
- }
-
- for (i = i + 1; i < lines.length; i++) {
- var line = lines[i].trim();
- //when encountering last line
- if (line === "")
- break;
- if (line.indexOf("Frames") === 0) {
- frameCount = +(line.split(/\b/)[2]);
- } else if (line.indexOf("Frame Time") === 0) {
- frameTime = +( line.substr(line.indexOf(":") + 1).trim() )
- } else {
- var parts = line.split(" ");
- for (var j = 0; j < parts.length; j++)
- parts[j] = +parts[j];
- frameArray.push(parts);
- }
- }
-
- //parse motion
- return [jointStack, jointMap, jointArray, connectivityMatrix, frameCount, frameTime, frameArray];
- }
-
- //parses individual line in the bvh file.
- var parseLine = function (line, jointStack, jointMap, jointArray, connectivityMatrix) {
- line = line.trim();
- if (line.indexOf("ROOT") > -1 || line.indexOf("JOINT") > -1 || line.indexOf("End") > -1) {
- var parts = line.split(" ");
- var title = parts[1]; //temporary variable to be used after creating the joint object
- parts[1] = parts[1] + "-" + jointArray.length;
- var joint = new BVHReader.BVH.Joint(parts[1]);
- joint.title = title;
- jointStack.push(joint);
-
- joint.jointIndex = Object.keys(jointMap).length;
- jointMap[parts[1]] = joint;
- jointArray.push(joint);
- //if the joint is not an end site
- if( line.indexOf("End") != 0 ){
- if (jointArray.length == 1) {
- joint.channelOffset = 0;
- } else {
- joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
- }
- }else{
- //channelLength is 0 for end joints
- joint.channelLength = 0;
- joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
- }
-
- } else if (line.indexOf("{") === 0) {
-
- } else if (line.indexOf("OFFSET") === 0) {
- var parts = line.split(" ");
- jointStack[jointStack.length - 1]["offset"] = parts.slice(1);
- for(x in jointStack[jointStack.length - 1]["offset"]){
- jointStack[jointStack.length - 1]["offset"][x] = +jointStack[jointStack.length - 1]["offset"][x]
- }
- } else if (line.indexOf("CHANNELS") === 0) {
- var parts = line.split(" ");
- jointStack[jointStack.length - 1].setChannelNames(parts.slice(2));
- jointStack[jointStack.length - 1]["channelLength"] = +parts[1];
- } else if (line.indexOf("}") === 0) {
- if (jointStack.length > 1) {
- child = jointStack.pop();
- jointStack[jointStack.length - 1].children.push(child);
- child.parent = jointStack[jointStack.length - 1];
-
- connectivityMatrix.push([child.parent, child])
-
- // if(!connectivityMatrix[child.name]){
- // connectivityMatrix[child.name] = {}
- // }
- // connectivityMatrix[child.name][child.parent.name] = 1;
-
- // if(!connectivityMatrix[child.parent.name]){
- // connectivityMatrix[child.parent.name] = {}
- // }
- // connectivityMatrix[child.parent.name][child.name] = 1;
- }
- } else if (line.indexOf("MOTION") == 0) {
- return false;
- }
-
- return true;
- };
- };
-
- BVHReader.BVH = BVHReader.BVH || {};
-
- BVHReader.BVH.Joint = function (name, index) {
-
- this.name = name;
- this.children = [];
- this.isEndSite = function () {
- return this.children.length == 0;
- };
- this.rotationIndex = {};
- this.positionIndex = {};
-
- this.getChannels = function () {
- var allChannels = [];
- for (i = 0; i < this.skeleton.frameArray.length; i++) {
- allChannels.push(this.getChannelsAt(i));
- }
- return allChannels;
- };
- this.getChannelsAt = function (frameNum) {
- var channelsAtFrame = this.skeleton.frameArray[frameNum];
- return channelsAtFrame.slice(this.channelOffset, this.channelOffset + this.channelLength);
- };
-
- this.setChannelNames = function (nameArr){
- this.channelNames = nameArr;
- for(i in this.channelNames){
- var name = this.channelNames[i];
- switch(name){
- case "Xposition": this.positionIndex.x = i; break;
- case "Yposition": this.positionIndex.y = i; break;
- case "Zposition": this.positionIndex.z = i; break;
-
- case "Xrotation": this.rotationIndex.x = i; break;
- case "Yrotation": this.rotationIndex.y = i; break;
- case "Zrotation": this.rotationIndex.z = i; break;
- }
- }
- }
- };
-
- BVHReader.BVH.Skeleton = function (root, map, arr, connectivityMatrix, frameCount, frameTime, frameArray) {
- thisSkeleton = this;
- this.root = root;
- this.jointMap = map;
- this.jointArray = arr;
- this.connectivityMatrix = connectivityMatrix;
- this.frameCount = frameCount;
- this.frameTime = frameTime;
- this.frameArray = frameArray;
-
- for (i = 0; i < this.jointArray.length; i++) {
- this.jointArray[i].skeleton = thisSkeleton;
- }
-
-
-
- this.fillFrameArray = function (fa) {
- this.frameArray = fa;
- this.frameCount = fa.length;
- //all the structures are ready. let's calculate the positions
- for(j=0; j < this.jointArray.length; j++){
- var joint = this.jointArray[j];
- updateWithPositions(joint);
- }
- }
-
- this.getChannels = function () {
- return frameArray;
- };
- this.getChannelsAt = function (frameNum) {
- //How do I know which column is what?
- //Why do you need the column index?
- return frameArray[frameNum];
- };
- this.getFrameRate = function () {
- return frameCount / frameTime;
- };
- this.getSkeleton = function () {
- return root;
- };
-
- this.getHeadJoint = function () {
- // do a quick search in the joint names to see if any of them matches head, else return the something!!!!
- return jointMap["Head"];
- };
- this.getPositionsAt = function (frameNum) {
- //for each joint, calculate its position in XYZ
- //return an array of joints, each with .x, .y, and .z properties
- posFrame = [];
-
- for (j=0;j -1 || line.indexOf("JOINT") > -1 || line.indexOf("End") > -1) {
- var parts = line.split(" ");
- var title = parts[1]; //temporary variable to be used after creating the joint object
- parts[1] = parts[1] + "-" + jointArray.length;
- var joint = new BVHStreamParser.BVH.Joint(parts[1]);
- joint.title = title;
- jointStack.push(joint);
-
- joint.jointIndex = Object.keys(jointMap).length;
- jointMap[parts[1]] = joint;
- jointArray.push(joint);
- //if the joint is not an end site
- if( line.indexOf("End") != 0 ){
- if (jointArray.length == 1) {
- joint.channelOffset = 0;
- } else {
- joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
- }
- }else{
- //channelLength is 0 for end joints
- joint.channelLength = 0;
- joint.channelOffset = jointArray[jointArray.length - 2].channelOffset + jointArray[jointArray.length - 2].channelLength;
- }
-
- } else if (line.indexOf("{") === 0) {
-
- } else if (line.indexOf("OFFSET") === 0) {
- var parts = line.split(" ");
- jointStack[jointStack.length - 1]["offset"] = parts.slice(1);
- for(x in jointStack[jointStack.length - 1]["offset"]){
- jointStack[jointStack.length - 1]["offset"][x] = +jointStack[jointStack.length - 1]["offset"][x]
- }
- } else if (line.indexOf("CHANNELS") === 0) {
- var parts = line.split(" ");
- jointStack[jointStack.length - 1].setChannelNames(parts.slice(2));
- jointStack[jointStack.length - 1]["channelLength"] = +parts[1];
- } else if (line.indexOf("}") === 0) {
- if (jointStack.length > 1) {
- child = jointStack.pop();
- jointStack[jointStack.length - 1].children.push(child);
- child.parent = jointStack[jointStack.length - 1];
-
- connectivityMatrix.push([child.parent, child])
- }
- } else if (line.indexOf("MOTION") == 0) {
- return false;
- }
-
- return true;
- };
- };
-
- BVHStreamParser.BVH = BVHStreamParser.BVH || {};
-
- BVHStreamParser.BVH.Joint = function (name, index) {
-
- this.name = name;
- this.children = [];
- this.isEndSite = function () {
- return this.children.length == 0;
- };
- this.rotationIndex = {};
- this.positionIndex = {};
-
- this.getChannels = function () {
- var allChannels = [];
- for (i = 0; i < this.skeleton.frameArray.length; i++) {
- allChannels.push(this.getChannelsAt(i));
- }
- return allChannels;
- };
- this.getChannelsAt = function (frameNum) {
- var channelsAtFrame = this.skeleton.frameArray[frameNum];
- return channelsAtFrame.slice(this.channelOffset, this.channelOffset + this.channelLength);
- };
-
- this.setChannelNames = function (nameArr){
- this.channelNames = nameArr;
- for(i in this.channelNames){
- var name = this.channelNames[i];
- switch(name){
- case "Xposition": this.positionIndex.x = i; break;
- case "Yposition": this.positionIndex.y = i; break;
- case "Zposition": this.positionIndex.z = i; break;
-
- case "Xrotation": this.rotationIndex.x = i; break;
- case "Yrotation": this.rotationIndex.y = i; break;
- case "Zrotation": this.rotationIndex.z = i; break;
- }
- }
- }
- };
-
- BVHStreamParser.BVH.Skeleton = function (root, map, arr, connectivityMatrix, frameCount, frameTime, frameArray) {
- thisSkeleton = this;
- this.root = root;
- this.jointMap = map;
- this.jointArray = arr;
- this.connectivityMatrix = connectivityMatrix;
- this.frameCount = frameCount;
- this.frameTime = frameTime;
- this.frameArray = frameArray;
- this.bufferSize = 500;
-
- for (i = 0; i < this.jointArray.length; i++) {
- this.jointArray[i].skeleton = thisSkeleton;
- }
-
- this.fillFrameArray = function (fa) {
- this.frameArray.push.apply(this.frameArray,fa);
- //this.frameArray.push.apply(this.frameArray,fa);
-
- diff = this.frameArray.length - this.bufferSize;
- // console.log('diff = ' + diff);
-
- /*
- if (diff > 0)
- for (i=0;i 0)
- addedCount = this.frameCount;
- else
- addedCount = fa.length;
-
- for(j=0; j < this.jointArray.length; j++){
- var joint = this.jointArray[j];
- updateWithPositionsSinceLast(joint, addedCount);
- }
-
- return diff;
- }
-
- this.consumeFrames = function (index) {
- for (i=0;i<=index;i++) {
- this.frameArray.shift();
- for (j=0;jOrange!
\ No newline at end of file
diff --git a/spaces/NATSpeech/PortaSpeech/docs/fastspeech2.md b/spaces/NATSpeech/PortaSpeech/docs/fastspeech2.md
deleted file mode 100644
index 572874f34bb45de5b6788f182af8bcc3c8fddca1..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/PortaSpeech/docs/fastspeech2.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Run FastSpeech 2
-
-## Quick Start
-
-### Install Dependencies
-
-Install dependencies following [readme.md](../readme.md)
-
-### Set Config Path and Experiment Name
-
-```bash
-export CONFIG_NAME=egs/datasets/audio/lj/fs2_orig.yaml
-export MY_EXP_NAME=fs2_exp
-```
-
-### Preprocess and Binarize the Dataset
-
-Prepare dataset following [prepare_data.md](./prepare_data.md)
-
-### Prepare Vocoder
-
-Prepare vocoder following [prepare_vocoder.md](./prepare_vocoder.md)
-
-## Training
-
-```bash
-CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config $CONFIG_NAME --exp_name $MY_EXP_NAME --reset
-```
-
-You can check the training and validation curves by opening TensorBoard via:
-
-```bash
-tensorboard --logdir checkpoints/$MY_EXP_NAME
-```
-
-## Inference (Testing)
-
-```bash
-CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config $CONFIG_NAME --exp_name $MY_EXP_NAME --infer
-```
-
-## Citation
-
-If you find this useful for your research, please cite the following.
-
-```
-@inproceedings{ren2020fastspeech,
- title={FastSpeech 2: Fast and High-Quality End-to-End Text to Speech},
- author={Ren, Yi and Hu, Chenxu and Tan, Xu and Qin, Tao and Zhao, Sheng and Zhao, Zhou and Liu, Tie-Yan},
- booktitle={International Conference on Learning Representations},
- year={2020}
-}
-```
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/input_reader.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/input_reader.py
deleted file mode 100644
index 6e65243f6863ccadb45704b3ed487aec3b8ab21a..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/input_reader.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Data loader and input processing."""
-
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
-
-import tensorflow as tf
-
-from typing import Text, Optional
-from official.modeling.hyperparams import params_dict
-from official.vision.detection.dataloader import factory
-from official.vision.detection.dataloader import mode_keys as ModeKeys
-
-
-class InputFn(object):
- """Input function that creates dataset from files."""
-
- def __init__(self,
- file_pattern: Text,
- params: params_dict.ParamsDict,
- mode: Text,
- batch_size: int,
- num_examples: Optional[int] = -1):
- """Initialize.
-
- Args:
- file_pattern: the file pattern for the data example (TFRecords).
- params: the parameter object for constructing example parser and model.
- mode: ModeKeys.TRAIN or ModeKeys.EVAL.
- batch_size: the data batch size.
- num_examples: If positive, only takes this number of examples and raise
- tf.errors.OutOfRangeError after that. If non-positive, it will be
- ignored.
- """
- assert file_pattern is not None
- assert mode is not None
- assert batch_size is not None
- self._file_pattern = file_pattern
- self._mode = mode
- self._is_training = (mode == ModeKeys.TRAIN)
- self._batch_size = batch_size
- self._num_examples = num_examples
- self._parser_fn = factory.parser_generator(params, mode)
- self._dataset_fn = tf.data.TFRecordDataset
-
- self._input_sharding = (not self._is_training)
- try:
- if self._is_training:
- self._input_sharding = params.train.input_sharding
- else:
- self._input_sharding = params.eval.input_sharding
- except AttributeError:
- pass
-
- def __call__(self, ctx=None, batch_size: int = None):
- """Provides tf.data.Dataset object.
-
- Args:
- ctx: context object.
- batch_size: expected batch size of the input data.
-
- Returns:
- tf.data.Dataset object.
- """
- if not batch_size:
- batch_size = self._batch_size
- assert batch_size is not None
- dataset = tf.data.Dataset.list_files(
- self._file_pattern, shuffle=self._is_training)
-
- if self._input_sharding and ctx and ctx.num_input_pipelines > 1:
- dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
- dataset = dataset.cache()
-
- if self._is_training:
- dataset = dataset.repeat()
-
- dataset = dataset.interleave(
- map_func=self._dataset_fn, cycle_length=32,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
-
- if self._is_training:
- dataset = dataset.shuffle(1000)
- if self._num_examples > 0:
- dataset = dataset.take(self._num_examples)
-
- # Parses the fetched records to input tensors for model function.
- dataset = dataset.map(
- self._parser_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
- dataset = dataset.batch(batch_size, drop_remainder=True)
- dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
- return dataset
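For readers who want to see the pipeline in isolation, here is a hedged, self-contained sketch of the same tf.data pattern that `InputFn.__call__` builds: list files, interleave TFRecord readers, shuffle, parse, batch, prefetch. The dummy records and the trivial parser are assumptions made so the snippet runs on its own; they stand in for the real detection examples and `factory.parser_generator`.

```python
# Minimal stand-in for the InputFn pipeline above.
import os
import tempfile
import tensorflow as tf

tmp = tempfile.mkdtemp()
for shard in range(2):
    path = os.path.join(tmp, "data-%05d.tfrecord" % shard)
    with tf.io.TFRecordWriter(path) as writer:
        for i in range(8):
            example = tf.train.Example(features=tf.train.Features(feature={
                "value": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[shard * 8 + i]))}))
            writer.write(example.SerializeToString())


def parse(record):
    # Trivial parser standing in for factory.parser_generator(params, mode).
    feats = tf.io.parse_single_example(
        record, {"value": tf.io.FixedLenFeature([], tf.int64)})
    return feats["value"]


dataset = tf.data.Dataset.list_files(os.path.join(tmp, "data-*"), shuffle=True)
dataset = dataset.interleave(
    tf.data.TFRecordDataset, cycle_length=2,
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.shuffle(16)
dataset = dataset.map(parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(4, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)

for batch in dataset.take(2):
    print(batch.numpy())
```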
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/configs.py b/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/configs.py
deleted file mode 100644
index 8a79a1cd9b563a554614b9d4f2f0b93acf016791..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/configs.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Lint as: python3
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Configuration utils for image classification experiments."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import dataclasses
-
-from official.vision.image_classification import dataset_factory
-from official.vision.image_classification.configs import base_configs
-from official.vision.image_classification.efficientnet import efficientnet_config
-from official.vision.image_classification.resnet import resnet_config
-
-
-@dataclasses.dataclass
-class EfficientNetImageNetConfig(base_configs.ExperimentConfig):
- """Base configuration to train efficientnet-b0 on ImageNet.
-
- Attributes:
- export: An `ExportConfig` instance
- runtime: A `RuntimeConfig` instance.
- train_dataset: A `DatasetConfig` instance for training.
- validation_dataset: A `DatasetConfig` instance for validation.
- train: A `TrainConfig` instance.
- evaluation: An `EvalConfig` instance.
- model: A `ModelConfig` instance.
-
- """
- export: base_configs.ExportConfig = base_configs.ExportConfig()
- runtime: base_configs.RuntimeConfig = base_configs.RuntimeConfig()
- train_dataset: dataset_factory.DatasetConfig = \
- dataset_factory.ImageNetConfig(split='train')
- validation_dataset: dataset_factory.DatasetConfig = \
- dataset_factory.ImageNetConfig(split='validation')
- train: base_configs.TrainConfig = base_configs.TrainConfig(
- resume_checkpoint=True,
- epochs=500,
- steps=None,
- callbacks=base_configs.CallbacksConfig(enable_checkpoint_and_export=True,
- enable_tensorboard=True),
- metrics=['accuracy', 'top_5'],
- time_history=base_configs.TimeHistoryConfig(log_steps=100),
- tensorboard=base_configs.TensorboardConfig(track_lr=True,
- write_model_weights=False),
- set_epoch_loop=False)
- evaluation: base_configs.EvalConfig = base_configs.EvalConfig(
- epochs_between_evals=1,
- steps=None)
- model: base_configs.ModelConfig = \
- efficientnet_config.EfficientNetModelConfig()
-
-
-@dataclasses.dataclass
-class ResNetImagenetConfig(base_configs.ExperimentConfig):
- """Base configuration to train resnet-50 on ImageNet."""
- export: base_configs.ExportConfig = base_configs.ExportConfig()
- runtime: base_configs.RuntimeConfig = base_configs.RuntimeConfig()
- train_dataset: dataset_factory.DatasetConfig = \
- dataset_factory.ImageNetConfig(split='train',
- one_hot=False,
- mean_subtract=True,
- standardize=True)
- validation_dataset: dataset_factory.DatasetConfig = \
- dataset_factory.ImageNetConfig(split='validation',
- one_hot=False,
- mean_subtract=True,
- standardize=True)
- train: base_configs.TrainConfig = base_configs.TrainConfig(
- resume_checkpoint=True,
- epochs=90,
- steps=None,
- callbacks=base_configs.CallbacksConfig(enable_checkpoint_and_export=True,
- enable_tensorboard=True),
- metrics=['accuracy', 'top_5'],
- time_history=base_configs.TimeHistoryConfig(log_steps=100),
- tensorboard=base_configs.TensorboardConfig(track_lr=True,
- write_model_weights=False),
- set_epoch_loop=False)
- evaluation: base_configs.EvalConfig = base_configs.EvalConfig(
- epochs_between_evals=1,
- steps=None)
- model: base_configs.ModelConfig = resnet_config.ResNetModelConfig()
-
-
-def get_config(model: str, dataset: str) -> base_configs.ExperimentConfig:
- """Given model and dataset names, return the ExperimentConfig."""
- dataset_model_config_map = {
- 'imagenet': {
- 'efficientnet': EfficientNetImageNetConfig(),
- 'resnet': ResNetImagenetConfig(),
- }
- }
- try:
- return dataset_model_config_map[dataset][model]
- except KeyError:
- if dataset not in dataset_model_config_map:
- raise KeyError('Invalid dataset received. Received: {}. Supported '
- 'datasets include: {}'.format(
- dataset,
- ', '.join(dataset_model_config_map.keys())))
- raise KeyError('Invalid model received. Received: {}. Supported models for'
- '{} include: {}'.format(
- model,
- dataset,
- ', '.join(dataset_model_config_map[dataset].keys())))
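A hedged usage sketch of `get_config` (the lookup values are the only ones the mapping above supports, and the failing lookup is shown only to illustrate the error path):

```python
# Hypothetical usage: look up a predefined experiment config and inspect it.
from official.vision.image_classification.configs import configs

config = configs.get_config(model='resnet', dataset='imagenet')
print(config.train.epochs)                     # 90, per ResNetImagenetConfig
print(config.evaluation.epochs_between_evals)  # 1

# An unsupported pair raises a KeyError listing the supported options.
try:
    configs.get_config(model='vgg', dataset='imagenet')
except KeyError as err:
    print(err)
```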
diff --git a/spaces/NMEX/rvc-hoyo-game/infer_pack/modules.py b/spaces/NMEX/rvc-hoyo-game/infer_pack/modules.py
deleted file mode 100644
index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000
--- a/spaces/NMEX/rvc-hoyo-game/infer_pack/modules.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from infer_pack.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(
- self,
- in_channels,
- hidden_channels,
- out_channels,
- kernel_size,
- n_layers,
- p_dropout,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(
- nn.Conv1d(
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
- for _ in range(n_layers - 1):
- self.conv_layers.append(
- nn.Conv1d(
- hidden_channels,
- hidden_channels,
- kernel_size,
- padding=kernel_size // 2,
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
-
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size**i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(
- nn.Conv1d(
- channels,
- channels,
- kernel_size,
- groups=channels,
- dilation=dilation,
- padding=padding,
- )
- )
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(
- self,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- p_dropout=0,
- ):
- super(WN, self).__init__()
- assert kernel_size % 2 == 1
- self.hidden_channels = hidden_channels
- self.kernel_size = (kernel_size,)
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(
- gin_channels, 2 * hidden_channels * n_layers, 1
- )
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
-
- for i in range(n_layers):
- dilation = dilation_rate**i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(
- hidden_channels,
- 2 * hidden_channels,
- kernel_size,
- dilation=dilation,
- padding=padding,
- )
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:, self.hidden_channels :, :]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels, 1))
- self.logs = nn.Parameter(torch.zeros(channels, 1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1, 2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False,
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=p_dropout,
- gin_channels=gin_channels,
- )
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1, 2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class ConvFlow(nn.Module):
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- n_layers,
- num_bins=10,
- tail_bound=5.0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
- self.proj = nn.Conv1d(
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
- )
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
-        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*(num_bins*3-1), t] -> [b, c, t, num_bins*3-1]
-
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
- self.filter_channels
- )
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(
- x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails="linear",
- tail_bound=self.tail_bound,
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
- if not reverse:
- return x, logdet
- else:
- return x
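
The flow layers above share one forward/reverse contract: the forward pass returns the transformed tensor plus a log-determinant, and the reverse pass inverts it exactly. A minimal self-contained sketch of the affine transform used by `ElementwiseAffine` (re-implemented inline with plain tensors, so nothing here depends on the classes above) shows the round trip recovering its input:

```python
import torch

m, logs = torch.randn(4, 1), torch.randn(4, 1)   # per-channel shift / log-scale
x = torch.randn(2, 4, 10)                         # [batch, channels, time]
x_mask = torch.ones(2, 1, 10)

y = (m + torch.exp(logs) * x) * x_mask            # forward direction
logdet = torch.sum(logs * x_mask, [1, 2])         # per-sample log-determinant
x_rec = (y - m) * torch.exp(-logs) * x_mask       # reverse direction
assert torch.allclose(x, x_rec, atol=1e-6)
```
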
diff --git a/spaces/Ngadou/NLP/README.md b/spaces/Ngadou/NLP/README.md
deleted file mode 100644
index 50b446afe3831dcbe6e77898f7efd985957b8b20..0000000000000000000000000000000000000000
--- a/spaces/Ngadou/NLP/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NLP
-emoji: 🦀
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-license: cc-by-nc-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NoFearNoDistractions/ChatGPT4/README.md b/spaces/NoFearNoDistractions/ChatGPT4/README.md
deleted file mode 100644
index 7938de14e5355209aaae713f289ca469181bbb17..0000000000000000000000000000000000000000
--- a/spaces/NoFearNoDistractions/ChatGPT4/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Chat-with-GPT4
-emoji: 🚀
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ysharma/ChatGPT4
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NooneImportant/tts/app.py b/spaces/NooneImportant/tts/app.py
deleted file mode 100644
index f9d6b62fb97bfee04db5e4011aa47a1af90c2599..0000000000000000000000000000000000000000
--- a/spaces/NooneImportant/tts/app.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import random
-import gradio as gr
-import numpy as np
-from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError
-
-def pad_buffer(audio):
- # Pad buffer to multiple of 2 bytes
- buffer_size = len(audio)
- element_size = np.dtype(np.int16).itemsize
- if buffer_size % element_size != 0:
- audio = audio + b'\0' * (element_size - (buffer_size % element_size))
- return audio
-
-def generate_voice(text, voice_name):
- try:
- audio = generate(
-            text[:333], # Limit to 333 characters
- voice=voice_name,
- model="eleven_multilingual_v2",
-
- )
- return (44100, np.frombuffer(pad_buffer(audio), dtype=np.int16))
- except UnauthenticatedRateLimitError as e:
- raise gr.Error("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.")
- except Exception as e:
- raise gr.Error(e)
-
-
-badges = """
-"""
-
-description = """
-A demo of the world's most advanced TTS systems, made by [ElevenLabs](https://elevenlabs.io). Eleven Multilingual V2 is a single foundational model supporting 28 languages including: English, Chinese, Spanish, Hindi, Portuguese, French, German, Japanese, Arabic, Korean, Indonesian, Italian, Dutch, Turkish, Polish, Swedish, Filipino, Malay, Romanian, Ukrainian, Greek, Czech, Danish, Finnish, Bulgarian, Croatian, Slovak, and Tamil. Sign up on [ElevenLabs](https://elevenlabs.io) to get fast access, long-form generation, voice cloning, API keys, and more!
-"""
-
-with gr.Blocks() as block:
- gr.Markdown('[  ](https://elevenlabs.io)')
- gr.Markdown(badges)
- gr.Markdown(description)
-
- input_text = gr.Textbox(
- label="Input Text (250 characters max)",
- lines=2,
- value="Hello! 你好! Hola! नमस्ते! Bonjour! こんにちは! مرحبا! 안녕하세요! Ciao! Cześć! Привіт! Γειά σας! Здравей! வணக்கம்!",
- elem_id="input_text"
- )
-
- all_voices = voices()
- input_voice = gr.Dropdown(
- [ voice.name for voice in all_voices ],
- value="Bella",
- label="Voice",
- elem_id="input_voice"
- )
-
- run_button = gr.Button(
- text="Generate Voice",
- type="button"
- )
-
- out_audio = gr.Audio(
- label="Generated Voice",
- type="numpy",
- elem_id="out_audio",
- format="mp3"
- )
-
- inputs = [input_text, input_voice]
- outputs = [out_audio]
-
- run_button.click(
- fn=generate_voice,
- inputs=inputs,
- outputs=outputs,
- queue=True
- )
-
-block.queue(concurrency_count=5).launch(debug=True)
\ No newline at end of file
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/hubert_pretraining.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/hubert_pretraining.py
deleted file mode 100644
index f756080dd17b380d004420c045a8744411c0e93d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/hubert_pretraining.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-
-import logging
-import os
-import sys
-from typing import Dict, List, Optional, Tuple
-
-import numpy as np
-
-from dataclasses import dataclass, field
-from fairseq.data import Dictionary, HubertDataset
-from fairseq.dataclass.configs import FairseqDataclass
-from fairseq.tasks import register_task
-from fairseq.tasks.fairseq_task import FairseqTask
-from omegaconf import MISSING
-
-logger = logging.getLogger(__name__)
-
-
-class LabelEncoder(object):
- def __init__(self, dictionary: Dictionary) -> None:
- self.dictionary = dictionary
-
- def __call__(self, label: str) -> List[str]:
- return self.dictionary.encode_line(
- label, append_eos=False, add_if_not_exist=False,
- )
-
-
-@dataclass
-class HubertPretrainingConfig(FairseqDataclass):
- data: str = field(
- default=MISSING, metadata={"help": "path to data directory"}
- )
- fine_tuning: bool = field(
- default=False, metadata={"help": "set to true if fine-tuning Hubert"}
- )
- labels: List[str] = field(
- default_factory=lambda: ["ltr"],
- metadata={
- "help": (
- "extension of the label files to load, frame-level labels for"
- " pre-training, and sequence-level label for fine-tuning"
- )
- },
- )
- label_dir: Optional[str] = field(
- default=None,
- metadata={
- "help": "if set, looks for labels in this directory instead",
- },
- )
- label_rate: int = field(
- default=-1,
- metadata={"help": "label frame rate. -1 for sequence label"},
- )
- sample_rate: int = field(
- default=16_000,
- metadata={
- "help": "target sample rate. audio files will be up/down "
- "sampled to this rate"
- },
- )
- normalize: bool = field(
- default=False,
- metadata={
- "help": "if set, normalizes input to have 0 mean and unit variance"
- },
- )
- enable_padding: bool = field(
- default=False,
- metadata={"help": "pad shorter samples instead of cropping"},
- )
- max_keep_size: Optional[int] = field(
- default=None,
- metadata={"help": "exclude sample longer than this"},
- )
- max_sample_size: Optional[int] = field(
- default=None,
- metadata={"help": "max sample size to crop to for batching"},
- )
- min_sample_size: Optional[int] = field(
- default=None,
- metadata={"help": "min sample size to crop to for batching"},
- )
- single_target: Optional[bool] = field(
- default=False,
- metadata={
- "help": "if set, AddTargetDatasets outputs same keys "
- "as AddTargetDataset"
- },
- )
- random_crop: Optional[bool] = field(
- default=True,
- metadata={"help": "always crop from the beginning if false"},
- )
- pad_audio: Optional[bool] = field(
- default=False,
- metadata={"help": "pad audio to the longest one in the batch if true"},
- )
-
-
-@register_task("hubert_pretraining", dataclass=HubertPretrainingConfig)
-class HubertPretrainingTask(FairseqTask):
-
- cfg: HubertPretrainingConfig
-
- def __init__(
- self,
- cfg: HubertPretrainingConfig,
- ) -> None:
- super().__init__(cfg)
-
- logger.info(f"current directory is {os.getcwd()}")
- logger.info(f"HubertPretrainingTask Config {cfg}")
-
- self.cfg = cfg
- self.fine_tuning = cfg.fine_tuning
-
- if cfg.fine_tuning:
- self.state.add_factory("target_dictionary", self.load_dictionaries)
- else:
- self.state.add_factory("dictionaries", self.load_dictionaries)
-
-        self.blank_symbol = "<s>"
-
- @property
- def source_dictionary(self) -> Optional[Dictionary]:
- return None
-
- @property
- def target_dictionary(self) -> Optional[Dictionary]:
- return self.state.target_dictionary
-
- @property
- def dictionaries(self) -> List[Dictionary]:
- return self.state.dictionaries
-
- @classmethod
- def setup_task(
- cls, cfg: HubertPretrainingConfig, **kwargs
- ) -> "HubertPretrainingTask":
- return cls(cfg)
-
- def load_dictionaries(self):
- label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
- dictionaries = [Dictionary.load(f"{label_dir}/dict.{label}.txt") for label in self.cfg.labels]
- return dictionaries[0] if self.cfg.fine_tuning else dictionaries
-
- def get_label_dir(self) -> str:
- if self.cfg.label_dir is None:
- return self.cfg.data
- return self.cfg.label_dir
-
- def load_dataset(self, split: str, **kwargs) -> None:
- manifest = f"{self.cfg.data}/{split}.tsv"
- dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries
- pad_list = [dict.pad() for dict in dicts]
- eos_list = [dict.eos() for dict in dicts]
- procs = [LabelEncoder(dict) for dict in dicts]
- paths = [
- f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels
- ]
-
- # hubert v1: pad_audio=True, random_crop=False;
- self.datasets[split] = HubertDataset(
- manifest,
- sample_rate=self.cfg.sample_rate,
- label_paths=paths,
- label_rates=self.cfg.label_rate,
- pad_list=pad_list,
- eos_list=eos_list,
- label_processors=procs,
- max_keep_sample_size=self.cfg.max_keep_size,
- min_keep_sample_size=self.cfg.min_sample_size,
- max_sample_size=self.cfg.max_sample_size,
- pad_audio=self.cfg.pad_audio,
- normalize=self.cfg.normalize,
- store_labels=False,
- random_crop=self.cfg.random_crop,
- single_target=self.cfg.single_target,
- )
-
- def max_positions(self) -> Tuple[int, int]:
- return (sys.maxsize, sys.maxsize)
-
- def filter_indices_by_size(
- self, indices: np.array, *args, **kwargs
- ) -> np.array:
- return indices
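
For reference, the config dataclass above can be instantiated directly; the values below are purely hypothetical and only mirror the fields documented in the field metadata (a sketch, not a verified training setup):

```python
from fairseq.tasks.hubert_pretraining import (
    HubertPretrainingConfig,
    HubertPretrainingTask,
)

cfg = HubertPretrainingConfig(
    data="/data/hubert/manifests",   # hypothetical dir holding {split}.tsv manifests
    labels=["km"],                   # frame-level k-means labels for pre-training
    label_rate=100,                  # label frames per second of audio
    sample_rate=16_000,
    max_sample_size=250_000,
    min_sample_size=32_000,
)
task = HubertPretrainingTask.setup_task(cfg)  # then task.load_dataset("train")
```
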
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/spm_train.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/spm_train.py
deleted file mode 100644
index 9db668fd4166a860198784990de68ea26157995d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/spm_train.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import sys
-
-import sentencepiece as spm
-
-
-if __name__ == "__main__":
- spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
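
The wrapper above just joins its command-line arguments and hands them to SentencePiece, so the two invocations below are equivalent (a sketch; `data.txt` and the flag values are placeholders):

```python
import sentencepiece as spm

# same as: python spm_train.py --input=data.txt --model_prefix=spm --vocab_size=8000
spm.SentencePieceTrainer.Train("--input=data.txt --model_prefix=spm --vocab_size=8000")
```
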
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wmt19/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wmt19/README.md
deleted file mode 100644
index 5c90d0e6c4ae8d043ca622e70c5828dca6f9c2f2..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wmt19/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# WMT 19
-
-This page provides pointers to the models of Facebook-FAIR's WMT'19 news translation task submission [(Ng et al., 2019)](https://arxiv.org/abs/1907.06616).
-
-## Pre-trained models
-
-Model | Description | Download
----|---|---
-`transformer.wmt19.en-de` | En->De Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz)
-`transformer.wmt19.de-en` | De->En Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz)
-`transformer.wmt19.en-ru` | En->Ru Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz)
-`transformer.wmt19.ru-en` | Ru->En Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz)
-`transformer_lm.wmt19.en` | En Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz)
-`transformer_lm.wmt19.de` | De Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.gz)
-`transformer_lm.wmt19.ru` | Ru Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.gz)
-
-## Pre-trained single models before finetuning
-
-Model | Description | Download
----|---|---
-`transformer.wmt19.en-de` | En->De Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.ffn8192.tar.gz)
-`transformer.wmt19.de-en` | De->En Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.ffn8192.tar.gz)
-`transformer.wmt19.en-ru` | En->Ru Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ffn8192.tar.gz)
-`transformer.wmt19.ru-en` | Ru->En Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ffn8192.tar.gz)
-
-## Example usage (torch.hub)
-
-#### Requirements
-
-We require a few additional Python dependencies for preprocessing:
-```bash
-pip install fastBPE sacremoses
-```
-
-#### Translation
-
-```python
-import torch
-
-# English to German translation
-en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',
- tokenizer='moses', bpe='fastbpe')
-en2de.translate("Machine learning is great!") # 'Maschinelles Lernen ist großartig!'
-
-# German to English translation
-de2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',
- tokenizer='moses', bpe='fastbpe')
-de2en.translate("Maschinelles Lernen ist großartig!") # 'Machine learning is great!'
-
-# English to Russian translation
-en2ru = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-ru', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',
- tokenizer='moses', bpe='fastbpe')
-en2ru.translate("Machine learning is great!") # 'Машинное обучение - это здорово!'
-
-# Russian to English translation
-ru2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.ru-en', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',
- tokenizer='moses', bpe='fastbpe')
-ru2en.translate("Машинное обучение - это здорово!") # 'Machine learning is great!'
-```
-
-#### Language Modeling
-
-```python
-# Sample from the English LM
-en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe')
-en_lm.sample("Machine learning is") # 'Machine learning is the future of computing, says Microsoft boss Satya Nadella ...'
-
-# Sample from the German LM
-de_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.de', tokenizer='moses', bpe='fastbpe')
-de_lm.sample("Maschinelles lernen ist") # 'Maschinelles lernen ist das A und O (neues-deutschland.de) Die Arbeitsbedingungen für Lehrerinnen und Lehrer sind seit Jahren verbesserungswürdig ...'
-
-# Sample from the Russian LM
-ru_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.ru', tokenizer='moses', bpe='fastbpe')
-ru_lm.sample("машинное обучение это") # 'машинное обучение это то, что мы называем "искусственным интеллектом".'
-```
-
-## Citation
-```bibtex
-@inproceedings{ng2019facebook,
- title = {Facebook FAIR's WMT19 News Translation Task Submission},
- author = {Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
- booktitle = {Proc. of WMT},
- year = 2019,
-}
-```
diff --git a/spaces/OsituKengere/Sauti-Midjourney/app.py b/spaces/OsituKengere/Sauti-Midjourney/app.py
deleted file mode 100644
index c04b6d45f84686618444749797188ca31fcb9882..0000000000000000000000000000000000000000
--- a/spaces/OsituKengere/Sauti-Midjourney/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/prompthero/openjourney-v4").launch()
\ No newline at end of file
diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/backbone/swin.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/backbone/swin.py
deleted file mode 100644
index 3b099d84396ac31d22881e5b6c9e53d2d0abaef3..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/backbone/swin.py
+++ /dev/null
@@ -1,770 +0,0 @@
-# --------------------------------------------------------
-# Swin Transformer
-# Copyright (c) 2021 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Ze Liu, Yutong Lin, Yixuan Wei
-# --------------------------------------------------------
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation/blob/main/mmseg/models/backbones/swin_transformer.py
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
-
-
-class Mlp(nn.Module):
- """Multilayer perceptron."""
-
- def __init__(
- self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
- ):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class WindowAttention(nn.Module):
- """Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(
- self,
- dim,
- window_size,
- num_heads,
- qkv_bias=True,
- qk_scale=None,
- attn_drop=0.0,
- proj_drop=0.0,
- ):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- # define a parameter table of relative position bias
- self.relative_position_bias_table = nn.Parameter(
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
- ) # 2*Wh-1 * 2*Ww-1, nH
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- trunc_normal_(self.relative_position_bias_table, std=0.02)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """Forward function.
- Args:
- x: input features with shape of (num_windows*B, N, C)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- B_, N, C = x.shape
- qkv = (
- self.qkv(x)
- .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
- .permute(2, 0, 3, 1, 4)
- )
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = q @ k.transpose(-2, -1)
-
- relative_position_bias = self.relative_position_bias_table[
- self.relative_position_index.view(-1)
- ].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
- ) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(
- 2, 0, 1
- ).contiguous() # nH, Wh*Ww, Wh*Ww
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-
-class SwinTransformerBlock(nn.Module):
- """Swin Transformer Block.
- Args:
- dim (int): Number of input channels.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(
- self,
- dim,
- num_heads,
- window_size=7,
- shift_size=0,
- mlp_ratio=4.0,
- qkv_bias=True,
- qk_scale=None,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm,
- ):
- super().__init__()
- self.dim = dim
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
-        assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim,
- window_size=to_2tuple(self.window_size),
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop,
- )
-
- self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(
- in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
- )
-
- self.H = None
- self.W = None
-
- def forward(self, x, mask_matrix):
- """Forward function.
- Args:
- x: Input feature, tensor size (B, H*W, C).
- H, W: Spatial resolution of the input feature.
- mask_matrix: Attention mask for cyclic shift.
- """
- B, L, C = x.shape
- H, W = self.H, self.W
- assert L == H * W, "input feature has wrong size"
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(B, H, W, C)
-
- # pad feature maps to multiples of window size
- pad_l = pad_t = 0
- pad_r = (self.window_size - W % self.window_size) % self.window_size
- pad_b = (self.window_size - H % self.window_size) % self.window_size
- x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
- _, Hp, Wp, _ = x.shape
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- attn_mask = mask_matrix
- else:
- shifted_x = x
- attn_mask = None
-
- # partition windows
- x_windows = window_partition(
- shifted_x, self.window_size
- ) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(
- -1, self.window_size * self.window_size, C
- ) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA
- attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
-
- if pad_r > 0 or pad_b > 0:
- x = x[:, :H, :W, :].contiguous()
-
- x = x.view(B, H * W, C)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
-
- return x
-
-
-class PatchMerging(nn.Module):
- """Patch Merging Layer
- Args:
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(4 * dim)
-
- def forward(self, x, H, W):
- """Forward function.
- Args:
- x: Input feature, tensor size (B, H*W, C).
- H, W: Spatial resolution of the input feature.
- """
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
-
- x = x.view(B, H, W, C)
-
- # padding
- pad_input = (H % 2 == 1) or (W % 2 == 1)
- if pad_input:
- x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
-
- x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
- x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
- x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
- x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
- x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
- x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
-
- x = self.norm(x)
- x = self.reduction(x)
-
- return x
-
-
-class BasicLayer(nn.Module):
- """A basic Swin Transformer layer for one stage.
- Args:
- dim (int): Number of feature channels
- depth (int): Depths of this stage.
-        num_heads (int): Number of attention heads.
- window_size (int): Local window size. Default: 7.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(
- self,
- dim,
- depth,
- num_heads,
- window_size=7,
- mlp_ratio=4.0,
- qkv_bias=True,
- qk_scale=None,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False,
- ):
- super().__init__()
- self.window_size = window_size
- self.shift_size = window_size // 2
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList(
- [
- SwinTransformerBlock(
- dim=dim,
- num_heads=num_heads,
- window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer,
- )
- for i in range(depth)
- ]
- )
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(dim=dim, norm_layer=norm_layer)
- else:
- self.downsample = None
-
- def forward(self, x, H, W):
- """Forward function.
- Args:
- x: Input feature, tensor size (B, H*W, C).
- H, W: Spatial resolution of the input feature.
- """
-
- # calculate attention mask for SW-MSA
- Hp = int(np.ceil(H / self.window_size)) * self.window_size
- Wp = int(np.ceil(W / self.window_size)) * self.window_size
- img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
- h_slices = (
- slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None),
- )
- w_slices = (
- slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None),
- )
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(
- img_mask, self.window_size
- ) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
- attn_mask == 0, float(0.0)
- )
-
- for blk in self.blocks:
- blk.H, blk.W = H, W
- if self.use_checkpoint:
- x = checkpoint.checkpoint(blk, x, attn_mask)
- else:
- x = blk(x, attn_mask)
- if self.downsample is not None:
- x_down = self.downsample(x, H, W)
- Wh, Ww = (H + 1) // 2, (W + 1) // 2
- return x, H, W, x_down, Wh, Ww
- else:
- return x, H, W, x, H, W
-
-
-class PatchEmbed(nn.Module):
- """Image to Patch Embedding
- Args:
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- patch_size = to_2tuple(patch_size)
- self.patch_size = patch_size
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- """Forward function."""
- # padding
- _, _, H, W = x.size()
- if W % self.patch_size[1] != 0:
- x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
- if H % self.patch_size[0] != 0:
- x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
-
- x = self.proj(x) # B C Wh Ww
- if self.norm is not None:
- Wh, Ww = x.size(2), x.size(3)
- x = x.flatten(2).transpose(1, 2)
- x = self.norm(x)
- x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
-
- return x
-
-
-class SwinTransformer(nn.Module):
- """Swin Transformer backbone.
- A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
- https://arxiv.org/pdf/2103.14030
- Args:
- pretrain_img_size (int): Input image size for training the pretrained model,
-            used in absolute position embedding. Default 224.
- patch_size (int | tuple(int)): Patch size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- depths (tuple[int]): Depths of each Swin Transformer stage.
-        num_heads (tuple[int]): Number of attention heads of each stage.
- window_size (int): Window size. Default: 7.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
- drop_rate (float): Dropout rate.
- attn_drop_rate (float): Attention dropout rate. Default: 0.
- drop_path_rate (float): Stochastic depth rate. Default: 0.2.
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
- patch_norm (bool): If True, add normalization after patch embedding. Default: True.
- out_indices (Sequence[int]): Output from which stages.
- frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
- -1 means not freezing any parameters.
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(
- self,
- pretrain_img_size=224,
- patch_size=4,
- in_chans=3,
- embed_dim=96,
- depths=[2, 2, 6, 2],
- num_heads=[3, 6, 12, 24],
- window_size=7,
- mlp_ratio=4.0,
- qkv_bias=True,
- qk_scale=None,
- drop_rate=0.0,
- attn_drop_rate=0.0,
- drop_path_rate=0.2,
- norm_layer=nn.LayerNorm,
- ape=False,
- patch_norm=True,
- out_indices=(0, 1, 2, 3),
- frozen_stages=-1,
- use_checkpoint=False,
- ):
- super().__init__()
-
- self.pretrain_img_size = pretrain_img_size
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.out_indices = out_indices
- self.frozen_stages = frozen_stages
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- patch_size=patch_size,
- in_chans=in_chans,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None,
- )
-
- # absolute position embedding
- if self.ape:
- pretrain_img_size = to_2tuple(pretrain_img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [
- pretrain_img_size[0] // patch_size[0],
- pretrain_img_size[1] // patch_size[1],
- ]
-
- self.absolute_pos_embed = nn.Parameter(
- torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
- )
- trunc_normal_(self.absolute_pos_embed, std=0.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [
- x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
- ] # stochastic depth decay rule
-
- # build layers
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = BasicLayer(
- dim=int(embed_dim * 2 ** i_layer),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
- norm_layer=norm_layer,
- downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
- use_checkpoint=use_checkpoint,
- )
- self.layers.append(layer)
-
- num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
- self.num_features = num_features
-
- # add a norm layer for each output
- for i_layer in out_indices:
- layer = norm_layer(num_features[i_layer])
- layer_name = f"norm{i_layer}"
- self.add_module(layer_name, layer)
-
- self._freeze_stages()
-
- def _freeze_stages(self):
- if self.frozen_stages >= 0:
- self.patch_embed.eval()
- for param in self.patch_embed.parameters():
- param.requires_grad = False
-
- if self.frozen_stages >= 1 and self.ape:
- self.absolute_pos_embed.requires_grad = False
-
- if self.frozen_stages >= 2:
- self.pos_drop.eval()
- for i in range(0, self.frozen_stages - 1):
- m = self.layers[i]
- m.eval()
- for param in m.parameters():
- param.requires_grad = False
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in backbone.
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
-
- def _init_weights(m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- def forward(self, x):
- """Forward function."""
- x = self.patch_embed(x)
-
- Wh, Ww = x.size(2), x.size(3)
- if self.ape:
- # interpolate the position embedding to the corresponding size
- absolute_pos_embed = F.interpolate(
- self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
- )
- x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
- else:
- x = x.flatten(2).transpose(1, 2)
- x = self.pos_drop(x)
-
- outs = {}
- for i in range(self.num_layers):
- layer = self.layers[i]
- x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
-
- if i in self.out_indices:
- norm_layer = getattr(self, f"norm{i}")
- x_out = norm_layer(x_out)
-
- out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
- outs["res{}".format(i + 2)] = out
-
- return outs
-
- def train(self, mode=True):
- """Convert the model into training mode while keep layers freezed."""
- super(SwinTransformer, self).train(mode)
- self._freeze_stages()
-
-
-@BACKBONE_REGISTRY.register()
-class D2SwinTransformer(SwinTransformer, Backbone):
- def __init__(self, cfg, input_shape):
-
- pretrain_img_size = cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE
- patch_size = cfg.MODEL.SWIN.PATCH_SIZE
- in_chans = 3
- embed_dim = cfg.MODEL.SWIN.EMBED_DIM
- depths = cfg.MODEL.SWIN.DEPTHS
- num_heads = cfg.MODEL.SWIN.NUM_HEADS
- window_size = cfg.MODEL.SWIN.WINDOW_SIZE
- mlp_ratio = cfg.MODEL.SWIN.MLP_RATIO
- qkv_bias = cfg.MODEL.SWIN.QKV_BIAS
- qk_scale = cfg.MODEL.SWIN.QK_SCALE
- drop_rate = cfg.MODEL.SWIN.DROP_RATE
- attn_drop_rate = cfg.MODEL.SWIN.ATTN_DROP_RATE
- drop_path_rate = cfg.MODEL.SWIN.DROP_PATH_RATE
- norm_layer = nn.LayerNorm
- ape = cfg.MODEL.SWIN.APE
- patch_norm = cfg.MODEL.SWIN.PATCH_NORM
- use_checkpoint = cfg.MODEL.SWIN.USE_CHECKPOINT
-
- super().__init__(
- pretrain_img_size,
- patch_size,
- in_chans,
- embed_dim,
- depths,
- num_heads,
- window_size,
- mlp_ratio,
- qkv_bias,
- qk_scale,
- drop_rate,
- attn_drop_rate,
- drop_path_rate,
- norm_layer,
- ape,
- patch_norm,
- use_checkpoint=use_checkpoint,
- )
-
- self._out_features = cfg.MODEL.SWIN.OUT_FEATURES
-
- self._out_feature_strides = {
- "res2": 4,
- "res3": 8,
- "res4": 16,
- "res5": 32,
- }
- self._out_feature_channels = {
- "res2": self.num_features[0],
- "res3": self.num_features[1],
- "res4": self.num_features[2],
- "res5": self.num_features[3],
- }
-
- def forward(self, x):
- """
- Args:
- x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
- Returns:
- dict[str->Tensor]: names and the corresponding features
- """
- assert (
- x.dim() == 4
- ), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
- outputs = {}
- y = super().forward(x)
- for k in y.keys():
- if k in self._out_features:
- outputs[k] = y[k]
- return outputs
-
- def output_shape(self):
- return {
- name: ShapeSpec(
- channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
- )
- for name in self._out_features
- }
-
- @property
- def size_divisibility(self):
- return 32
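
As a quick sanity check of the windowing helpers defined near the top of the file above, the partition/reverse pair is an exact round trip whenever the spatial size is a multiple of the window size; condensed copies are inlined here so the sketch is self-contained:

```python
import torch

def window_partition(x, ws):
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws, ws, C)

def window_reverse(windows, ws, H, W):
    B = int(windows.shape[0] / (H * W / ws / ws))
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

x = torch.randn(2, 14, 14, 96)            # [B, H, W, C]
w = window_partition(x, 7)                # [B * (H/7) * (W/7), 7, 7, C]
assert w.shape == (2 * 2 * 2, 7, 7, 96)
assert torch.equal(window_reverse(w, 7, 14, 14), x)
```
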
diff --git a/spaces/PSLD/PSLD/stable-diffusion/debug/inverse.sh b/spaces/PSLD/PSLD/stable-diffusion/debug/inverse.sh
deleted file mode 100644
index 33e90eb628d1610fdd008ec12a40f2b1f5264db5..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/debug/inverse.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-export CUDA_VISIBLE_DEVICES='1'
-python scripts/inverse.py \
- --file_id='00019.png' \
- --task_config='configs/motion_deblur_config.yaml' \
- --inpainting=0 \
- --general_inverse=0 \
- --gamma=1e-1 \
- --omega=1e-1 \
- --W=256 \
- --H=256 \
- --scale=5.0 \
- --laion400m \
- --prompt="a virus monster is playing guitar, oil on canvas"
\ No newline at end of file
diff --git a/spaces/Pascall/OASSapi_00/OASSapi.py b/spaces/Pascall/OASSapi_00/OASSapi.py
deleted file mode 100644
index 8e42068e720e9645d3039a23ec5f84abaab67c41..0000000000000000000000000000000000000000
--- a/spaces/Pascall/OASSapi_00/OASSapi.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import os
-import gradio
-from text_generation import Client, InferenceAPIClient
-
-past = []
-
-def reset_memories(content = ""):
- try:
- with open("memories.txt", 'w') as file:
- file.write(content)
- print("Successfully reset memories.")
- except Exception as err:
- print(err)
-
-def add_memory(content):
- try:
- with open("memories.txt", 'a') as file:
- file.write(content)
- print("Successfully added to the file.")
- except Exception as err:
- print(err)
-
-def read_memories():
- try:
- with open("memories.txt", 'r') as file:
- content = file.read()
- return content
- except Exception as err:
- print(err)
-
-def get_client(model: str):
- return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None), timeout=100)
-
-def get_usernames(model: str):
- if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
- return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>\n"
- return "", "User: ", "Assistant: ", "\n"
-
-def predict(inputs: str):
- model = "OpenAssistant/oasst-sft-1-pythia-12b"
-
- client = get_client(model)
- preprompt, user_name, assistant_name, end = get_usernames(model)
-
-    personality = "My name is Karen, I prefer to be called Kage, and I like to answer questions as short as possible (in under 100 words)."
-
- past = []
-
- total_prompt = preprompt + user_name + inputs + end + assistant_name
- total_prompts = preprompt + "".join(past) + user_name + inputs + end + assistant_name
-
- print(total_prompts)
-
-    if not past:
- past.append(user_name + "describe yourself" + end + assistant_name + personality + end + total_prompt)
- else:
- past.append(total_prompt)
-
- if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
- iterator = client.generate(
- total_prompts,
- typical_p = 0.1,
- truncate = 1000,
- watermark = 0,
- max_new_tokens = 502,
- )
-
- yield iterator.generated_text
-
- add_memory(iterator.generated_text + end)
-
-def run_gradio_interface():
- g = gradio.Interface(
- fn=predict,
- inputs=[
- gradio.components.Textbox(lines=3, label="Hi, how can I help you?", placeholder="Enter A prompt"),
- ],
- outputs=[
-            gradio.components.Textbox(
- lines=10,
- label="",
- )
- ]
- )
- g.queue(concurrency_count=1)
- g.launch()
-
-if __name__ == "__main__":
- run_gradio_interface()
diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/agent/__init__.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/agent/__init__.py
deleted file mode 100644
index e928af2205b1c52d19dc89ec4246e8c1d2c20e3f..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/autogpt/agent/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from autogpt.agent.agent import Agent
-from autogpt.agent.agent_manager import AgentManager
-
-__all__ = ["Agent", "AgentManager"]
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py
deleted file mode 100644
index cb7076f80bf37f7931185bf0293ffcc1ce19c8ef..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-
-
-def _fuse_conv_bn(conv, bn):
- """Fuse conv and bn into one module.
-
- Args:
- conv (nn.Module): Conv to be fused.
- bn (nn.Module): BN to be fused.
-
- Returns:
- nn.Module: Fused module.
- """
- conv_w = conv.weight
- conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
- bn.running_mean)
-
- factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
- conv.weight = nn.Parameter(conv_w *
- factor.reshape([conv.out_channels, 1, 1, 1]))
- conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
- return conv
-
-
-def fuse_conv_bn(module):
- """Recursively fuse conv and bn in a module.
-
-    During inference, the functionality of batch norm layers is turned off and
-    only the per-channel running mean and variance are used, which makes it
-    possible to fuse them into the preceding conv layers to save computation
-    and simplify the network structure.
-
- Args:
- module (nn.Module): Module to be fused.
-
- Returns:
- nn.Module: Fused module.
- """
- last_conv = None
- last_conv_name = None
-
- for name, child in module.named_children():
- if isinstance(child,
- (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
- if last_conv is None: # only fuse BN that is after Conv
- continue
- fused_conv = _fuse_conv_bn(last_conv, child)
- module._modules[last_conv_name] = fused_conv
- # To reduce changes, set BN as Identity instead of deleting it.
- module._modules[name] = nn.Identity()
- last_conv = None
- elif isinstance(child, nn.Conv2d):
- last_conv = child
- last_conv_name = name
- else:
- fuse_conv_bn(child)
- return module
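
The fusion above folds the batch-norm statistics into the convolution's weight and bias. A minimal self-contained sketch (plain torch, mirroring the same formula rather than importing mmcv) checks that the fused conv matches conv followed by bn in eval mode:

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True).eval()
bn = nn.BatchNorm2d(8).eval()

with torch.no_grad():
    # give bn non-trivial statistics so the check below is meaningful
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-1, 1)

    # fold bn into a copy of the conv, using the same formula as _fuse_conv_bn
    fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True).eval()
    factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.copy_(conv.weight * factor.reshape(-1, 1, 1, 1))
    fused.bias.copy_((conv.bias - bn.running_mean) * factor + bn.bias)

    x = torch.randn(1, 3, 16, 16)
    assert torch.allclose(fused(x), bn(conv(x)), atol=1e-5)
```
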
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/__init__.py
deleted file mode 100644
index 999e090a458ee148ceca0649f1e3806a40e909bd..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/__init__.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .assign_score_withk import assign_score_withk
-from .ball_query import ball_query
-from .bbox import bbox_overlaps
-from .border_align import BorderAlign, border_align
-from .box_iou_rotated import box_iou_rotated
-from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
-from .cc_attention import CrissCrossAttention
-from .contour_expand import contour_expand
-from .corner_pool import CornerPool
-from .correlation import Correlation
-from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
-from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
- ModulatedDeformRoIPoolPack, deform_roi_pool)
-from .deprecated_wrappers import Conv2d_deprecated as Conv2d
-from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d
-from .deprecated_wrappers import Linear_deprecated as Linear
-from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d
-from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
- sigmoid_focal_loss, softmax_focal_loss)
-from .furthest_point_sample import (furthest_point_sample,
- furthest_point_sample_with_dist)
-from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu
-from .gather_points import gather_points
-from .group_points import GroupAll, QueryAndGroup, grouping_operation
-from .info import (get_compiler_version, get_compiling_cuda_version,
- get_onnxruntime_op_path)
-from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev
-from .knn import knn
-from .masked_conv import MaskedConv2d, masked_conv2d
-from .modulated_deform_conv import (ModulatedDeformConv2d,
- ModulatedDeformConv2dPack,
- modulated_deform_conv2d)
-from .multi_scale_deform_attn import MultiScaleDeformableAttention
-from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms
-from .pixel_group import pixel_group
-from .point_sample import (SimpleRoIAlign, point_sample,
- rel_roi_point_to_rel_img_point)
-from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu,
- points_in_boxes_part)
-from .points_sampler import PointsSampler
-from .psa_mask import PSAMask
-from .roi_align import RoIAlign, roi_align
-from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
-from .roi_pool import RoIPool, roi_pool
-from .roiaware_pool3d import RoIAwarePool3d
-from .roipoint_pool3d import RoIPointPool3d
-from .saconv import SAConv2d
-from .scatter_points import DynamicScatter, dynamic_scatter
-from .sync_bn import SyncBatchNorm
-from .three_interpolate import three_interpolate
-from .three_nn import three_nn
-from .tin_shift import TINShift, tin_shift
-from .upfirdn2d import upfirdn2d
-from .voxelize import Voxelization, voxelization
-
-__all__ = [
- 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
- 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
- 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
- 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
- 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
- 'get_compiler_version', 'get_compiling_cuda_version',
- 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
- 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
- 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
- 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
- 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
- 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
- 'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
- 'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
- 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
- 'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
- 'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
- 'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
- 'border_align', 'gather_points', 'furthest_point_sample',
- 'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
- 'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
- 'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
- 'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
-]
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/csrc/cpu/vision.h b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/csrc/cpu/vision.h
deleted file mode 100644
index 0a06a56233e19b6ab65f738996bf399c3023e076..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/csrc/cpu/vision.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-#pragma once
-#include <torch/extension.h>
-
-
-at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
- const at::Tensor& rois,
- const float spatial_scale,
- const int pooled_height,
- const int pooled_width,
- const int sampling_ratio);
-
-
-at::Tensor nms_cpu(const at::Tensor& dets,
- const at::Tensor& scores,
- const float threshold);
-
-
-std::pair<at::Tensor, at::Tensor> soft_nms_cpu(const at::Tensor& dets,
- const at::Tensor& scores,
- const float threshold,
- const float sigma);
\ No newline at end of file
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/blip_pretrain.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/blip_pretrain.py
deleted file mode 100644
index e42ce5f998b0a51e6f731ee6b5c8bae6d02a8664..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/blip_pretrain.py
+++ /dev/null
@@ -1,339 +0,0 @@
-'''
- * Copyright (c) 2022, salesforce.com, inc.
- * All rights reserved.
- * SPDX-License-Identifier: BSD-3-Clause
- * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- * By Junnan Li
-'''
-from models.med import BertConfig, BertModel, BertLMHeadModel
-from transformers import BertTokenizer
-import transformers
-transformers.logging.set_verbosity_error()
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from models.blip import create_vit, init_tokenizer, load_checkpoint
-
-class BLIP_Pretrain(nn.Module):
- def __init__(self,
- med_config = 'configs/bert_config.json',
- image_size = 224,
- vit = 'base',
- vit_grad_ckpt = False,
- vit_ckpt_layer = 0,
- embed_dim = 256,
- queue_size = 57600,
- momentum = 0.995,
- ):
- """
- Args:
- med_config (str): path for the mixture of encoder-decoder model's configuration file
- image_size (int): input image size
- vit (str): model size of vision transformer
- """
- super().__init__()
-
- self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0)
-
- if vit=='base':
- checkpoint = torch.hub.load_state_dict_from_url(
- url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
- map_location="cpu", check_hash=True)
- state_dict = checkpoint["model"]
- msg = self.visual_encoder.load_state_dict(state_dict,strict=False)
- elif vit=='large':
- from timm.models.helpers import load_custom_pretrained
- from timm.models.vision_transformer import default_cfgs
- load_custom_pretrained(self.visual_encoder,default_cfgs['vit_large_patch16_224_in21k'])
-
- self.tokenizer = init_tokenizer()
- encoder_config = BertConfig.from_json_file(med_config)
- encoder_config.encoder_width = vision_width
- self.text_encoder = BertModel.from_pretrained('bert-base-uncased',config=encoder_config, add_pooling_layer=False)
- self.text_encoder.resize_token_embeddings(len(self.tokenizer))
-
- text_width = self.text_encoder.config.hidden_size
-
- self.vision_proj = nn.Linear(vision_width, embed_dim)
- self.text_proj = nn.Linear(text_width, embed_dim)
-
- self.itm_head = nn.Linear(text_width, 2)
-
- # create momentum encoders
- self.visual_encoder_m, vision_width = create_vit(vit,image_size)
- self.vision_proj_m = nn.Linear(vision_width, embed_dim)
- self.text_encoder_m = BertModel(config=encoder_config, add_pooling_layer=False)
- self.text_proj_m = nn.Linear(text_width, embed_dim)
-
- self.model_pairs = [[self.visual_encoder,self.visual_encoder_m],
- [self.vision_proj,self.vision_proj_m],
- [self.text_encoder,self.text_encoder_m],
- [self.text_proj,self.text_proj_m],
- ]
- self.copy_params()
-
- # create the queue
- self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
- self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
- self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
-
- self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
- self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
-
- self.queue_size = queue_size
- self.momentum = momentum
- self.temp = nn.Parameter(0.07*torch.ones([]))
-
- # create the decoder
- decoder_config = BertConfig.from_json_file(med_config)
- decoder_config.encoder_width = vision_width
- self.text_decoder = BertLMHeadModel.from_pretrained('bert-base-uncased',config=decoder_config)
- self.text_decoder.resize_token_embeddings(len(self.tokenizer))
- tie_encoder_decoder_weights(self.text_encoder,self.text_decoder.bert,'','/attention')
-
-
- def forward(self, image, caption, alpha):
- with torch.no_grad():
- self.temp.clamp_(0.001,0.5)
-
- image_embeds = self.visual_encoder(image)
- image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
- image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1)
-
- text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=30,
- return_tensors="pt").to(image.device)
- text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
- return_dict = True, mode = 'text')
- text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)
-
- # get momentum features
- with torch.no_grad():
- self._momentum_update()
- image_embeds_m = self.visual_encoder_m(image)
- image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:,0,:]),dim=-1)
- image_feat_all = torch.cat([image_feat_m.t(),self.image_queue.clone().detach()],dim=1)
-
- text_output_m = self.text_encoder_m(text.input_ids, attention_mask = text.attention_mask,
- return_dict = True, mode = 'text')
- text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:,0,:]),dim=-1)
- text_feat_all = torch.cat([text_feat_m.t(),self.text_queue.clone().detach()],dim=1)
-
- sim_i2t_m = image_feat_m @ text_feat_all / self.temp
- sim_t2i_m = text_feat_m @ image_feat_all / self.temp
-
- sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
- sim_targets.fill_diagonal_(1)
-
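-            # momentum distillation: soften the one-hot similarity targets with predictions from the momentum model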
- sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
- sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
-
- sim_i2t = image_feat @ text_feat_all / self.temp
- sim_t2i = text_feat @ image_feat_all / self.temp
-
- loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets,dim=1).mean()
- loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets,dim=1).mean()
-
- loss_ita = (loss_i2t+loss_t2i)/2
-
- self._dequeue_and_enqueue(image_feat_m, text_feat_m)
-
- ###============== Image-text Matching ===================###
- encoder_input_ids = text.input_ids.clone()
- encoder_input_ids[:,0] = self.tokenizer.enc_token_id
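-        # replace the [CLS] token id with the tokenizer's encoder token id for image-text matching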
-
-        # forward the positive image-text pair
- bs = image.size(0)
- output_pos = self.text_encoder(encoder_input_ids,
- attention_mask = text.attention_mask,
- encoder_hidden_states = image_embeds,
- encoder_attention_mask = image_atts,
- return_dict = True,
- )
- with torch.no_grad():
- weights_t2i = F.softmax(sim_t2i[:,:bs],dim=1)+1e-4
- weights_t2i.fill_diagonal_(0)
- weights_i2t = F.softmax(sim_i2t[:,:bs],dim=1)+1e-4
- weights_i2t.fill_diagonal_(0)
-
- # select a negative image for each text
- image_embeds_neg = []
- for b in range(bs):
- neg_idx = torch.multinomial(weights_t2i[b], 1).item()
- image_embeds_neg.append(image_embeds[neg_idx])
- image_embeds_neg = torch.stack(image_embeds_neg,dim=0)
-
- # select a negative text for each image
- text_ids_neg = []
- text_atts_neg = []
- for b in range(bs):
- neg_idx = torch.multinomial(weights_i2t[b], 1).item()
- text_ids_neg.append(encoder_input_ids[neg_idx])
- text_atts_neg.append(text.attention_mask[neg_idx])
-
- text_ids_neg = torch.stack(text_ids_neg,dim=0)
- text_atts_neg = torch.stack(text_atts_neg,dim=0)
-
- text_ids_all = torch.cat([encoder_input_ids, text_ids_neg],dim=0)
- text_atts_all = torch.cat([text.attention_mask, text_atts_neg],dim=0)
-
- image_embeds_all = torch.cat([image_embeds_neg,image_embeds],dim=0)
- image_atts_all = torch.cat([image_atts,image_atts],dim=0)
-
- output_neg = self.text_encoder(text_ids_all,
- attention_mask = text_atts_all,
- encoder_hidden_states = image_embeds_all,
- encoder_attention_mask = image_atts_all,
- return_dict = True,
- )
-
- vl_embeddings = torch.cat([output_pos.last_hidden_state[:,0,:], output_neg.last_hidden_state[:,0,:]],dim=0)
- vl_output = self.itm_head(vl_embeddings)
-
- itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),torch.zeros(2*bs,dtype=torch.long)],
- dim=0).to(image.device)
- loss_itm = F.cross_entropy(vl_output, itm_labels)
-
- ##================= LM ========================##
- decoder_input_ids = text.input_ids.clone()
- decoder_input_ids[:,0] = self.tokenizer.bos_token_id
- decoder_targets = decoder_input_ids.masked_fill(decoder_input_ids == self.tokenizer.pad_token_id, -100)
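-        # pad positions are set to -100 so they are ignored by the language-modeling loss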
-
- decoder_output = self.text_decoder(decoder_input_ids,
- attention_mask = text.attention_mask,
- encoder_hidden_states = image_embeds,
- encoder_attention_mask = image_atts,
- labels = decoder_targets,
- return_dict = True,
- )
-
- loss_lm = decoder_output.loss
- return loss_ita, loss_itm, loss_lm
-
-
-
- @torch.no_grad()
- def copy_params(self):
- for model_pair in self.model_pairs:
- for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
- param_m.data.copy_(param.data) # initialize
- param_m.requires_grad = False # not update by gradient
-
-
- @torch.no_grad()
- def _momentum_update(self):
- for model_pair in self.model_pairs:
- for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
- param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)
-
-
- @torch.no_grad()
- def _dequeue_and_enqueue(self, image_feat, text_feat):
- # gather keys before updating queue
- image_feats = concat_all_gather(image_feat)
- text_feats = concat_all_gather(text_feat)
-
- batch_size = image_feats.shape[0]
-
- ptr = int(self.queue_ptr)
- assert self.queue_size % batch_size == 0 # for simplicity
-
- # replace the keys at ptr (dequeue and enqueue)
- self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
- self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
- ptr = (ptr + batch_size) % self.queue_size # move pointer
-
- self.queue_ptr[0] = ptr
-
-
-def blip_pretrain(**kwargs):
- model = BLIP_Pretrain(**kwargs)
- return model
-
-
-@torch.no_grad()
-def concat_all_gather(tensor):
- """
- Performs all_gather operation on the provided tensors.
- *** Warning ***: torch.distributed.all_gather has no gradient.
- """
- tensors_gather = [torch.ones_like(tensor)
- for _ in range(torch.distributed.get_world_size())]
- torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
-
- output = torch.cat(tensors_gather, dim=0)
- return output
-
-
-from typing import List
-def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key:str):
- uninitialized_encoder_weights: List[str] = []
- if decoder.__class__ != encoder.__class__:
-        print(
- f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
- )
-
- def tie_encoder_to_decoder_recursively(
- decoder_pointer: nn.Module,
- encoder_pointer: nn.Module,
- module_name: str,
- uninitialized_encoder_weights: List[str],
- skip_key: str,
- depth=0,
- ):
- assert isinstance(decoder_pointer, nn.Module) and isinstance(
- encoder_pointer, nn.Module
- ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
- if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
- assert hasattr(encoder_pointer, "weight")
- encoder_pointer.weight = decoder_pointer.weight
- if hasattr(decoder_pointer, "bias"):
- assert hasattr(encoder_pointer, "bias")
- encoder_pointer.bias = decoder_pointer.bias
- print(module_name+' is tied')
- return
-
- encoder_modules = encoder_pointer._modules
- decoder_modules = decoder_pointer._modules
- if len(decoder_modules) > 0:
- assert (
- len(encoder_modules) > 0
- ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
-
- all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
- encoder_layer_pos = 0
- for name, module in decoder_modules.items():
- if name.isdigit():
- encoder_name = str(int(name) + encoder_layer_pos)
- decoder_name = name
- if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
- encoder_modules
- ) != len(decoder_modules):
- # this can happen if the name corresponds to the position in a list module list of layers
- # in this case the decoder has added a cross-attention that the encoder does not have
- # thus skip this step and subtract one layer pos from encoder
- encoder_layer_pos -= 1
- continue
- elif name not in encoder_modules:
- continue
- elif depth > 500:
- raise ValueError(
- "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
- )
- else:
- decoder_name = encoder_name = name
- tie_encoder_to_decoder_recursively(
- decoder_modules[decoder_name],
- encoder_modules[encoder_name],
- module_name + "/" + name,
- uninitialized_encoder_weights,
- skip_key,
- depth=depth + 1,
- )
- all_encoder_weights.remove(module_name + "/" + encoder_name)
-
- uninitialized_encoder_weights += list(all_encoder_weights)
-
- # tie weights recursively
- tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key)
diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/base.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/base.py
deleted file mode 100644
index e21667df4ce4baa6bb6aad9f8679bd756e2ffdb7..0000000000000000000000000000000000000000
--- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/base.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import bisect
-import numpy as np
-import albumentations
-from PIL import Image
-from torch.utils.data import Dataset, ConcatDataset
-
-
-class ConcatDatasetWithIndex(ConcatDataset):
- """Modified from original pytorch code to return dataset idx"""
- def __getitem__(self, idx):
- if idx < 0:
- if -idx > len(self):
- raise ValueError("absolute value of index should not exceed dataset length")
- idx = len(self) + idx
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- return self.datasets[dataset_idx][sample_idx], dataset_idx
-
-
-class ImagePaths(Dataset):
- def __init__(self, paths, size=None, random_crop=False, labels=None):
- self.size = size
- self.random_crop = random_crop
-
- self.labels = dict() if labels is None else labels
- self.labels["file_path_"] = paths
- self._length = len(paths)
-
- if self.size is not None and self.size > 0:
- self.rescaler = albumentations.SmallestMaxSize(max_size = self.size)
- if not self.random_crop:
- self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)
- else:
- self.cropper = albumentations.RandomCrop(height=self.size,width=self.size)
- self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])
- else:
- self.preprocessor = lambda **kwargs: kwargs
-
- def __len__(self):
- return self._length
-
- def preprocess_image(self, image_path):
- image = Image.open(image_path)
- if not image.mode == "RGB":
- image = image.convert("RGB")
- image = np.array(image).astype(np.uint8)
- image = self.preprocessor(image=image)["image"]
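-        # scale pixel values from [0, 255] to [-1, 1]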
- image = (image/127.5 - 1.0).astype(np.float32)
- return image
-
- def __getitem__(self, i):
- example = dict()
- example["image"] = self.preprocess_image(self.labels["file_path_"][i])
- for k in self.labels:
- example[k] = self.labels[k][i]
- return example
-
-
-class NumpyPaths(ImagePaths):
- def preprocess_image(self, image_path):
- image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024
- image = np.transpose(image, (1,2,0))
- image = Image.fromarray(image, mode="RGB")
- image = np.array(image).astype(np.uint8)
- image = self.preprocessor(image=image)["image"]
- image = (image/127.5 - 1.0).astype(np.float32)
- return image
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py
deleted file mode 100644
index ca4ec341184adb3d30f3cd825b49a81b87d29b08..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py
+++ /dev/null
@@ -1,537 +0,0 @@
-from __future__ import absolute_import
-
-import collections
-import functools
-import logging
-
-from ._collections import RecentlyUsedContainer
-from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
-from .exceptions import (
- LocationValueError,
- MaxRetryError,
- ProxySchemeUnknown,
- ProxySchemeUnsupported,
- URLSchemeUnknown,
-)
-from .packages import six
-from .packages.six.moves.urllib.parse import urljoin
-from .request import RequestMethods
-from .util.proxy import connection_requires_http_tunnel
-from .util.retry import Retry
-from .util.url import parse_url
-
-__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
-
-
-log = logging.getLogger(__name__)
-
-SSL_KEYWORDS = (
- "key_file",
- "cert_file",
- "cert_reqs",
- "ca_certs",
- "ssl_version",
- "ca_cert_dir",
- "ssl_context",
- "key_password",
- "server_hostname",
-)
-
-# All known keyword arguments that could be provided to the pool manager, its
-# pools, or the underlying connections. This is used to construct a pool key.
-_key_fields = (
- "key_scheme", # str
- "key_host", # str
- "key_port", # int
- "key_timeout", # int or float or Timeout
- "key_retries", # int or Retry
- "key_strict", # bool
- "key_block", # bool
- "key_source_address", # str
- "key_key_file", # str
- "key_key_password", # str
- "key_cert_file", # str
- "key_cert_reqs", # str
- "key_ca_certs", # str
- "key_ssl_version", # str
- "key_ca_cert_dir", # str
- "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
- "key_maxsize", # int
- "key_headers", # dict
- "key__proxy", # parsed proxy url
- "key__proxy_headers", # dict
- "key__proxy_config", # class
- "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
- "key__socks_options", # dict
- "key_assert_hostname", # bool or string
- "key_assert_fingerprint", # str
- "key_server_hostname", # str
-)
-
-#: The namedtuple class used to construct keys for the connection pool.
-#: All custom key schemes should include the fields in this key at a minimum.
-PoolKey = collections.namedtuple("PoolKey", _key_fields)
-
-_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
-ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
-
-
-def _default_key_normalizer(key_class, request_context):
- """
- Create a pool key out of a request context dictionary.
-
- According to RFC 3986, both the scheme and host are case-insensitive.
- Therefore, this function normalizes both before constructing the pool
- key for an HTTPS request. If you wish to change this behaviour, provide
- alternate callables to ``key_fn_by_scheme``.
-
- :param key_class:
- The class to use when constructing the key. This should be a namedtuple
- with the ``scheme`` and ``host`` keys at a minimum.
- :type key_class: namedtuple
- :param request_context:
-        A dictionary-like object that contains the context for a request.
- :type request_context: dict
-
- :return: A namedtuple that can be used as a connection pool key.
- :rtype: PoolKey
- """
- # Since we mutate the dictionary, make a copy first
- context = request_context.copy()
- context["scheme"] = context["scheme"].lower()
- context["host"] = context["host"].lower()
-
- # These are both dictionaries and need to be transformed into frozensets
- for key in ("headers", "_proxy_headers", "_socks_options"):
- if key in context and context[key] is not None:
- context[key] = frozenset(context[key].items())
-
- # The socket_options key may be a list and needs to be transformed into a
- # tuple.
- socket_opts = context.get("socket_options")
- if socket_opts is not None:
- context["socket_options"] = tuple(socket_opts)
-
- # Map the kwargs to the names in the namedtuple - this is necessary since
- # namedtuples can't have fields starting with '_'.
- for key in list(context.keys()):
- context["key_" + key] = context.pop(key)
-
- # Default to ``None`` for keys missing from the context
- for field in key_class._fields:
- if field not in context:
- context[field] = None
-
- return key_class(**context)
-
-
-#: A dictionary that maps a scheme to a callable that creates a pool key.
-#: This can be used to alter the way pool keys are constructed, if desired.
-#: Each PoolManager makes a copy of this dictionary so they can be configured
-#: globally here, or individually on the instance.
-key_fn_by_scheme = {
- "http": functools.partial(_default_key_normalizer, PoolKey),
- "https": functools.partial(_default_key_normalizer, PoolKey),
-}
-
-pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
-
-
-class PoolManager(RequestMethods):
- """
- Allows for arbitrary requests while transparently keeping track of
- necessary connection pools for you.
-
- :param num_pools:
- Number of connection pools to cache before discarding the least
- recently used pool.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param \\**connection_pool_kw:
- Additional parameters are used to create fresh
- :class:`urllib3.connectionpool.ConnectionPool` instances.
-
- Example::
-
- >>> manager = PoolManager(num_pools=2)
- >>> r = manager.request('GET', 'http://google.com/')
- >>> r = manager.request('GET', 'http://google.com/mail')
- >>> r = manager.request('GET', 'http://yahoo.com/')
- >>> len(manager.pools)
- 2
-
- """
-
- proxy = None
- proxy_config = None
-
- def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
- RequestMethods.__init__(self, headers)
- self.connection_pool_kw = connection_pool_kw
- self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
-
- # Locally set the pool classes and keys so other PoolManagers can
- # override them.
- self.pool_classes_by_scheme = pool_classes_by_scheme
- self.key_fn_by_scheme = key_fn_by_scheme.copy()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.clear()
- # Return False to re-raise any potential exceptions
- return False
-
- def _new_pool(self, scheme, host, port, request_context=None):
- """
- Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
- any additional pool keyword arguments.
-
- If ``request_context`` is provided, it is provided as keyword arguments
- to the pool class used. This method is used to actually create the
- connection pools handed out by :meth:`connection_from_url` and
- companion methods. It is intended to be overridden for customization.
- """
- pool_cls = self.pool_classes_by_scheme[scheme]
- if request_context is None:
- request_context = self.connection_pool_kw.copy()
-
- # Although the context has everything necessary to create the pool,
- # this function has historically only used the scheme, host, and port
- # in the positional args. When an API change is acceptable these can
- # be removed.
- for key in ("scheme", "host", "port"):
- request_context.pop(key, None)
-
- if scheme == "http":
- for kw in SSL_KEYWORDS:
- request_context.pop(kw, None)
-
- return pool_cls(host, port, **request_context)
-
- def clear(self):
- """
- Empty our store of pools and direct them all to close.
-
- This will not affect in-flight connections, but they will not be
- re-used after completion.
- """
- self.pools.clear()
-
- def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
- """
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
-
- If ``port`` isn't given, it will be derived from the ``scheme`` using
- ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
- provided, it is merged with the instance's ``connection_pool_kw``
- variable and used to create the new connection pool, if one is
- needed.
- """
-
- if not host:
- raise LocationValueError("No host specified.")
-
- request_context = self._merge_pool_kwargs(pool_kwargs)
- request_context["scheme"] = scheme or "http"
- if not port:
- port = port_by_scheme.get(request_context["scheme"].lower(), 80)
- request_context["port"] = port
- request_context["host"] = host
-
- return self.connection_from_context(request_context)
-
- def connection_from_context(self, request_context):
- """
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
-
- ``request_context`` must at least contain the ``scheme`` key and its
- value must be a key in ``key_fn_by_scheme`` instance variable.
- """
- scheme = request_context["scheme"].lower()
- pool_key_constructor = self.key_fn_by_scheme.get(scheme)
- if not pool_key_constructor:
- raise URLSchemeUnknown(scheme)
- pool_key = pool_key_constructor(request_context)
-
- return self.connection_from_pool_key(pool_key, request_context=request_context)
-
- def connection_from_pool_key(self, pool_key, request_context=None):
- """
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
-
- ``pool_key`` should be a namedtuple that only contains immutable
- objects. At a minimum it must have the ``scheme``, ``host``, and
- ``port`` fields.
- """
- with self.pools.lock:
- # If the scheme, host, or port doesn't match existing open
- # connections, open a new ConnectionPool.
- pool = self.pools.get(pool_key)
- if pool:
- return pool
-
- # Make a fresh ConnectionPool of the desired type
- scheme = request_context["scheme"]
- host = request_context["host"]
- port = request_context["port"]
- pool = self._new_pool(scheme, host, port, request_context=request_context)
- self.pools[pool_key] = pool
-
- return pool
-
- def connection_from_url(self, url, pool_kwargs=None):
- """
- Similar to :func:`urllib3.connectionpool.connection_from_url`.
-
- If ``pool_kwargs`` is not provided and a new pool needs to be
- constructed, ``self.connection_pool_kw`` is used to initialize
- the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
- is provided, it is used instead. Note that if a new pool does not
- need to be created for the request, the provided ``pool_kwargs`` are
- not used.
- """
- u = parse_url(url)
- return self.connection_from_host(
- u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
- )
-
- def _merge_pool_kwargs(self, override):
- """
- Merge a dictionary of override values for self.connection_pool_kw.
-
- This does not modify self.connection_pool_kw and returns a new dict.
- Any keys in the override dictionary with a value of ``None`` are
- removed from the merged dictionary.
- """
- base_pool_kwargs = self.connection_pool_kw.copy()
- if override:
- for key, value in override.items():
- if value is None:
- try:
- del base_pool_kwargs[key]
- except KeyError:
- pass
- else:
- base_pool_kwargs[key] = value
- return base_pool_kwargs
-
- def _proxy_requires_url_absolute_form(self, parsed_url):
- """
- Indicates if the proxy requires the complete destination URL in the
- request. Normally this is only needed when not using an HTTP CONNECT
- tunnel.
- """
- if self.proxy is None:
- return False
-
- return not connection_requires_http_tunnel(
- self.proxy, self.proxy_config, parsed_url.scheme
- )
-
- def _validate_proxy_scheme_url_selection(self, url_scheme):
- """
-        Validates that we're not attempting to do TLS in TLS connections on
- Python2 or with unsupported SSL implementations.
- """
- if self.proxy is None or url_scheme != "https":
- return
-
- if self.proxy.scheme != "https":
- return
-
- if six.PY2 and not self.proxy_config.use_forwarding_for_https:
- raise ProxySchemeUnsupported(
- "Contacting HTTPS destinations through HTTPS proxies "
- "'via CONNECT tunnels' is not supported in Python 2"
- )
-
- def urlopen(self, method, url, redirect=True, **kw):
- """
- Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
- with custom cross-host redirect logic and only sends the request-uri
- portion of the ``url``.
-
- The given ``url`` parameter must be absolute, such that an appropriate
- :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
- """
- u = parse_url(url)
- self._validate_proxy_scheme_url_selection(u.scheme)
-
- conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
-
- kw["assert_same_host"] = False
- kw["redirect"] = False
-
- if "headers" not in kw:
- kw["headers"] = self.headers.copy()
-
- if self._proxy_requires_url_absolute_form(u):
- response = conn.urlopen(method, url, **kw)
- else:
- response = conn.urlopen(method, u.request_uri, **kw)
-
- redirect_location = redirect and response.get_redirect_location()
- if not redirect_location:
- return response
-
- # Support relative URLs for redirecting.
- redirect_location = urljoin(url, redirect_location)
-
- # RFC 7231, Section 6.4.4
- if response.status == 303:
- method = "GET"
-
- retries = kw.get("retries")
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect)
-
- # Strip headers marked as unsafe to forward to the redirected location.
- # Check remove_headers_on_redirect to avoid a potential network call within
- # conn.is_same_host() which may use socket.gethostbyname() in the future.
- if retries.remove_headers_on_redirect and not conn.is_same_host(
- redirect_location
- ):
- headers = list(six.iterkeys(kw["headers"]))
- for header in headers:
- if header.lower() in retries.remove_headers_on_redirect:
- kw["headers"].pop(header, None)
-
- try:
- retries = retries.increment(method, url, response=response, _pool=conn)
- except MaxRetryError:
- if retries.raise_on_redirect:
- response.drain_conn()
- raise
- return response
-
- kw["retries"] = retries
- kw["redirect"] = redirect
-
- log.info("Redirecting %s -> %s", url, redirect_location)
-
- response.drain_conn()
- return self.urlopen(method, redirect_location, **kw)
-
-
-class ProxyManager(PoolManager):
- """
- Behaves just like :class:`PoolManager`, but sends all requests through
- the defined proxy, using the CONNECT method for HTTPS URLs.
-
- :param proxy_url:
- The URL of the proxy to be used.
-
- :param proxy_headers:
- A dictionary containing headers that will be sent to the proxy. In case
- of HTTP they are being sent with each request, while in the
- HTTPS/CONNECT case they are sent only once. Could be used for proxy
- authentication.
-
- :param proxy_ssl_context:
- The proxy SSL context is used to establish the TLS connection to the
- proxy when using HTTPS proxies.
-
- :param use_forwarding_for_https:
- (Defaults to False) If set to True will forward requests to the HTTPS
- proxy to be made on behalf of the client instead of creating a TLS
- tunnel via the CONNECT method. **Enabling this flag means that request
- and response headers and content will be visible from the HTTPS proxy**
- whereas tunneling keeps request and response headers and content
- private. IP address, target hostname, SNI, and port are always visible
- to an HTTPS proxy even when this flag is disabled.
-
- Example:
- >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
- >>> r1 = proxy.request('GET', 'http://google.com/')
- >>> r2 = proxy.request('GET', 'http://httpbin.org/')
- >>> len(proxy.pools)
- 1
- >>> r3 = proxy.request('GET', 'https://httpbin.org/')
- >>> r4 = proxy.request('GET', 'https://twitter.com/')
- >>> len(proxy.pools)
- 3
-
- """
-
- def __init__(
- self,
- proxy_url,
- num_pools=10,
- headers=None,
- proxy_headers=None,
- proxy_ssl_context=None,
- use_forwarding_for_https=False,
- **connection_pool_kw
- ):
-
- if isinstance(proxy_url, HTTPConnectionPool):
- proxy_url = "%s://%s:%i" % (
- proxy_url.scheme,
- proxy_url.host,
- proxy_url.port,
- )
- proxy = parse_url(proxy_url)
-
- if proxy.scheme not in ("http", "https"):
- raise ProxySchemeUnknown(proxy.scheme)
-
- if not proxy.port:
- port = port_by_scheme.get(proxy.scheme, 80)
- proxy = proxy._replace(port=port)
-
- self.proxy = proxy
- self.proxy_headers = proxy_headers or {}
- self.proxy_ssl_context = proxy_ssl_context
- self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
-
- connection_pool_kw["_proxy"] = self.proxy
- connection_pool_kw["_proxy_headers"] = self.proxy_headers
- connection_pool_kw["_proxy_config"] = self.proxy_config
-
- super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
-
- def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
- if scheme == "https":
- return super(ProxyManager, self).connection_from_host(
- host, port, scheme, pool_kwargs=pool_kwargs
- )
-
- return super(ProxyManager, self).connection_from_host(
- self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
- )
-
- def _set_proxy_headers(self, url, headers=None):
- """
- Sets headers needed by proxies: specifically, the Accept and Host
- headers. Only sets headers not provided by the user.
- """
- headers_ = {"Accept": "*/*"}
-
- netloc = parse_url(url).netloc
- if netloc:
- headers_["Host"] = netloc
-
- if headers:
- headers_.update(headers)
- return headers_
-
- def urlopen(self, method, url, redirect=True, **kw):
- "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
- u = parse_url(url)
- if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
- # For connections using HTTP CONNECT, httplib sets the necessary
- # headers on the CONNECT to the proxy. If we're not using CONNECT,
- # we'll definitely need to set 'Host' at the very least.
- headers = kw.get("headers", self.headers)
- kw["headers"] = self._set_proxy_headers(url, headers)
-
- return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
-def proxy_from_url(url, **kw):
- return ProxyManager(proxy_url=url, **kw)
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/alias.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/alias.py
deleted file mode 100644
index 452a9244ea6766d8cf94425fb583583ef740baee..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/alias.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from distutils.errors import DistutilsOptionError
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
- """Quote an argument for later parsing by shlex.split()"""
- for c in '"', "'", "\\", "#":
- if c in arg:
- return repr(arg)
- if arg.split() != [arg]:
- return repr(arg)
- return arg
-
-
-class alias(option_base):
- """Define a shortcut that invokes one or more commands"""
-
- description = "define a shortcut to invoke one or more commands"
- command_consumes_arguments = True
-
- user_options = [
- ('remove', 'r', 'remove (unset) the alias'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.args = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.remove and len(self.args) != 1:
- raise DistutilsOptionError(
- "Must specify exactly one argument (the alias name) when "
- "using --remove"
- )
-
- def run(self):
- aliases = self.distribution.get_option_dict('aliases')
-
- if not self.args:
- print("Command Aliases")
- print("---------------")
- for alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
-
- elif len(self.args) == 1:
- alias, = self.args
- if self.remove:
- command = None
- elif alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
- else:
- print("No alias definition found for %r" % alias)
- return
- else:
- alias = self.args[0]
- command = ' '.join(map(shquote, self.args[1:]))
-
- edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
- source, command = aliases[name]
- if source == config_file('global'):
- source = '--global-config '
- elif source == config_file('user'):
- source = '--user-config '
- elif source == config_file('local'):
- source = ''
- else:
- source = '--filename=%r' % source
- return source + name + ' ' + command
diff --git a/spaces/Rbrq/DeticChatGPT/tools/get_coco_zeroshot_oriorder.py b/spaces/Rbrq/DeticChatGPT/tools/get_coco_zeroshot_oriorder.py
deleted file mode 100644
index ed6748be1f2ed92741ea78f5a187f9838185a80e..0000000000000000000000000000000000000000
--- a/spaces/Rbrq/DeticChatGPT/tools/get_coco_zeroshot_oriorder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import argparse
-import json
-
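-# Assign the category list (in its original order) from the full COCO annotations to the zero-shot annotation file.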
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--data_path', default='datasets/coco/annotations/instances_val2017_unseen_2.json')
- parser.add_argument('--cat_path', default='datasets/coco/annotations/instances_val2017.json')
- args = parser.parse_args()
- print('Loading', args.cat_path)
- cat = json.load(open(args.cat_path, 'r'))['categories']
-
- print('Loading', args.data_path)
- data = json.load(open(args.data_path, 'r'))
- data['categories'] = cat
- out_path = args.data_path[:-5] + '_oriorder.json'
- print('Saving to', out_path)
- json.dump(data, open(out_path, 'w'))
diff --git a/spaces/Realcat/image-matching-webui/hloc/pipelines/7Scenes/__init__.py b/spaces/Realcat/image-matching-webui/hloc/pipelines/7Scenes/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Redgon/bingo/src/lib/utils.ts b/spaces/Redgon/bingo/src/lib/utils.ts
deleted file mode 100644
index 0a09ddc4aa5518f681a00a64ad48566516f35417..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/lib/utils.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import { clsx, type ClassValue } from 'clsx'
-import { customAlphabet } from 'nanoid'
-import { twMerge } from 'tailwind-merge'
-
-export function cn(...inputs: ClassValue[]) {
- return twMerge(clsx(inputs))
-}
-
-export const nanoid = customAlphabet(
- '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
- 7
-) // 7-character random string
-
-export function createChunkDecoder() {
- const decoder = new TextDecoder()
- return function (chunk: Uint8Array | undefined): string {
- if (!chunk) return ''
- return decoder.decode(chunk, { stream: true })
- }
-}
-
-export function random (start: number, end: number) {
- return start + Math.ceil(Math.random() * (end - start))
-}
-
-export function randomIP() {
- return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}`
-}
-
-export const defaultUID = Math.random().toString(36).slice(2)
-
-export function parseHeadersFromCurl(content: string) {
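-  // extract "-H 'Header: value'" pairs from a copied curl command into a headers object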
- const re = /-H '([^:]+):\s*([^']+)/mg
- const headers: HeadersInit = {}
-  content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // convert cmd-style curl to bash-style curl
- content.replace(re, (_: string, key: string, value: string) => {
- headers[key] = value
- return ''
- })
-
- return headers
-}
-
-export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2']
-export function encodeHeadersToCookie(content: string) {
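-  // base64-encode the headers and split them into <= 4000-character chunks so each piece fits in a single cookie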
- const base64Content = btoa(content)
- const contentChunks = base64Content.match(/.{1,4000}/g) || []
- return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`)
-}
-
-export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) {
- let base64Content = ''
- ChunkKeys.forEach((key) => {
- base64Content += (cookies[key] || '')
- })
- try {
- return atob(base64Content)
- } catch(e) {
- return ''
- }
-}
-
-export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) {
- return parseHeadersFromCurl(extraCurlFromCookie(cookies))
-}
-
-export function formatDate(input: string | number | Date): string {
- const date = new Date(input)
- return date.toLocaleDateString('en-US', {
- month: 'long',
- day: 'numeric',
- year: 'numeric'
- })
-}
-
-export function parseCookie(cookie: string, cookieName: string) {
- const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie
- return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : ''
-}
-
-export function setCookie(key: string, value: string) {
- const maxAge = 86400 * 30
- document.cookie = `${key}=${value || ''}; Path=/; Max-Age=${maxAge}; SameSite=None; Secure`
-}
-
-export function getCookie(cookieName: string) {
- const re = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`)
- return re.test(document.cookie) ? RegExp.$1 : ''
-}
-
-export function parseCookies(cookie: string, cookieNames: string[]) {
- const cookies: { [key: string]: string } = {}
- cookieNames.forEach(cookieName => {
- cookies[cookieName] = parseCookie(cookie, cookieName)
- })
- return cookies
-}
-
-export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0'
-export const DEFAULT_IP = process.env.BING_IP || randomIP()
-
-export function parseUA(ua?: string, default_ua = DEFAULT_UA) {
- return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua
-}
-
-export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>, type?: string) {
- let {
- BING_COOKIE = process.env.BING_COOKIE,
- BING_UA = process.env.BING_UA,
- BING_IP = process.env.BING_IP,
- BING_HEADER = process.env.BING_HEADER,
- IMAGE_ONLY = process.env.IMAGE_ONLY ?? '1',
- } = cookies
-
- if (BING_HEADER) {
- const headers = extraHeadersFromCookie({
- BING_HEADER,
- ...cookies,
- }) || {}
- if (/^(1|true|yes)$/.test(String(IMAGE_ONLY)) && type !== 'image') {
-      // only use the real cookie when generating images
- headers.cookie = `_U=${defaultUID}`
- }
- if (headers['user-agent']) {
- return headers
- }
- }
-
- const ua = parseUA(BING_UA)
-
- if (!BING_COOKIE) {
-    BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || defaultUID // on HF this currently works even without a real cookie
- }
-
- const parsedCookie = parseCookie(BING_COOKIE, '_U')
- if (!parsedCookie) {
- throw new Error('Invalid Cookie')
- }
- return {
- 'x-forwarded-for': BING_IP || DEFAULT_IP,
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
- 'User-Agent': ua!,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: `_U=${parsedCookie}` || '',
- }
-}
-
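-// Simple watchdog: watch() schedules fn after roughly timeout ms (plus jitter); calling watch() again or reset() cancels the pending call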
-export class WatchDog {
- private tid = 0
- watch(fn: Function, timeout = 2000) {
- clearTimeout(this.tid)
- this.tid = setTimeout(fn, timeout + Math.random() * 1000)
- }
- reset() {
- clearTimeout(this.tid)
- }
-}
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fcos.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fcos.py
deleted file mode 100644
index 58485c1864a11a66168b7597f345ea759ce20551..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fcos.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from ..builder import DETECTORS
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class FCOS(SingleStageDetector):
-    """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
-
- def __init__(self,
- backbone,
- neck,
- bbox_head,
- train_cfg=None,
- test_cfg=None,
- pretrained=None):
- super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
- test_cfg, pretrained)
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py
deleted file mode 100644
index 145622b64e3f0b3f7f518fc61a2a01348ebfa4f3..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py
+++ /dev/null
@@ -1,265 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import xavier_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (build_anchor_generator, build_assigner,
- build_bbox_coder, build_sampler, multi_apply)
-from ..builder import HEADS
-from ..losses import smooth_l1_loss
-from .anchor_head import AnchorHead
-
-
-# TODO: add loss evaluator for SSD
-@HEADS.register_module()
-class SSDHead(AnchorHead):
- """SSD head used in https://arxiv.org/abs/1512.02325.
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- anchor_generator (dict): Config dict for anchor generator
- bbox_coder (dict): Config of bounding box coder.
- reg_decoded_bbox (bool): If true, the regression loss would be
- applied directly on decoded bounding boxes, converting both
- the predicted boxes and regression targets to absolute
- coordinates format. Default False. It should be `True` when
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
- train_cfg (dict): Training config of anchor head.
- test_cfg (dict): Testing config of anchor head.
- """ # noqa: W605
-
- def __init__(self,
- num_classes=80,
- in_channels=(512, 1024, 512, 256, 256, 256),
- anchor_generator=dict(
- type='SSDAnchorGenerator',
- scale_major=False,
- input_size=300,
- strides=[8, 16, 32, 64, 100, 300],
- ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
- basesize_ratio_range=(0.1, 0.9)),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- clip_border=True,
- target_means=[.0, .0, .0, .0],
- target_stds=[1.0, 1.0, 1.0, 1.0],
- ),
- reg_decoded_bbox=False,
- train_cfg=None,
- test_cfg=None):
- super(AnchorHead, self).__init__()
- self.num_classes = num_classes
- self.in_channels = in_channels
- self.cls_out_channels = num_classes + 1 # add background class
- self.anchor_generator = build_anchor_generator(anchor_generator)
- num_anchors = self.anchor_generator.num_base_anchors
-
- reg_convs = []
- cls_convs = []
- for i in range(len(in_channels)):
- reg_convs.append(
- nn.Conv2d(
- in_channels[i],
- num_anchors[i] * 4,
- kernel_size=3,
- padding=1))
- cls_convs.append(
- nn.Conv2d(
- in_channels[i],
- num_anchors[i] * (num_classes + 1),
- kernel_size=3,
- padding=1))
- self.reg_convs = nn.ModuleList(reg_convs)
- self.cls_convs = nn.ModuleList(cls_convs)
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.reg_decoded_bbox = reg_decoded_bbox
- self.use_sigmoid_cls = False
- self.cls_focal_loss = False
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
-        # set sampling=False for anchor_target
- self.sampling = False
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- # SSD sampling=False so use PseudoSampler
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
- self.fp16_enabled = False
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- xavier_init(m, distribution='uniform', bias=0)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple:
- cls_scores (list[Tensor]): Classification scores for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
- levels, each is a 4D-tensor, the channels number is
- num_anchors * 4.
- """
- cls_scores = []
- bbox_preds = []
- for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
- self.cls_convs):
- cls_scores.append(cls_conv(feat))
- bbox_preds.append(reg_conv(feat))
- return cls_scores, bbox_preds
-
- def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
- bbox_targets, bbox_weights, num_total_samples):
- """Compute loss of a single image.
-
- Args:
-            cls_score (Tensor): Box scores for each image
- Has shape (num_total_anchors, num_classes).
- bbox_pred (Tensor): Box energies / deltas for each image
- level with shape (num_total_anchors, 4).
- anchors (Tensor): Box reference for each scale level with shape
- (num_total_anchors, 4).
- labels (Tensor): Labels of each anchors with shape
- (num_total_anchors,).
- label_weights (Tensor): Label weights of each anchor with shape
- (num_total_anchors,)
-            bbox_targets (Tensor): BBox regression targets of each anchor with
- shape (num_total_anchors, 4).
- bbox_weights (Tensor): BBox regression loss weights of each anchor
- with shape (num_total_anchors, 4).
- num_total_samples (int): If sampling, num total samples equal to
- the number of total anchors; Otherwise, it is the number of
- positive anchors.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
-
- loss_cls_all = F.cross_entropy(
- cls_score, labels, reduction='none') * label_weights
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- pos_inds = ((labels >= 0) &
- (labels < self.num_classes)).nonzero().reshape(-1)
- neg_inds = (labels == self.num_classes).nonzero().view(-1)
-
- num_pos_samples = pos_inds.size(0)
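-        # hard negative mining: keep at most neg_pos_ratio * num_pos negatives with the largest classification loss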
- num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
- if num_neg_samples > neg_inds.size(0):
- num_neg_samples = neg_inds.size(0)
- topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
- loss_cls_pos = loss_cls_all[pos_inds].sum()
- loss_cls_neg = topk_loss_cls_neg.sum()
- loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
-
- if self.reg_decoded_bbox:
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
- # is applied directly on the decoded bounding boxes, it
- # decodes the already encoded coordinates to absolute format.
- bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
-
- loss_bbox = smooth_l1_loss(
- bbox_pred,
- bbox_targets,
- bbox_weights,
- beta=self.train_cfg.smoothl1_beta,
- avg_factor=num_total_samples)
- return loss_cls[None], loss_bbox
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level
- Has shape (N, num_anchors * num_classes, H, W)
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_anchors * 4, H, W)
-            gt_bboxes (list[Tensor]): Ground truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
-
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=1,
- unmap_outputs=False)
- if cls_reg_targets is None:
- return None
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
- num_total_pos, num_total_neg) = cls_reg_targets
-
- num_images = len(img_metas)
- all_cls_scores = torch.cat([
- s.permute(0, 2, 3, 1).reshape(
- num_images, -1, self.cls_out_channels) for s in cls_scores
- ], 1)
- all_labels = torch.cat(labels_list, -1).view(num_images, -1)
- all_label_weights = torch.cat(label_weights_list,
- -1).view(num_images, -1)
- all_bbox_preds = torch.cat([
- b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
- for b in bbox_preds
- ], -2)
- all_bbox_targets = torch.cat(bbox_targets_list,
- -2).view(num_images, -1, 4)
- all_bbox_weights = torch.cat(bbox_weights_list,
- -2).view(num_images, -1, 4)
-
- # concat all level anchors to a single tensor
- all_anchors = []
- for i in range(num_images):
- all_anchors.append(torch.cat(anchor_list[i]))
-
- # check NaN and Inf
- assert torch.isfinite(all_cls_scores).all().item(), \
- 'classification scores become infinite or NaN!'
- assert torch.isfinite(all_bbox_preds).all().item(), \
-            'bbox predictions become infinite or NaN!'
-
- losses_cls, losses_bbox = multi_apply(
- self.loss_single,
- all_cls_scores,
- all_bbox_preds,
- all_anchors,
- all_labels,
- all_label_weights,
- all_bbox_targets,
- all_bbox_weights,
- num_total_samples=num_total_pos)
- return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
diff --git a/spaces/Rojastopher/Image-to-3D/README.md b/spaces/Rojastopher/Image-to-3D/README.md
deleted file mode 100644
index 243f6cf265f7fba001aa2f2065af966fbc9aca20..0000000000000000000000000000000000000000
--- a/spaces/Rojastopher/Image-to-3D/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Point-e Demo
-emoji: 🐢
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
-duplicated_from: AP123/text-to-3D
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/thai.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/thai.py
deleted file mode 100644
index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000
--- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/thai.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import re
-from num_thai.thainumbers import NumThai
-
-
-num = NumThai()
-
-# List of (Latin alphabet, Thai) pairs:
-_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'เอ'),
- ('b','บี'),
- ('c','ซี'),
- ('d','ดี'),
- ('e','อี'),
- ('f','เอฟ'),
- ('g','จี'),
- ('h','เอช'),
- ('i','ไอ'),
- ('j','เจ'),
- ('k','เค'),
- ('l','แอล'),
- ('m','เอ็ม'),
- ('n','เอ็น'),
- ('o','โอ'),
- ('p','พี'),
- ('q','คิว'),
- ('r','แอร์'),
- ('s','เอส'),
- ('t','ที'),
- ('u','ยู'),
- ('v','วี'),
- ('w','ดับเบิลยู'),
- ('x','เอ็กซ์'),
- ('y','วาย'),
- ('z','ซี')
-]]
-
-
-def num_to_thai(text):
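-    # convert digit sequences (optionally with thousands separators and decimals) into Thai number words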
- return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
-
-def latin_to_thai(text):
- for regex, replacement in _latin_to_thai:
- text = re.sub(regex, replacement, text)
- return text
diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/logger.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/logger.py
deleted file mode 100644
index 4fee1a2b221c4d219206fd8f3201db3b52566adb..0000000000000000000000000000000000000000
--- a/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/logger.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import datetime
-import logging
-import time
-
-
-class MessageLogger():
- """Message logger for printing.
-
- Args:
- opt (dict): Config. It contains the following keys:
- name (str): Exp name.
- logger (dict): Contains 'print_freq' (str) for logger interval.
- train (dict): Contains 'niter' (int) for total iters.
- use_tb_logger (bool): Use tensorboard logger.
- start_iter (int): Start iter. Default: 1.
- tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
- """
-
- def __init__(self, opt, start_iter=1, tb_logger=None):
- self.exp_name = opt['name']
- self.interval = opt['print_freq']
- self.start_iter = start_iter
- self.max_iters = opt['max_iters']
- self.use_tb_logger = opt['use_tb_logger']
- self.tb_logger = tb_logger
- self.start_time = time.time()
- self.logger = get_root_logger()
-
- def __call__(self, log_vars):
- """Format logging message.
-
- Args:
- log_vars (dict): It contains the following keys:
- epoch (int): Epoch number.
- iter (int): Current iter.
- lrs (list): List for learning rates.
-
- time (float): Iter time.
- data_time (float): Data time for each iter.
- """
- # epoch, iter, learning rates
- epoch = log_vars.pop('epoch')
- current_iter = log_vars.pop('iter')
- lrs = log_vars.pop('lrs')
-
- message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, '
- f'iter:{current_iter:8,d}, lr:(')
- for v in lrs:
- message += f'{v:.3e},'
- message += ')] '
-
- # time and estimated time
- if 'time' in log_vars.keys():
- iter_time = log_vars.pop('time')
- data_time = log_vars.pop('data_time')
-
- total_time = time.time() - self.start_time
- time_sec_avg = total_time / (current_iter - self.start_iter + 1)
- eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
- eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
- message += f'[eta: {eta_str}, '
- message += f'time: {iter_time:.3f}, data_time: {data_time:.3f}] '
-
- # other items, especially losses
- for k, v in log_vars.items():
- message += f'{k}: {v:.4e} '
- # tensorboard logger
- if self.use_tb_logger and 'debug' not in self.exp_name:
- self.tb_logger.add_scalar(k, v, current_iter)
-
- self.logger.info(message)
-
-
-def init_tb_logger(log_dir):
- from torch.utils.tensorboard import SummaryWriter
- tb_logger = SummaryWriter(log_dir=log_dir)
- return tb_logger
-
-
-def get_root_logger(logger_name='base', log_level=logging.INFO, log_file=None):
- """Get the root logger.
-
- The logger will be initialized if it has not been initialized. By default a
- StreamHandler will be added. If `log_file` is specified, a FileHandler will
- also be added.
-
- Args:
- logger_name (str): root logger name. Default: base.
- log_file (str | None): The log filename. If specified, a FileHandler
- will be added to the root logger.
- log_level (int): The root logger level. Note that only the process of
- rank 0 is affected, while other processes will set the level to
- "Error" and be silent most of the time.
-
- Returns:
- logging.Logger: The root logger.
- """
- logger = logging.getLogger(logger_name)
- # if the logger has been initialized, just return it
- if logger.hasHandlers():
- return logger
-
- format_str = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'
- logging.basicConfig(format=format_str, level=log_level)
-
- if log_file is not None:
- file_handler = logging.FileHandler(log_file, 'w')
- file_handler.setFormatter(logging.Formatter(format_str))
- file_handler.setLevel(log_level)
- logger.addHandler(file_handler)
-
- return logger
diff --git a/spaces/Sakil/question_answering_app/app.py b/spaces/Sakil/question_answering_app/app.py
deleted file mode 100644
index da99ac7b991f3991d188632bb733d858807333a2..0000000000000000000000000000000000000000
--- a/spaces/Sakil/question_answering_app/app.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-title = 'Question Answering APP'
-context = "My name is Sakil Ansari. I love tea and drink it regularly"
-question = "What do I love?"
-
-
-question_answerer = pipeline("question-answering")
-# result = question_answerer(question=question, context=context)
-# return result['answer']
-
-
-
-
-interface = gr.Interface.from_pipeline(question_answerer,
- title = title,
- theme = "grass",
- examples = [[context, question]]).launch()
\ No newline at end of file
diff --git a/spaces/Sanathkumar1603/hackathon/app/Hackathon_setup/exp_recognition.py b/spaces/Sanathkumar1603/hackathon/app/Hackathon_setup/exp_recognition.py
deleted file mode 100644
index a387ce80f32f5b804c5e61e5c115811337e4f48e..0000000000000000000000000000000000000000
--- a/spaces/Sanathkumar1603/hackathon/app/Hackathon_setup/exp_recognition.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import numpy as np
-import cv2
-from matplotlib import pyplot as plt
-import torch
-# In the below line,remove '.' while working on your local system.However Make sure that '.' is present before face_recognition_model while uploading to the server, Do not remove it.
-from .exp_recognition_model import *
-from PIL import Image
-import base64
-import io
-import torchvision.transforms as transforms
-import os
-## Add more imports if required
-
-#############################################################################################################################
-# Caution: Don't change any of the filenames, function names and definitions #
-# Always use the current_path + file_name for refering any files, without it we cannot access files on the server #
-#############################################################################################################################
-
-# Current_path stores absolute path of the file from where it runs.
-current_path = os.path.dirname(os.path.abspath(__file__))
-
-
-#1) The below function is used to detect faces in the given image.
-#2) It returns only one image which has maximum area out of all the detected faces in the photo.
-#3) If no face is detected,then it returns zero(0).
-
-def detected_face(image):
- eye_haar = current_path + '/haarcascade_eye.xml'
- face_haar = current_path + '/haarcascade_frontalface_default.xml'
- face_cascade = cv2.CascadeClassifier(face_haar)
- eye_cascade = cv2.CascadeClassifier(eye_haar)
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- faces = face_cascade.detectMultiScale(gray, 1.3, 5)
- face_areas=[]
- images = []
- required_image=0
- for i, (x,y,w,h) in enumerate(faces):
- face_cropped = gray[y:y+h, x:x+w]
- face_areas.append(w*h)
- images.append(face_cropped)
- required_image = images[np.argmax(face_areas)]
- required_image = Image.fromarray(required_image)
- return required_image
-
-
-#1) Images captured from a mobile device are passed to the function below in the API call; it returns the expression detected by your network.
-#2) The image is passed to the function in base64 encoding; code for decoding the image is provided within the function.
-#3) Define an object of your network in the function, load the weights of the trained network, and set it to evaluation mode.
-#4) Perform the necessary transformations on the input (the face detected by the function above); the function should return the expression as a string, e.g. "Anger".
-#5) For loading your model use current_path + 'your model file name'; a detailed example is given in the comments inside the function.
-##Caution: Don't change the definition or function name; for loading the model use current_path, as in the example given in the comments inside the function.
-def get_expression(img):
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
- ##########################################################################################
- ##Example for loading a model using weight state dictionary: ##
- ## face_det_net = facExpRec() #Example Network ##
- ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device) ##
- ## face_det_net.load_state_dict(model['net_dict']) ##
- ## ##
- ##current_path + '/' is path of the saved model if present in ##
- ##the same path as this file, we recommend to put in the same directory ##
- ##########################################################################################
- ##########################################################################################
- face_det_net = facExpRec() #Example Network ##
- model = torch.load(current_path + '/expression_model.t7', map_location=device) ##
- face_det_net.load_state_dict(model['net_dict'])
-    face_det_net.to(device)
-    face_det_net.eval()
-    face = detected_face(img)
-    if face == 0:
-        # No face found: fall back to the full grayscale frame.
-        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
-
-    # Minimal preprocessing; extend with Resize/Normalize if the trained network expects them.
-    trnscm = transforms.Compose([transforms.ToTensor()])
-    img1 = trnscm(face).unsqueeze(0).to(device)
-
-    with torch.no_grad():
-        output = face_det_net(img1)
-    predicted = torch.argmax(output, dim=1).item()
-    # Map the predicted index to its label string. `classes` is assumed to be
-    # provided by exp_recognition_model (imported above with *); otherwise fall
-    # back to the raw index as a string.
-    return classes[predicted] if 'classes' in globals() else str(predicted)
\ No newline at end of file
diff --git a/spaces/SantiagoMoreno-UdeA/NER_RC/src/graph/GUI.py b/spaces/SantiagoMoreno-UdeA/NER_RC/src/graph/GUI.py
deleted file mode 100644
index f9e36543831e2bc00c75a6e80dacc8bef83c9ec3..0000000000000000000000000000000000000000
--- a/spaces/SantiagoMoreno-UdeA/NER_RC/src/graph/GUI.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Nov 11 16:01:08 2022
-
-@author: Santiago Moreno
-"""
-
-import os
-import gradio as gr
-import sys
-import json
-
-
-default_path = os.path.dirname(os.path.abspath(__file__))
-#default_path = default_path.replace('\\', '/')
-
-os.chdir(default_path)
-sys.path.insert(0, default_path+'/../scripts')
-
-from src.scripts.functionsner import use_model, tag_sentence, json_to_txt, training_model, characterize_data, upsampling_data, usage_cuda, copy_data
-from src.scripts.functionsrc import use_model_rc, training_model_rc, usage_cuda_rc
-
-models_NER = os.listdir(default_path+'/../../models/NER')
-models_rc = os.listdir(default_path+'/../../models/RC')
-
-#-------------------------------------------Functions-----------------------------------------------
-
-#--------------------------------------NER-----------------------------------
-def Trainer(fast, model_name, standard, input_dir, Upsampling, Cuda):
- if fast: epochs = 1
- else: epochs = 20
-
- if Cuda:
- cuda_info = usage_cuda(True)
- else:
- cuda_info = usage_cuda(False)
-
-
- if standard:
- copy_data(input_dir)
- else:
- Error = json_to_txt(input_dir)
- if type(Error)==int:
- yield 'Error processing the input documents, code error {}'.format(Error)
- if Upsampling:
- yield cuda_info+'\n'+'-'*20+'Upsampling'+'-'*20
- entities_dict=characterize_data()
- entities = list(entities_dict.keys())
- entities_to_upsample = [entities[i] for i,value in enumerate(entities_dict.values()) if value < 200]
- upsampling_data(entities_to_upsample, 0.8, entities)
- yield '-'*20+'Training'+'-'*20
- else:
- yield cuda_info+'\n'+'-'*20+'Training'+'-'*20
- Error = training_model(model_name, epochs)
- if type(Error)==int:
- yield 'Error training the model, code error {}'.format(Error)
- else:
- yield 'Training complete, model {} could be found at models/{}'.format(model_name,model_name)
-
-
-def Tagger_sentence(Model, Sentence, Cuda):
- if Cuda: cuda_info = usage_cuda(True)
- else: cuda_info = usage_cuda(False)
- yield cuda_info+'\n'+'-'*20+'Tagging'+'-'*20
- results = tag_sentence(Sentence, Model)
- if type(results)==int:
- yield "Error {}, see documentation".format(results)
- else:
- yield results['Highligth']
-
-def Tagger_json(Model, Input_file, Output_file, Cuda):
- if Cuda: cuda_info = usage_cuda(True)
- else: cuda_info = usage_cuda(False)
-
- with open(Output_file, "w", encoding='utf-8') as write_file:
- json.dump({'error':'error'}, write_file)
-
- yield cuda_info+'\n'+'-'*20+'Tagging'+'-'*20, {}, Output_file
-
- results = use_model(Model, Input_file.name, Output_file)
- if type(results)==int:
- error_dict = {}
- yield "Error {}, see documentation".format(results), error_dict, Output_file
- else:
- yield { "text" : results['text'], 'entities': results['entities']}, results, Output_file
-
-
-#--------------------RC-------------------------------
-def Trainer_RC(fast, model_name, input_file, rel2id_file, Cuda):
- if fast: epochs = 1
- else: epochs = 200
-
- if Cuda:
- cuda_info = usage_cuda_rc(True)
- else:
- cuda_info = usage_cuda_rc(False)
-
-
- yield cuda_info+'\n'+'-'*20+'Training'+'-'*20
- Error = training_model_rc(model_name, input_file.name, rel2id_file.name ,epochs)
- if type(Error)==int:
- yield 'Error training the model, code error {}'.format(Error)
- else:
- yield 'Training complete, model {} could be found at models/{}'.format(model_name,model_name)
-
-
-def Tagger_document_RC(Model, Input_file, Output_file, Cuda):
- if Cuda: cuda_info = usage_cuda_rc(True)
- else: cuda_info = usage_cuda_rc(False)
-
- with open(Output_file, "w", encoding='utf-8') as write_file:
- json.dump({'error':'error'}, write_file)
-
- yield {'cuda':cuda_info}, Output_file
-
- results = use_model_rc(Model, Input_file.name, Output_file)
- if type(results)==int:
- error_dict = {}
- yield error_dict, Output_file
- else:
- yield results, Output_file
-
-
-#---------------------------------GUI-------------------------------------
-def execute_GUI():
- global models_NER
- with gr.Blocks(title='NER', css="#title {font-size: 150% } #sub {font-size: 120% } ") as demo:
-
- gr.Markdown("Named Entity Recognition(NER) and Relation Classification (RC) by GITA and Pratec Group S.A.S.",elem_id="title")
- gr.Markdown("Software developed by Santiago Moreno, Daniel Escobar, and Rafael Orozco",elem_id="sub")
- gr.Markdown("Named Entity Recognition(NER) and Relation Classification (RC) System.")
-
- with gr.Tab("NER"):
- gr.Markdown("Use Tagger to apply NER from a pretrained model in a sentence or a given document in INPUT (.JSON) format.")
- gr.Markdown("Use Trainer to train a new NER model from a directory of documents in PRATECH (.JSON) format.")
- with gr.Tab("Tagger"):
- with gr.Tab("Sentence"):
- with gr.Row():
- with gr.Column():
- b = gr.Radio(list(models_NER), label='Model')
- inputs =[
- b,
- gr.Textbox(placeholder="Enter sentence here...", label='Sentence'),
- gr.Radio([True,False], label='CUDA', value=False),
- ]
- tagger_sen = gr.Button("Tag")
- output = gr.HighlightedText()
-
-
-
- tagger_sen.click(Tagger_sentence, inputs=inputs, outputs=output)
- b.change(fn=lambda value: gr.update(choices=list(os.listdir('../../models/NER'))), inputs=b, outputs=b)
- gr.Examples(
-
- examples=[
- ['CCC',"Camara de comercio de medellín. El ciudadano JAIME JARAMILLO VELEZ identificado con C.C. 12546987 ingresó al plantel el día 1/01/2022"],
- ['CCC',"Razón Social GASEOSAS GLACIAR S.A.S, ACTIVIDAD PRINCIPAL fabricación y distribución de bebidas endulzadas"]
- ],
- inputs=inputs
- )
-
-
- with gr.Tab("Document"):
- with gr.Row():
- with gr.Column():
- c = gr.Radio(list(models_NER), label='Model')
- inputs =[
- c,
- gr.File(label='Input data file'),
- gr.Textbox(placeholder="Enter path here...", label='Output data file path'), #value='../../data/Tagged/document_tagged.json'),
- gr.Radio([True,False], label='CUDA', value=False),
- ]
- tagger_json = gr.Button("Tag")
- output = [
- gr.HighlightedText(),
- gr.JSON(),
- gr.File(),
- ]
-
- models_NER = os.listdir(default_path+'/../../models/NER')
-
-
- tagger_json.click(Tagger_json, inputs=inputs, outputs=output)
- c.change(fn=lambda value: gr.update(choices=list(os.listdir('../../models/NER'))), inputs=c, outputs=c)
-
-
- with gr.Tab("Trainer"):
- with gr.Row():
- with gr.Column():
- train_input = inputs =[
- gr.Radio([True,False], label='Fast training', value=False),
- gr.Textbox(placeholder="Enter model name here...", label='New model name'),
- gr.Radio([True,False], label='Standard input', value=False),
- gr.Textbox(placeholder="Enter path here...", label='Input data directory path'),
- gr.Radio([True,False], label='Upsampling', value=False),
- gr.Radio([True,False], label='CUDA', value=False),
- ]
-                        trainer = gr.Button("Train")
-                        train_output = gr.TextArea(placeholder="Output information", label='Output')
-
-            # Wire the NER Train button to the Trainer generator (mirrors the RC tab below).
-            trainer.click(Trainer, inputs=train_input, outputs=train_output)
-
-
- with gr.Tab("RC"):
- gr.Markdown("Use Tagger to apply RC from a pretrained model in document in (.TXT) CONLL04 format.")
- gr.Markdown("Use Trainer to train a new RC model from a file (.TXT) CONLL04 format and the rel2id file (.JSON).")
- with gr.Tab("Tagger Document"):
-
- with gr.Row():
- with gr.Column():
- c = gr.Radio(list(models_rc), label='Model')
- inputs =[
- c,
- gr.File(label='Input data file'),
- gr.Textbox(placeholder="Enter path here...", label='Output data file path (.JSON)'), #value='../../data/Tagged/document_tagged.json'),
- gr.Radio([True,False], label='CUDA', value=False),
- ]
- tagger_json = gr.Button("Tag")
- output = [
- gr.JSON(),
- gr.File(),
- ]
-
- tagger_json.click(Tagger_document_RC, inputs=inputs, outputs=output)
- c.change(fn=lambda value: gr.update(choices=list(os.listdir('../../models/RC'))), inputs=c, outputs=c)
-
- with gr.Tab("Trainer"):
- with gr.Row():
- with gr.Column():
- train_input = inputs =[
- gr.Radio([True,False], label='Fast training', value=True),
- gr.Textbox(placeholder="Enter model name here...", label='New model name'),
- gr.File(label='Input train file (.TXT)'),
- gr.File(label='Input rel2id file (.JSON)'),
- gr.Radio([True,False], label='CUDA', value=False),
- ]
- trainer = gr.Button("Train")
- train_output = gr.TextArea(placeholder="Output information", label='Output')
-
- trainer.click(Trainer_RC, inputs=train_input, outputs=train_output)
-
-
-
- demo.queue()
- demo.launch()
-
-
diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/yolo/darknet.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/yolo/darknet.py
deleted file mode 100644
index f12a8ac6a577217e2a797214f788fdf30d8ec989..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/yolo/darknet.py
+++ /dev/null
@@ -1,548 +0,0 @@
-from __future__ import division
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import Variable
-import numpy as np
-import cv2
-import matplotlib.pyplot as plt
-try:
- from util import count_parameters as count
- from util import convert2cpu as cpu
- from util import predict_transform
-except ImportError:
- from yolo.util import count_parameters as count
- from yolo.util import convert2cpu as cpu
- from yolo.util import predict_transform
-
-class test_net(nn.Module):
- def __init__(self, num_layers, input_size):
- super(test_net, self).__init__()
- self.num_layers= num_layers
- self.linear_1 = nn.Linear(input_size, 5)
- self.middle = nn.ModuleList([nn.Linear(5,5) for x in range(num_layers)])
- self.output = nn.Linear(5,2)
-
- def forward(self, x):
- x = x.view(-1)
- fwd = nn.Sequential(self.linear_1, *self.middle, self.output)
- return fwd(x)
-
-def get_test_input():
- img = cv2.imread("dog-cycle-car.png")
- img = cv2.resize(img, (416,416))
- img_ = img[:,:,::-1].transpose((2,0,1))
- img_ = img_[np.newaxis,:,:,:]/255.0
- img_ = torch.from_numpy(img_).float()
- img_ = Variable(img_)
- return img_
-
-
-def parse_cfg(cfgfile):
- """
- Takes a configuration file
-
- Returns a list of blocks. Each blocks describes a block in the neural
- network to be built. Block is represented as a dictionary in the list
-
- """
- file = open(cfgfile, 'r')
- lines = file.read().split('\n') #store the lines in a list
-    lines = [x for x in lines if len(x) > 0]               #get rid of the empty lines
- lines = [x for x in lines if x[0] != '#']
- lines = [x.rstrip().lstrip() for x in lines]
-
-
- block = {}
- blocks = []
-
- for line in lines:
- if line[0] == "[": #This marks the start of a new block
- if len(block) != 0:
- blocks.append(block)
- block = {}
- block["type"] = line[1:-1].rstrip()
- else:
- key,value = line.split("=")
- block[key.rstrip()] = value.lstrip()
- blocks.append(block)
-
- return blocks
-# print('\n\n'.join([repr(x) for x in blocks]))
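-# Illustrative example (inferred from the parsing logic above, not from any shipped cfg):
-# for a file containing
-#
-#   [net]
-#   height=416
-#
-#   [convolutional]
-#   filters=32
-#
-# parse_cfg returns
-#   [{'type': 'net', 'height': '416'}, {'type': 'convolutional', 'filters': '32'}]
-# i.e. every value stays a string; create_modules() below casts them as needed.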
-
-import pickle as pkl
-
-class MaxPoolStride1(nn.Module):
- def __init__(self, kernel_size):
- super(MaxPoolStride1, self).__init__()
- self.kernel_size = kernel_size
- self.pad = kernel_size - 1
-
- def forward(self, x):
- padding = int(self.pad / 2)
- #padded_x = F.pad(x, (0,self.pad,0,self.pad), mode="replicate")
- #pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)
- #padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode="replicate")
- padded_x = F.pad(x, (padding, padding, padding, padding), mode="constant", value=0)
- pooled_x = nn.MaxPool2d(self.kernel_size, 1)(padded_x)
- return pooled_x
-
-
-class EmptyLayer(nn.Module):
- def __init__(self):
- super(EmptyLayer, self).__init__()
-
-
-class DetectionLayer(nn.Module):
- def __init__(self, anchors):
- super(DetectionLayer, self).__init__()
- self.anchors = anchors
-
- def forward(self, x, inp_dim, num_classes, confidence):
- x = x.data
- global CUDA
- prediction = x
- prediction = predict_transform(prediction, inp_dim, self.anchors, num_classes, confidence, CUDA)
- return prediction
-
-
-
-
-
-class Upsample(nn.Module):
- def __init__(self, stride=2):
- super(Upsample, self).__init__()
- self.stride = stride
-
- def forward(self, x):
- stride = self.stride
- assert(x.data.dim() == 4)
- B = x.data.size(0)
- C = x.data.size(1)
- H = x.data.size(2)
- W = x.data.size(3)
- ws = stride
- hs = stride
- x = x.view(B, C, H, 1, W, 1).expand(B, C, H, stride, W, stride).contiguous().view(B, C, H*stride, W*stride)
- return x
-#
-
-class ReOrgLayer(nn.Module):
- def __init__(self, stride = 2):
- super(ReOrgLayer, self).__init__()
- self.stride= stride
-
- def forward(self,x):
- assert(x.data.dim() == 4)
- B,C,H,W = x.data.shape
- hs = self.stride
- ws = self.stride
- assert(H % hs == 0), "The stride " + str(self.stride) + " is not a proper divisor of height " + str(H)
-        assert(W % ws == 0), "The stride " + str(self.stride) + " is not a proper divisor of width " + str(W)
- x = x.view(B,C, H // hs, hs, W // ws, ws).transpose(-2,-3).contiguous()
- x = x.view(B,C, H // hs * W // ws, hs, ws)
- x = x.view(B,C, H // hs * W // ws, hs*ws).transpose(-1,-2).contiguous()
- x = x.view(B, C, ws*hs, H // ws, W // ws).transpose(1,2).contiguous()
- x = x.view(B, C*ws*hs, H // ws, W // ws)
- return x
-
-
-def create_modules(blocks):
- net_info = blocks[0] #Captures the information about the input and pre-processing
-
- module_list = nn.ModuleList()
-
- index = 0 #indexing blocks helps with implementing route layers (skip connections)
-
-
- prev_filters = 3
-
- output_filters = []
-
- for x in blocks:
- module = nn.Sequential()
-
- if (x["type"] == "net"):
- continue
-
- #If it's a convolutional layer
- if (x["type"] == "convolutional"):
- #Get the info about the layer
- activation = x["activation"]
- try:
- batch_normalize = int(x["batch_normalize"])
- bias = False
- except:
- batch_normalize = 0
- bias = True
-
- filters= int(x["filters"])
- padding = int(x["pad"])
- kernel_size = int(x["size"])
- stride = int(x["stride"])
-
- if padding:
- pad = (kernel_size - 1) // 2
- else:
- pad = 0
-
- #Add the convolutional layer
- conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=bias)
- module.add_module("conv_{0}".format(index), conv)
-
- #Add the Batch Norm Layer
- if batch_normalize:
- bn = nn.BatchNorm2d(filters)
- module.add_module("batch_norm_{0}".format(index), bn)
-
- #Check the activation.
- #It is either Linear or a Leaky ReLU for YOLO
- if activation == "leaky":
- activn = nn.LeakyReLU(0.1, inplace = True)
- module.add_module("leaky_{0}".format(index), activn)
-
-
-
- #If it's an upsampling layer
- #We use Bilinear2dUpsampling
-
- elif (x["type"] == "upsample"):
- stride = int(x["stride"])
-# upsample = Upsample(stride)
- upsample = nn.Upsample(scale_factor = 2, mode = "nearest")
- module.add_module("upsample_{}".format(index), upsample)
-
- #If it is a route layer
- elif (x["type"] == "route"):
- x["layers"] = x["layers"].split(',')
-
- #Start of a route
- start = int(x["layers"][0])
- if len(x["layers"]) <= 2:
- #end, if there exists one.
- try:
- end = int(x["layers"][1])
- except:
- end = 0
-
-                #Positive annotation
- if start > 0:
- start = start - index
-
- if end > 0:
- end = end - index
-
-
- route = EmptyLayer()
- module.add_module("route_{0}".format(index), route)
-
-
-
- if end < 0:
- filters = output_filters[index + start] + output_filters[index + end]
- else:
- filters= output_filters[index + start]
- else: #SPP-route
- assert len(x["layers"]) == 4
-
-                route = EmptyLayer()
- module.add_module("route_{0}".format(index), route)
-
- filters = output_filters[index + start] + output_filters[index + int(x["layers"][1])] \
- + output_filters[index + int(x["layers"][2])] + output_filters[index + int(x["layers"][3])]
-
- #shortcut corresponds to skip connection
- elif x["type"] == "shortcut":
- from_ = int(x["from"])
- shortcut = EmptyLayer()
- module.add_module("shortcut_{}".format(index), shortcut)
-
-
- elif x["type"] == "maxpool":
- stride = int(x["stride"])
- size = int(x["size"])
- if stride != 1:
- maxpool = nn.MaxPool2d(size, stride)
- else:
- maxpool = MaxPoolStride1(size)
- #maxpool = nn.MaxPool2d(size, stride=1, padding=size-1)
-
- module.add_module("maxpool_{}".format(index), maxpool)
-
- #Yolo is the detection layer
- elif x["type"] == "yolo":
- mask = x["mask"].split(",")
- mask = [int(x) for x in mask]
-
-
- anchors = x["anchors"].split(",")
- anchors = [int(a) for a in anchors]
- anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)]
- anchors = [anchors[i] for i in mask]
-
- detection = DetectionLayer(anchors)
- module.add_module("Detection_{}".format(index), detection)
-
-
-
- else:
-            print("Unsupported layer type: {}".format(x["type"]))
- assert False
-
- module_list.append(module)
- prev_filters = filters
- output_filters.append(filters)
- index += 1
-
-
- return (net_info, module_list)
-
-
-
-class Darknet(nn.Module):
- def __init__(self, cfgfile):
- super(Darknet, self).__init__()
- self.blocks = parse_cfg(cfgfile)
- self.net_info, self.module_list = create_modules(self.blocks)
- self.header = torch.IntTensor([0,0,0,0])
- self.seen = 0
-
-
-
- def get_blocks(self):
- return self.blocks
-
- def get_module_list(self):
- return self.module_list
-
-
- def forward(self, x, CUDA):
- detections = []
- modules = self.blocks[1:]
- outputs = {} #We cache the outputs for the route layer
-
-
- write = 0
- for i in range(len(modules)):
-
- module_type = (modules[i]["type"])
- if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
-
- x = self.module_list[i](x)
- outputs[i] = x
-
-
- elif module_type == "route":
- layers = modules[i]["layers"]
- layers = [int(a) for a in layers]
-
- if (layers[0]) > 0:
- layers[0] = layers[0] - i
-
- if len(layers) == 1:
- x = outputs[i + (layers[0])]
-
- elif len(layers) == 2:
- if (layers[1]) > 0:
- layers[1] = layers[1] - i
-
- map1 = outputs[i + layers[0]]
- map2 = outputs[i + layers[1]]
-
- x = torch.cat((map1, map2), 1)
- elif len(layers) == 4: # SPP
- map1 = outputs[i + layers[0]]
- map2 = outputs[i + layers[1]]
- map3 = outputs[i + layers[2]]
- map4 = outputs[i + layers[3]]
-
- x = torch.cat((map1, map2, map3, map4), 1)
- outputs[i] = x
-
- elif module_type == "shortcut":
- from_ = int(modules[i]["from"])
- x = outputs[i-1] + outputs[i+from_]
- outputs[i] = x
-
-
-
- elif module_type == 'yolo':
-
- anchors = self.module_list[i][0].anchors
- #Get the input dimensions
- inp_dim = int (self.net_info["height"])
-
- #Get the number of classes
- num_classes = int (modules[i]["classes"])
-
- #Output the result
- x = x.data
- x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)
-
- if type(x) == int:
- continue
-
-
- if not write:
- detections = x
- write = 1
-
- else:
- detections = torch.cat((detections, x), 1)
-
- outputs[i] = outputs[i-1]
-
-
-
- try:
- return detections
- except:
- return 0
-
-
- def load_weights(self, weightfile):
-
- #Open the weights file
- fp = open(weightfile, "rb")
-
-        #The first 5 values are header information
-        # 1. Major version number
-        # 2. Minor version number
-        # 3. Subversion number
-        # 4-5. Number of images seen during training (stored as an int64, read here as two int32 values)
- header = np.fromfile(fp, dtype = np.int32, count = 5)
- self.header = torch.from_numpy(header)
- self.seen = self.header[3]
-
- #The rest of the values are the weights
- # Let's load them up
- weights = np.fromfile(fp, dtype = np.float32)
-
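-        # Layout consumed by the loop below: for every convolutional block the flat
-        # float32 stream holds, in order,
-        #   [bn biases, bn weights, bn running_mean, bn running_var]  if batch-normalized,
-        #   [conv biases]                                             otherwise,
-        # followed in both cases by the conv kernel weights.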
- ptr = 0
- for i in range(len(self.module_list)):
- module_type = self.blocks[i + 1]["type"]
-
- if module_type == "convolutional":
- model = self.module_list[i]
- try:
- batch_normalize = int(self.blocks[i+1]["batch_normalize"])
- except:
- batch_normalize = 0
-
- conv = model[0]
-
- if (batch_normalize):
- bn = model[1]
-
- #Get the number of weights of Batch Norm Layer
- num_bn_biases = bn.bias.numel()
-
- #Load the weights
- bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])
- ptr += num_bn_biases
-
- bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
- ptr += num_bn_biases
-
- bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
- ptr += num_bn_biases
-
- bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
- ptr += num_bn_biases
-
- #Cast the loaded weights into dims of model weights.
- bn_biases = bn_biases.view_as(bn.bias.data)
- bn_weights = bn_weights.view_as(bn.weight.data)
- bn_running_mean = bn_running_mean.view_as(bn.running_mean)
- bn_running_var = bn_running_var.view_as(bn.running_var)
-
- #Copy the data to model
- bn.bias.data.copy_(bn_biases)
- bn.weight.data.copy_(bn_weights)
- bn.running_mean.copy_(bn_running_mean)
- bn.running_var.copy_(bn_running_var)
-
- else:
- #Number of biases
- num_biases = conv.bias.numel()
-
- #Load the weights
- conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])
- ptr = ptr + num_biases
-
- #reshape the loaded weights according to the dims of the model weights
- conv_biases = conv_biases.view_as(conv.bias.data)
-
- #Finally copy the data
- conv.bias.data.copy_(conv_biases)
-
-
- #Let us load the weights for the Convolutional layers
- num_weights = conv.weight.numel()
-
- #Do the same as above for weights
- conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
- ptr = ptr + num_weights
-
- conv_weights = conv_weights.view_as(conv.weight.data)
- conv.weight.data.copy_(conv_weights)
-
- def save_weights(self, savedfile, cutoff = 0):
-
- if cutoff <= 0:
- cutoff = len(self.blocks) - 1
-
- fp = open(savedfile, 'wb')
-
- # Attach the header at the top of the file
- self.header[3] = self.seen
- header = self.header
-
- header = header.numpy()
- header.tofile(fp)
-
- # Now, let us save the weights
- for i in range(len(self.module_list)):
- module_type = self.blocks[i+1]["type"]
-
- if (module_type) == "convolutional":
- model = self.module_list[i]
- try:
- batch_normalize = int(self.blocks[i+1]["batch_normalize"])
- except:
- batch_normalize = 0
-
- conv = model[0]
-
- if (batch_normalize):
- bn = model[1]
-
- #If the parameters are on GPU, convert them back to CPU
- #We don't convert the parameter to GPU
- #Instead. we copy the parameter and then convert it to CPU
- #This is done as weight are need to be saved during training
- cpu(bn.bias.data).numpy().tofile(fp)
- cpu(bn.weight.data).numpy().tofile(fp)
- cpu(bn.running_mean).numpy().tofile(fp)
- cpu(bn.running_var).numpy().tofile(fp)
-
-
- else:
- cpu(conv.bias.data).numpy().tofile(fp)
-
-
- #Let us save the weights for the Convolutional layers
- cpu(conv.weight.data).numpy().tofile(fp)
-
-
-
-
-
-#
-#dn = Darknet('cfg/yolov3.cfg')
-#dn.load_weights("yolov3.weights")
-#inp = get_test_input()
-#a, interms = dn(inp)
-#dn.eval()
-#a_i, interms_i = dn(inp)
diff --git a/spaces/SarthakSidhant/Go-Cattle/diseases/necrotic stomatitis (calf diphtheria).md b/spaces/SarthakSidhant/Go-Cattle/diseases/necrotic stomatitis (calf diphtheria).md
deleted file mode 100644
index efa825fa3c23bbe81d1079d340604086a47ea72e..0000000000000000000000000000000000000000
--- a/spaces/SarthakSidhant/Go-Cattle/diseases/necrotic stomatitis (calf diphtheria).md
+++ /dev/null
@@ -1,41 +0,0 @@
-## Necrotic stomatitis (calf diphtheria)
-
-**Information** : Necrotic stomatitis (calf diphtheria) is a bacterial infection of the mouth and throat of young calves caused by the bacterium Fusobacterium necrophorum. The bacterium produces toxins that cause tissue death and necrosis.
-
-**Symptoms**
-
-The symptoms of necrotic stomatitis typically appear within 2-5 days of infection and include:
-
-* Painful mouth and throat
-* Drooling
-* Fever
-* Difficulty eating and drinking
-* Swelling of the lymph nodes in the head and neck
-* Necrotic lesions on the tongue, gums, and palate
-* Death in severe cases
-
-**Remedies**
-
-Treatment is based on prompt antimicrobial therapy together with supportive care and may include:
-
-* Providing pain relief
-* Administering fluids and electrolytes
-* Treating secondary bacterial infections
-* Administering antibiotics under veterinary guidance
-
-**Causes**
-
-Necrotic stomatitis (calf diphtheria) is caused by the bacterium Fusobacterium necrophorum. The bacteria are found in the environment, in soil, and in the feces of animals. Calves can become infected by ingesting the bacteria or by coming into contact with contaminated objects, such as feed, water, or equipment.
-
-**Prevention**
-
-There is no vaccine available for necrotic stomatitis. However, there are a number of preventive measures that can be taken to reduce the risk of infection, such as:
-
-* Practicing good biosecurity measures
-* Isolating infected animals from healthy animals
-* Cleaning and disinfecting contaminated areas
-* Vaccinating cattle against other diseases that can weaken the immune system, such as bovine viral diarrhea virus (BVDV) and rotavirus
-
-**Differential diagnosis**
-
-Necrotic stomatitis can be difficult to distinguish from other diseases that cause mouth lesions, such as foot-and-mouth disease, contagious ecthyma, and vesicular stomatitis. A veterinarian can diagnose necrotic stomatitis by testing a sample of the lesions for the presence of Fusobacterium necrophorum.
diff --git a/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_vqa.py b/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_vqa.py
deleted file mode 100644
index 59b9d888bdcb999ca65eabfda7c457b7041524c4..0000000000000000000000000000000000000000
--- a/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_vqa.py
+++ /dev/null
@@ -1,340 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import torch
-import torch.nn as nn
-from itertools import chain
-from lavis.common.registry import registry
-from lavis.models.base_model import BaseModel
-from torch.nn import CrossEntropyLoss, MSELoss
-from transformers import T5ForConditionalGeneration
-from lavis.models.pnp_vqa_models import prepare_qa_input
-from lavis.models.blip_models.blip_image_text_matching import compute_gradcam
-from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
-
-
-@registry.register_model("pnp_vqa")
-class PNPVQA(BaseModel):
- """
- PNPVQA model consists of three submodels for zero-shot VQA:
-    1. Image-question matching model
- 2. Image captioning model
- 3. Question answering model
-
- Supported model types:
- - base: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-base)
- - large: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-large)
- - 3b: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-3b)
-
- Usage:
- >>> from lavis.models import load_model
- >>> model = load_model("pnp_vqa", "base", is_eval=True)
- >>> model = load_model("pnp_vqa", "large", is_eval=True)
- >>> model = load_model("pnp_vqa", "3b", is_eval=True)
- """
-
- PRETRAINED_MODEL_CONFIG_DICT = {"base": "configs/models/pnp-vqa/pnp_vqa_base.yaml",
- "large": "configs/models/pnp-vqa/pnp_vqa_large.yaml",
- "3b": "configs/models/pnp-vqa/pnp_vqa_3b.yaml",
- }
-
- def __init__(self, image_question_matching_model, image_captioning_model,
- question_answering_model, offload_model=False):
- super().__init__()
-
- self.image_question_matching_model = image_question_matching_model
- self.image_captioning_model = image_captioning_model
- self.question_answering_model = question_answering_model
- self.offload_model = offload_model
-
- def forward_itm(self, samples, block_num=7):
- """
- Args:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- - text_input (list): A list of strings of length batch_size
- block_num (int): The index of cross-attention block for gradcam computation.
-
- Returns:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- - text_input (list): A list of strings of length batch_size
- - gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- """
- image = samples['image']
- question = [text.strip('?') for text in samples['text_input']]
- tokenized_text = self.image_question_matching_model.tokenizer(question, padding='longest', truncation=True,
- return_tensors="pt").to(self.image_question_matching_model.device)
- with torch.set_grad_enabled(True):
- gradcams, _ = compute_gradcam(model=self.image_question_matching_model,
- visual_input=image,
- text_input=question,
- tokenized_text=tokenized_text,
- block_num=block_num)
-
- gradcams = [gradcam_[1] for gradcam_ in gradcams]
- samples['gradcams'] = torch.stack(gradcams).reshape(samples['image'].size(0), -1)
-
- return samples
-
- def forward_cap(
- self,
- samples,
- cap_max_length=20,
- cap_min_length=0,
- top_p=1,
- top_k=50,
- repetition_penalty=1.0,
- num_captions=100,
- num_patches=20,
- ):
- """
- Args:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- - text_input (list): A list of strings of length batch_size
- - gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- cap_max_length (int): The maximum length of the caption to be generated.
- cap_min_length (int): The minimum length of the caption to be generated.
- top_p (float): The cumulative probability for nucleus sampling.
- top_k (float): The number of the highest probability tokens for top-k sampling.
- repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
- num_captions (int): Number of captions generated for each image.
- num_patches (int): Number of patches sampled for each image.
-
- Returns:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- - text_input (list): A list of strings of length batch_size
- - gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- - captions (nested list): A nested list of strings of total length batch_size * num_captions
- """
- encoder_out = self.image_captioning_model.forward_encoder(samples)
- captions = [[] for _ in range(encoder_out.size(0))]
-
- min_num_captions = 0
-
- while min_num_captions < num_captions:
- encoder_out_samples = []
- for i in range(num_captions):
- patch_id = torch.multinomial(samples['gradcams'].to(self.image_captioning_model.device),
- num_patches).reshape(encoder_out.size(0), -1) + 1
- patch_id = patch_id.sort(dim=1).values.unsqueeze(-1).expand(-1, -1, encoder_out.size(2))
- encoder_out_sample = torch.gather(encoder_out, 1, patch_id)
- encoder_out_samples.append(encoder_out_sample)
-
- stacked = torch.stack(encoder_out_samples, dim=1)
- image_embeds = torch.flatten(stacked, start_dim=0, end_dim=1) #(bsz*num_seq, num_patch, dim)
-
- image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(self.image_captioning_model.device)
- model_kwargs = {
- "encoder_hidden_states": image_embeds,
- "encoder_attention_mask": image_atts,
- }
-
- prompt = [self.image_captioning_model.prompt] * image_embeds.size(0)
- prompt = self.image_captioning_model.tokenizer(prompt,
- return_tensors="pt").to(self.image_captioning_model.device)
- prompt.input_ids[:, 0] = self.image_captioning_model.tokenizer.bos_token_id
- prompt.input_ids = prompt.input_ids[:, :-1]
-
- decoder_out = self.image_captioning_model.text_decoder.generate(
- input_ids=prompt.input_ids,
- max_length=cap_max_length,
- min_length=cap_min_length,
- do_sample=True,
- top_p=top_p,
- top_k=top_k,
- num_return_sequences=1,
- eos_token_id=self.image_captioning_model.tokenizer.sep_token_id,
- pad_token_id=self.image_captioning_model.tokenizer.pad_token_id,
- repetition_penalty=repetition_penalty,
- **model_kwargs)
-
- outputs = self.image_captioning_model.tokenizer.batch_decode(decoder_out, skip_special_tokens=True)
-
- for counter, output in enumerate(outputs):
- ind = counter//num_captions
- if len(captions[ind]) < num_captions:
- caption = output[len(self.image_captioning_model.prompt):]
- overlap_caption = [1 for caps in captions[ind] if caption in caps]
- if len(overlap_caption) == 0:
- captions[ind].append(caption)
-
- min_num_captions = min([len(i) for i in captions])
-
- samples['captions'] = captions
-
- return samples
-
- def forward_qa(
- self,
- samples,
- num_beams=1,
- max_len=20,
- min_len=0,
- internal_bsz_fid=1,
- num_captions=100,
- num_captions_fid=1,
- ):
- """
- Args:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- - text_input (list): A list of strings of length batch_size
- - gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- - captions (nested list): A nested list of strings of total length batch_size * num_captions
- - question_captions (nested list): A nested list of concatenated strings of questions and captions
- num_beams (int): Number of beams for beam search. 1 means no beam search.
- max_len (int): Maximum length of generated answers.
- min_len (int): Minimum length of generated answers.
- internal_bsz_fid (int): Internal batch size when using FiD decoding.
- num_captions (int): Number of captions generated for each image.
- num_captions_fid (int): Number of captions concatenated with a question during FiD decoding.
-
- Returns:
- List: A list of strings, each string is an answer.
- """
- prepare_qa_input(samples, num_captions=num_captions, num_captions_fid=num_captions_fid)
-
- pred_answers = []
- question_captions = samples['question_captions']
- question_captions_chunk = [question_captions[i:i + internal_bsz_fid]
- for i in range(0, len(question_captions), internal_bsz_fid)]
- question_captions_chunk = list(chain(*question_captions_chunk))
-
- for question_caption in question_captions_chunk:
- question_caption_input = self.question_answering_model.tokenizer(question_caption, padding='longest',
- truncation=True, return_tensors="pt").to(self.question_answering_model.device)
-
- question_caption_input.input_ids = question_caption_input.input_ids.reshape(
- internal_bsz_fid, -1, question_caption_input.input_ids.size(1))
- question_caption_input.attention_mask = question_caption_input.attention_mask.reshape(
- internal_bsz_fid, -1, question_caption_input.attention_mask.size(1))
-
- outputs = self.question_answering_model.generate(input_ids=question_caption_input.input_ids,
- attention_mask=question_caption_input.attention_mask,
- num_beams=num_beams,
- min_length=min_len,
- max_length=max_len,
- )
-
- for output in outputs:
- pred_answer = self.question_answering_model.tokenizer.decode(output, skip_special_tokens=True)
- pred_answers.append(pred_answer)
-
- return pred_answers
-
- def predict_answers(
- self,
- samples,
- num_beams=1,
- inference_method="generate",
- max_len=20,
- min_len=0,
- internal_bsz_fid=1,
- num_captions=50,
- num_captions_fid=1,
- cap_max_length=20,
- cap_min_length=10,
- top_k=50,
- top_p=1,
- repetition_penalty=1,
- num_patches=50,
- block_num=7,
- ):
- """
- Args:
- samples (dict): A dictionary containing the following keys:
- - image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- - text_input (str or [str]): String or a list of strings, each string is a question.
-                The number of questions must equal the batch size. A single string is converted to a list of length 1 first.
- num_beams (int): Number of beams for beam search. 1 means no beam search.
- inference_method (str): Inference method. Must be "generate". The model will generate answers.
- max_len (int): Maximum length of generated answers.
- min_len (int): Minimum length of generated answers.
- internal_bsz_fid (int): Internal batch size when using FiD decoding.
- num_captions (int): Number of captions generated for each image.
- num_captions_fid (int): Number of captions concatenated with a question during FiD decoding.
- cap_max_length (int): The maximum length of the caption to be generated.
- cap_min_length (int): The minimum length of the caption to be generated.
- top_k (float): The number of the highest probability tokens for top-k sampling.
- top_p (float): The cumulative probability for nucleus sampling.
- repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
- num_patches (int): Number of patches sampled for each image.
- block_num (int): The index of cross-attention block for gradcam computation.
-
- Returns:
- List: A list of strings, each string is an answer.
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- captions (nested list): A nested list of strings of total length batch_size * num_captions
- """
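-        # Usage sketch (illustrative; tensor and question values are placeholders):
-        #   samples = {"image": images, "text_input": ["What is the dog doing?"]}
-        #   answers, captions, gradcams = model.predict_answers(samples, num_captions=50, num_patches=50)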
- assert inference_method in [
- "generate",
- ], "Inference method must be 'generate', got {}.".format(
- inference_method
- )
-
- if isinstance(samples["text_input"], str):
- samples["text_input"] = [samples["text_input"]]
-
- assert len(samples["text_input"]) == samples["image"].size(
- 0
- ), "The number of questions must be equal to the batch size."
-
- samples = self.forward_itm(samples, block_num=block_num)
-
- samples = self.forward_cap(samples,
- cap_max_length=cap_max_length,
- cap_min_length=cap_min_length,
- top_k=top_k,
- top_p=top_p,
- repetition_penalty=repetition_penalty,
- num_captions=num_captions,
- num_patches=num_patches)
-
- if self.offload_model:
- samples['image'] = samples['image'].to('cpu')
- self.image_question_matching_model.to('cpu')
- self.image_captioning_model.to('cpu')
- torch.cuda.empty_cache()
-
- pred_answers = self.forward_qa(samples,
- num_beams=num_beams,
- max_len=max_len,
- min_len=min_len,
- internal_bsz_fid=internal_bsz_fid,
- num_captions=num_captions,
- num_captions_fid=num_captions_fid)
-
- if self.offload_model:
- self.image_question_matching_model.to(self.question_answering_model.device)
- self.image_captioning_model.to(self.question_answering_model.device)
-
- return pred_answers, samples['captions'], samples['gradcams']
-
- @classmethod
- def from_config(cls, model_config):
- itm_config = model_config.image_question_matching_model
- cap_config = model_config.image_captioning_model
- qa_config = model_config.question_answering_model
-
- itm_cls = registry.get_model_class(itm_config.arch)
- cap_cls = registry.get_model_class(cap_config.arch)
- qa_cls = registry.get_model_class(qa_config.arch)
-
- image_question_matching_model = itm_cls.from_config(itm_config)
- image_captioning_model = cap_cls.from_config(cap_config)
- question_answering_model = qa_cls.from_config(qa_config)
-
- model = cls(image_question_matching_model=image_question_matching_model,
- image_captioning_model=image_captioning_model,
- question_answering_model=question_answering_model,
- offload_model= True if model_config.model_type == '3b' else False,
- )
-
- return model
\ No newline at end of file
diff --git a/spaces/SeViLA/SeViLA/lavis/models/vit.py b/spaces/SeViLA/SeViLA/lavis/models/vit.py
deleted file mode 100644
index f35b7bb6886f8e4455330cf7c330a18e57f11db7..0000000000000000000000000000000000000000
--- a/spaces/SeViLA/SeViLA/lavis/models/vit.py
+++ /dev/null
@@ -1,527 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-
- Based on timm code base
- https://github.com/rwightman/pytorch-image-models/tree/master/timm
-"""
-
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from functools import partial
-
-from timm.models.vision_transformer import _cfg, PatchEmbed
-from timm.models.registry import register_model
-from timm.models.layers import trunc_normal_, DropPath
-from timm.models.helpers import named_apply, adapt_input_conv
-
-from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
-from lavis.models.base_model import BaseEncoder
-
-
-class Mlp(nn.Module):
- """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
-
- def __init__(
- self,
- in_features,
- hidden_features=None,
- out_features=None,
- act_layer=nn.GELU,
- drop=0.0,
- ):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class Attention(nn.Module):
- def __init__(
- self,
- dim,
- num_heads=8,
- qkv_bias=False,
- qk_scale=None,
- attn_drop=0.0,
- proj_drop=0.0,
- ):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
- self.scale = qk_scale or head_dim**-0.5
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
- self.attn_gradients = None
- self.attention_map = None
-
- def save_attn_gradients(self, attn_gradients):
- self.attn_gradients = attn_gradients
-
- def get_attn_gradients(self):
- return self.attn_gradients
-
- def save_attention_map(self, attention_map):
- self.attention_map = attention_map
-
- def get_attention_map(self):
- return self.attention_map
-
- def forward(self, x, register_hook=False):
- B, N, C = x.shape
- qkv = (
- self.qkv(x)
- .reshape(B, N, 3, self.num_heads, C // self.num_heads)
- .permute(2, 0, 3, 1, 4)
- )
- q, k, v = (
- qkv[0],
- qkv[1],
- qkv[2],
- ) # make torchscript happy (cannot use tensor as tuple)
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- if register_hook:
- self.save_attention_map(attn)
- attn.register_hook(self.save_attn_gradients)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-
-class Block(nn.Module):
- def __init__(
- self,
- dim,
- num_heads,
- mlp_ratio=4.0,
- qkv_bias=False,
- qk_scale=None,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm,
- use_grad_checkpointing=False,
- ):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim,
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop,
- )
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(
- in_features=dim,
- hidden_features=mlp_hidden_dim,
- act_layer=act_layer,
- drop=drop,
- )
-
- if use_grad_checkpointing:
- self.attn = checkpoint_wrapper(self.attn)
- self.mlp = checkpoint_wrapper(self.mlp)
-
- def forward(self, x, register_hook=False):
- x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- return x
-
-
-class VisionTransformer(nn.Module):
- """Vision Transformer
- A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
- https://arxiv.org/abs/2010.11929
- """
-
- def __init__(
- self,
- img_size=224,
- patch_size=16,
- in_chans=3,
- num_classes=1000,
- embed_dim=768,
- depth=12,
- num_heads=12,
- mlp_ratio=4.0,
- qkv_bias=True,
- qk_scale=None,
- representation_size=None,
- drop_rate=0.0,
- attn_drop_rate=0.0,
- drop_path_rate=0.0,
- norm_layer=None,
- use_grad_checkpointing=False,
- ckpt_layer=0,
- ):
- """
- Args:
- img_size (int, tuple): input image size
- patch_size (int, tuple): patch size
- in_chans (int): number of input channels
- num_classes (int): number of classes for classification head
- embed_dim (int): embedding dimension
- depth (int): depth of transformer
- num_heads (int): number of attention heads
- mlp_ratio (int): ratio of mlp hidden dim to embedding dim
- qkv_bias (bool): enable bias for qkv if True
- qk_scale (float): override default qk scale of head_dim ** -0.5 if set
- representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
- drop_rate (float): dropout rate
- attn_drop_rate (float): attention dropout rate
- drop_path_rate (float): stochastic depth rate
- norm_layer: (nn.Module): normalization layer
- """
- super().__init__()
- self.num_features = (
- self.embed_dim
- ) = embed_dim # num_features for consistency with other models
- norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
-
- self.patch_embed = PatchEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=in_chans,
- embed_dim=embed_dim,
- )
-
- num_patches = self.patch_embed.num_patches
-
- self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
- self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- dpr = [
- x.item() for x in torch.linspace(0, drop_path_rate, depth)
- ] # stochastic depth decay rule
- self.blocks = nn.ModuleList(
- [
- Block(
- dim=embed_dim,
- num_heads=num_heads,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[i],
- norm_layer=norm_layer,
- use_grad_checkpointing=(
- use_grad_checkpointing and i >= depth - ckpt_layer
- ),
- )
- for i in range(depth)
- ]
- )
- self.norm = norm_layer(embed_dim)
-
- trunc_normal_(self.pos_embed, std=0.02)
- trunc_normal_(self.cls_token, std=0.02)
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {"pos_embed", "cls_token"}
-
- def forward(self, x, register_blk=-1):
- B = x.shape[0]
- x = self.patch_embed(x)
-
- cls_tokens = self.cls_token.expand(
- B, -1, -1
- ) # stole cls_tokens impl from Phil Wang, thanks
- x = torch.cat((cls_tokens, x), dim=1)
-
- x = x + self.pos_embed[:, : x.size(1), :]
- x = self.pos_drop(x)
-
- for i, blk in enumerate(self.blocks):
- x = blk(x, register_blk == i)
- x = self.norm(x)
-
- return x
-
- @torch.jit.ignore()
- def load_pretrained(self, checkpoint_path, prefix=""):
- _load_weights(self, checkpoint_path, prefix)
-
-
-@torch.no_grad()
-def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""):
- """Load weights from .npz checkpoints for official Google Brain Flax implementation"""
- import numpy as np
-
- def _n2p(w, t=True):
- if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
- w = w.flatten()
- if t:
- if w.ndim == 4:
- w = w.transpose([3, 2, 0, 1])
- elif w.ndim == 3:
- w = w.transpose([2, 0, 1])
- elif w.ndim == 2:
- w = w.transpose([1, 0])
- return torch.from_numpy(w)
-
- w = np.load(checkpoint_path)
- if not prefix and "opt/target/embedding/kernel" in w:
- prefix = "opt/target/"
-
- if hasattr(model.patch_embed, "backbone"):
- # hybrid
- backbone = model.patch_embed.backbone
- stem_only = not hasattr(backbone, "stem")
- stem = backbone if stem_only else backbone.stem
- stem.conv.weight.copy_(
- adapt_input_conv(
- stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"])
- )
- )
- stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"]))
- stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"]))
- if not stem_only:
- for i, stage in enumerate(backbone.stages):
- for j, block in enumerate(stage.blocks):
- bp = f"{prefix}block{i + 1}/unit{j + 1}/"
- for r in range(3):
- getattr(block, f"conv{r + 1}").weight.copy_(
- _n2p(w[f"{bp}conv{r + 1}/kernel"])
- )
- getattr(block, f"norm{r + 1}").weight.copy_(
- _n2p(w[f"{bp}gn{r + 1}/scale"])
- )
- getattr(block, f"norm{r + 1}").bias.copy_(
- _n2p(w[f"{bp}gn{r + 1}/bias"])
- )
- if block.downsample is not None:
- block.downsample.conv.weight.copy_(
- _n2p(w[f"{bp}conv_proj/kernel"])
- )
- block.downsample.norm.weight.copy_(
- _n2p(w[f"{bp}gn_proj/scale"])
- )
- block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"]))
- embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"])
- else:
- embed_conv_w = adapt_input_conv(
- model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"])
- )
- model.patch_embed.proj.weight.copy_(embed_conv_w)
- model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"]))
- model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False))
- pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False)
- if pos_embed_w.shape != model.pos_embed.shape:
- pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
- pos_embed_w,
- model.pos_embed,
- getattr(model, "num_tokens", 1),
- model.patch_embed.grid_size,
- )
- model.pos_embed.copy_(pos_embed_w)
- model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"]))
- model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"]))
- # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
- # model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
- # model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
- # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
- # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
- # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
- for i, block in enumerate(model.blocks.children()):
- block_prefix = f"{prefix}Transformer/encoderblock_{i}/"
- mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/"
- block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"]))
- block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"]))
- block.attn.qkv.weight.copy_(
- torch.cat(
- [
- _n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T
- for n in ("query", "key", "value")
- ]
- )
- )
- block.attn.qkv.bias.copy_(
- torch.cat(
- [
- _n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1)
- for n in ("query", "key", "value")
- ]
- )
- )
- block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1))
- block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"]))
- for r in range(2):
- getattr(block.mlp, f"fc{r + 1}").weight.copy_(
- _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])
- )
- getattr(block.mlp, f"fc{r + 1}").bias.copy_(
- _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])
- )
- block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"]))
- block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"]))
-
-
-def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
- # Rescale the grid of position embeddings when loading from state_dict. Adapted from
- # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
-    print("Resized position embedding: %s to %s" % (posemb.shape, posemb_new.shape))
- ntok_new = posemb_new.shape[1]
- if num_tokens:
- posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
- ntok_new -= num_tokens
- else:
- posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
- gs_old = int(math.sqrt(len(posemb_grid)))
- if not len(gs_new): # backwards compatibility
- gs_new = [int(math.sqrt(ntok_new))] * 2
- assert len(gs_new) >= 2
-    print("Position embedding grid-size from %s to %s" % ([gs_old, gs_old], gs_new))
- posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
- posemb_grid = F.interpolate(
- posemb_grid, size=gs_new, mode="bicubic", align_corners=False
- )
- posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
- posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
-    return posemb
-
-
-def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
- # interpolate position embedding
- embedding_size = pos_embed_checkpoint.shape[-1]
- num_patches = visual_encoder.patch_embed.num_patches
- num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
- # height (== width) for the checkpoint position embedding
- orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
- # height (== width) for the new position embedding
- new_size = int(num_patches**0.5)
-
- if orig_size != new_size:
- # class_token and dist_token are kept unchanged
- extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
- # only the position tokens are interpolated
- pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
- pos_tokens = pos_tokens.reshape(
- -1, orig_size, orig_size, embedding_size
- ).permute(0, 3, 1, 2)
- pos_tokens = torch.nn.functional.interpolate(
- pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False
- )
- pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
- new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
- print(
- "reshape position embedding from %d to %d" % (orig_size**2, new_size**2)
- )
-
- return new_pos_embed
- else:
- return pos_embed_checkpoint
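-# Worked example (illustrative): a checkpoint trained at 224x224 with 16x16 patches
-# stores a 14x14 grid of position tokens (196 tokens plus the extra cls token).
-# Fine-tuning at 384x384 needs a 24x24 grid (576 tokens), so the 196 grid tokens are
-# reshaped to 14x14, bicubically interpolated to 24x24, and concatenated back with
-# the extra tokens.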
-
-
-class VisionTransformerEncoder(VisionTransformer, BaseEncoder):
- @classmethod
- def from_config(cls, cfg, from_pretrained=False):
-
- vit_type = cfg.get("vit_type", "base")
- image_size = cfg.get("image_size", 384)
- ckpt_layer = cfg.get("vit_ckpt_layer", 0)
- drop_path_rate = cfg.get("vit_drop_path_rate", 0)
- norm_layer_eps = cfg.get("vit_layer_norm_epsilon", -1)
- use_grad_checkpointing = cfg.get("vit_grad_ckpt", False)
-
- if norm_layer_eps == -1:
- norm_layer = None
- else:
- norm_layer = partial(nn.LayerNorm, eps=norm_layer_eps)
-
- # norm_layer=partial(nn.LayerNorm, eps=1e-6),
- assert vit_type in ["base", "large"], "vit parameter must be base or large"
- if vit_type == "base":
- vision_width = 768
- visual_encoder = cls(
- img_size=image_size,
- patch_size=16,
- embed_dim=vision_width,
- depth=12,
- num_heads=12,
- use_grad_checkpointing=use_grad_checkpointing,
- ckpt_layer=ckpt_layer,
- drop_path_rate=0 or drop_path_rate,
- norm_layer=norm_layer,
- )
-
- if from_pretrained:
- checkpoint = torch.hub.load_state_dict_from_url(
- url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
- map_location="cpu",
- check_hash=True,
- )
- state_dict = checkpoint["model"]
- state_dict["pos_embed"] = interpolate_pos_embed(
- state_dict["pos_embed"], visual_encoder
- )
- msg = visual_encoder.load_state_dict(state_dict, strict=False)
-
- elif vit_type == "large":
- vision_width = 1024
- visual_encoder = cls(
- img_size=image_size,
- patch_size=16,
- embed_dim=vision_width,
- depth=24,
- num_heads=16,
- use_grad_checkpointing=use_grad_checkpointing,
- ckpt_layer=ckpt_layer,
- drop_path_rate=drop_path_rate or 0.1,
- norm_layer=norm_layer,
- )
- if from_pretrained:
- from timm.models.helpers import load_custom_pretrained
- from timm.models.vision_transformer import default_cfgs
-
- load_custom_pretrained(
- visual_encoder, default_cfgs["vit_large_patch16_224_in21k"]
- )
-
- visual_encoder.vision_width = vision_width
- return visual_encoder
-
- def forward_features(self, x, register_blk=-1):
- return super().forward(x, register_blk)
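
A minimal usage sketch for the position-embedding helpers above when loading a checkpoint trained at a different input resolution; the checkpoint filename and the pre-built visual_encoder are illustrative assumptions.

    import torch

    # visual_encoder is assumed to be a VisionTransformerEncoder built for 384px inputs,
    # while the checkpoint below was trained at 224px (both values are illustrative).
    checkpoint = torch.load("deit_base_patch16_224.pth", map_location="cpu")
    state_dict = checkpoint["model"]
    state_dict["pos_embed"] = interpolate_pos_embed(state_dict["pos_embed"], visual_encoder)
    msg = visual_encoder.load_state_dict(state_dict, strict=False)
    print(msg.missing_keys)
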
diff --git a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/util/get_tokenlizer.py b/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/util/get_tokenlizer.py
deleted file mode 100644
index f7dcf7e95f03f95b20546b26442a94225924618b..0000000000000000000000000000000000000000
--- a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/util/get_tokenlizer.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
-
-
-def get_tokenlizer(text_encoder_type):
- if not isinstance(text_encoder_type, str):
- # print("text_encoder_type is not a str")
- if hasattr(text_encoder_type, "text_encoder_type"):
- text_encoder_type = text_encoder_type.text_encoder_type
- elif text_encoder_type.get("text_encoder_type", False):
- text_encoder_type = text_encoder_type.get("text_encoder_type")
- else:
- raise ValueError(
- "Unknown type of text_encoder_type: {}".format(type(text_encoder_type))
- )
- print("final text_encoder_type: {}".format(text_encoder_type))
-
- tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)
- return tokenizer
-
-
-def get_pretrained_language_model(text_encoder_type):
- if text_encoder_type == "bert-base-uncased":
- return BertModel.from_pretrained(text_encoder_type)
- if text_encoder_type == "roberta-base":
- return RobertaModel.from_pretrained(text_encoder_type)
- raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type))
diff --git a/spaces/Silentlin/DiffSinger/data_gen/tts/bin/binarize.py b/spaces/Silentlin/DiffSinger/data_gen/tts/bin/binarize.py
deleted file mode 100644
index 4bd3c1f69fa59ed52fdd32eb80e746dedbae7535..0000000000000000000000000000000000000000
--- a/spaces/Silentlin/DiffSinger/data_gen/tts/bin/binarize.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-
-os.environ["OMP_NUM_THREADS"] = "1"
-
-import importlib
-from utils.hparams import set_hparams, hparams
-
-
-def binarize():
- binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
- pkg = ".".join(binarizer_cls.split(".")[:-1])
- cls_name = binarizer_cls.split(".")[-1]
- binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
- print("| Binarizer: ", binarizer_cls)
- binarizer_cls().process()
-
-
-if __name__ == '__main__':
- set_hparams()
- binarize()
diff --git a/spaces/Souranil/VAE/inference.py b/spaces/Souranil/VAE/inference.py
deleted file mode 100644
index 4e78ce2e6f929dd306c60f99a42c07ff2fc88d34..0000000000000000000000000000000000000000
--- a/spaces/Souranil/VAE/inference.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from models import vae_models
-from config import config
-from PIL import Image
-from torchvision.transforms import Resize, ToPILImage, Compose
-
-from utils import load_model, tensor_to_img, resize_img, export_to_onnx
-
-
-
-def predict(model_ckpt="vae_alpha_1024_dim_128.ckpt"):
- model_type = config.model_type
- model = vae_models[model_type].load_from_checkpoint(f"./saved_models/{model_ckpt}")
- model.eval()
- test_iter = iter(model.test_dataloader())
- d, _ = next(test_iter)
- _, _, out = model(d)
- out_img = tensor_to_img(out)
- return out_img
-
-
-
-if __name__ == "__main__":
- predict()
- # export_to_onnx("./saved_models/vae.ckpt")
\ No newline at end of file
diff --git a/spaces/StarCore/PaddleOCR/pp_ocr.py b/spaces/StarCore/PaddleOCR/pp_ocr.py
deleted file mode 100644
index 943fc721c2b3979a4820bb08b275e894f8774d89..0000000000000000000000000000000000000000
--- a/spaces/StarCore/PaddleOCR/pp_ocr.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import tempfile
-import os
-
-import paddlehub as hub
-from PIL import Image
-
-pp_ocrv3 = hub.Module(name="ch_pp-ocrv3")
-
-def inference_img(img):
- with tempfile.TemporaryDirectory() as tempdir_name:
- pp_ocrv3.recognize_text(images=[img], use_gpu=False, output_dir=tempdir_name, visualization=True)
- result_names = os.listdir(tempdir_name)
- result_image = Image.open(os.path.join(tempdir_name, result_names[0]))
- return result_image
-
-def inference_json(img):
- results = pp_ocrv3.recognize_text(images=[img], use_gpu=False, visualization=False)
- return results
\ No newline at end of file
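
A minimal sketch of calling the two helpers above on a local file; the image path is an assumption, and the JSON layout follows the usual PaddleHub OCR convention.

    import cv2

    img = cv2.imread("sample.jpg")   # BGR ndarray, as produced by cv2
    results = inference_json(img)    # list of dicts with the recognized text and box positions
    print(results)
    annotated = inference_img(img)   # PIL.Image with the detected text drawn on top
    annotated.save("sample_ocr.png")
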
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_displayhook.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_displayhook.py
deleted file mode 100644
index 22899f3dd02658d51aad6d080376042c199fbc2b..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_displayhook.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import sys
-from IPython.testing.tools import AssertPrints, AssertNotPrints
-from IPython.core.displayhook import CapturingDisplayHook
-from IPython.utils.capture import CapturedIO
-
-def test_output_displayed():
- """Checking to make sure that output is displayed"""
-
- with AssertPrints('2'):
- ip.run_cell('1+1', store_history=True)
-
- with AssertPrints('2'):
- ip.run_cell('1+1 # comment with a semicolon;', store_history=True)
-
- with AssertPrints('2'):
- ip.run_cell('1+1\n#commented_out_function();', store_history=True)
-
-
-def test_output_quiet():
- """Checking to make sure that output is quiet"""
-
- with AssertNotPrints('2'):
- ip.run_cell('1+1;', store_history=True)
-
- with AssertNotPrints('2'):
- ip.run_cell('1+1; # comment with a semicolon', store_history=True)
-
- with AssertNotPrints('2'):
- ip.run_cell('1+1;\n#commented_out_function()', store_history=True)
-
-def test_underscore_no_overwrite_user():
- ip.run_cell('_ = 42', store_history=True)
- ip.run_cell('1+1', store_history=True)
-
- with AssertPrints('42'):
- ip.run_cell('print(_)', store_history=True)
-
- ip.run_cell('del _', store_history=True)
- ip.run_cell('6+6', store_history=True)
- with AssertPrints('12'):
- ip.run_cell('_', store_history=True)
-
-
-def test_underscore_no_overwrite_builtins():
- ip.run_cell("import gettext ; gettext.install('foo')", store_history=True)
- ip.run_cell('3+3', store_history=True)
-
- with AssertPrints('gettext'):
- ip.run_cell('print(_)', store_history=True)
-
- ip.run_cell('_ = "userset"', store_history=True)
-
- with AssertPrints('userset'):
- ip.run_cell('print(_)', store_history=True)
- ip.run_cell('import builtins; del builtins._')
-
-
-def test_interactivehooks_ast_modes():
- """
- Test that ast nodes can be triggered with different modes
- """
- saved_mode = ip.ast_node_interactivity
- ip.ast_node_interactivity = 'last_expr_or_assign'
-
- try:
- with AssertPrints('2'):
- ip.run_cell('a = 1+1', store_history=True)
-
- with AssertPrints('9'):
- ip.run_cell('b = 1+8 # comment with a semicolon;', store_history=False)
-
- with AssertPrints('7'):
- ip.run_cell('c = 1+6\n#commented_out_function();', store_history=True)
-
- ip.run_cell('d = 11', store_history=True)
- with AssertPrints('12'):
- ip.run_cell('d += 1', store_history=True)
-
- with AssertNotPrints('42'):
- ip.run_cell('(u,v) = (41+1, 43-1)')
-
- finally:
- ip.ast_node_interactivity = saved_mode
-
-def test_interactivehooks_ast_modes_semi_suppress():
- """
- Test that ast nodes can be triggered with different modes and suppressed
- by semicolon
- """
- saved_mode = ip.ast_node_interactivity
- ip.ast_node_interactivity = 'last_expr_or_assign'
-
- try:
- with AssertNotPrints('2'):
- ip.run_cell('x = 1+1;', store_history=True)
-
- with AssertNotPrints('7'):
- ip.run_cell('y = 1+6; # comment with a semicolon', store_history=True)
-
- with AssertNotPrints('9'):
- ip.run_cell('z = 1+8;\n#commented_out_function()', store_history=True)
-
- finally:
- ip.ast_node_interactivity = saved_mode
-
-def test_capture_display_hook_format():
- """Tests that the capture display hook conforms to the CapturedIO output format"""
- hook = CapturingDisplayHook(ip)
- hook({"foo": "bar"})
- captured = CapturedIO(sys.stdout, sys.stderr, hook.outputs)
- # Should not raise with RichOutput transformation error
- captured.outputs
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/__init__.py
deleted file mode 100644
index 2472e14228ca5d5aeb69c707f127e3a3d9febe13..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/api/__init__.py
+++ /dev/null
@@ -1,342 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Sequence, Optional
-import pandas as pd
-from uuid import UUID
-from chromadb.api.models.Collection import Collection
-from chromadb.api.types import (
- CollectionMetadata,
- Documents,
- EmbeddingFunction,
- Embeddings,
- IDs,
- Include,
- Metadatas,
- Where,
- QueryResult,
- GetResult,
- WhereDocument,
-)
-from chromadb.config import Component
-import chromadb.utils.embedding_functions as ef
-from overrides import override
-
-
-class API(Component, ABC):
- @abstractmethod
- def heartbeat(self) -> int:
- """Returns the current server time in nanoseconds to check if the server is alive
-
- Args:
- None
-
- Returns:
- int: The current server time in nanoseconds
-
- """
- pass
-
- @abstractmethod
- def list_collections(self) -> Sequence[Collection]:
- """Returns all collections in the database
-
- Args:
- None
-
- Returns:
- dict: A dictionary of collections
-
- """
- pass
-
- @abstractmethod
- def create_collection(
- self,
- name: str,
- metadata: Optional[CollectionMetadata] = None,
- embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(),
- get_or_create: bool = False,
- ) -> Collection:
- """Creates a new collection in the database
-
- Args:
- name: The name of the collection to create. The name must be unique.
- metadata: A dictionary of metadata to associate with the collection. Defaults to None.
- embedding_function: A function that takes documents and returns an embedding. Defaults to None.
- get_or_create: If True, will return the collection if it already exists,
- and update the metadata (if applicable). Defaults to False.
-
- Returns:
- dict: the created collection
-
- """
- pass
-
- @abstractmethod
- def delete_collection(
- self,
- name: str,
- ) -> None:
- """Deletes a collection from the database
-
- Args:
- name: The name of the collection to delete
- """
-
- @abstractmethod
- def get_or_create_collection(
- self,
- name: str,
- metadata: Optional[CollectionMetadata] = None,
- embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(),
- ) -> Collection:
- """Calls create_collection with get_or_create=True.
- If the collection exists, but with different metadata, the metadata will be replaced.
-
- Args:
- name: The name of the collection to create. The name must be unique.
- metadata: A dictionary of metadata to associate with the collection. Defaults to None.
- embedding_function: A function that takes documents and returns an embedding. Should be the same as the one used to create the collection. Defaults to None.
- Returns:
- the created collection
-
- """
- pass
-
- @abstractmethod
- def get_collection(
- self,
- name: str,
- embedding_function: Optional[EmbeddingFunction] = ef.DefaultEmbeddingFunction(),
- ) -> Collection:
- """Gets a collection from the database by either name or uuid
-
- Args:
- name: The name of the collection to get. Defaults to None.
- embedding_function: A function that takes documents and returns an embedding. Should be the same as the one used to create the collection. Defaults to None.
-
- Returns:
- dict: the requested collection
-
- """
- pass
-
- def _modify(
- self,
- id: UUID,
- new_name: Optional[str] = None,
- new_metadata: Optional[CollectionMetadata] = None,
- ) -> None:
- """Modify a collection in the database - can update the name and/or metadata
-
- Args:
- id: The UUID of the collection to modify
- new_name: The new name of the collection. Defaults to None.
- new_metadata: The new metadata to associate with the collection. Defaults to None.
- """
- pass
-
- @abstractmethod
- def _add(
- self,
- ids: IDs,
- collection_id: UUID,
- embeddings: Embeddings,
- metadatas: Optional[Metadatas] = None,
- documents: Optional[Documents] = None,
- increment_index: bool = True,
- ) -> bool:
- """Add embeddings to the data store. This is the most general way to add embeddings to the database.
- ⚠️ It is recommended to use the more specific methods below when possible.
-
- Args:
- collection_id: The collection to add the embeddings to
- embedding: The sequence of embeddings to add
- metadata: The metadata to associate with the embeddings. Defaults to None.
- documents: The documents to associate with the embeddings. Defaults to None.
- ids: The ids to associate with the embeddings. Defaults to None.
- """
- pass
-
- @abstractmethod
- def _update(
- self,
- collection_id: UUID,
- ids: IDs,
- embeddings: Optional[Embeddings] = None,
- metadatas: Optional[Metadatas] = None,
- documents: Optional[Documents] = None,
- ) -> bool:
- """Add embeddings to the data store. This is the most general way to add embeddings to the database.
- ⚠️ It is recommended to use the more specific methods below when possible.
-
- Args:
- collection_id: The collection to add the embeddings to
- embedding: The sequence of embeddings to add
- """
- pass
-
- @abstractmethod
- def _upsert(
- self,
- collection_id: UUID,
- ids: IDs,
- embeddings: Embeddings,
- metadatas: Optional[Metadatas] = None,
- documents: Optional[Documents] = None,
- increment_index: bool = True,
- ) -> bool:
- """Add or update entries in the embedding store.
- If an entry with the same id already exists, it will be updated, otherwise it will be added.
-
- Args:
- collection_id: The collection to add the embeddings to
- ids: The ids to associate with the embeddings. Defaults to None.
- embeddings: The sequence of embeddings to add
- metadatas: The metadata to associate with the embeddings. Defaults to None.
- documents: The documents to associate with the embeddings. Defaults to None.
- increment_index: If True, will incrementally add to the ANN index of the collection. Defaults to True.
- """
- pass
-
- @abstractmethod
- def _count(self, collection_id: UUID) -> int:
- """Returns the number of embeddings in the database
-
- Args:
- collection_id: The collection to count the embeddings in.
-
-
- Returns:
- int: The number of embeddings in the collection
-
- """
- pass
-
- @abstractmethod
- def _peek(self, collection_id: UUID, n: int = 10) -> GetResult:
- pass
-
- @abstractmethod
- def _get(
- self,
- collection_id: UUID,
- ids: Optional[IDs] = None,
- where: Optional[Where] = {},
- sort: Optional[str] = None,
- limit: Optional[int] = None,
- offset: Optional[int] = None,
- page: Optional[int] = None,
- page_size: Optional[int] = None,
- where_document: Optional[WhereDocument] = {},
- include: Include = ["embeddings", "metadatas", "documents"],
- ) -> GetResult:
- """Gets embeddings from the database. Supports filtering, sorting, and pagination.
- ⚠️ This method should not be used directly.
-
- Args:
- where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
- sort: The column to sort the embeddings by. Defaults to None.
- limit: The maximum number of embeddings to return. Defaults to None.
- offset: The number of embeddings to skip before returning. Defaults to None.
- page: The page number to return. Defaults to None.
- page_size: The number of embeddings to return per page. Defaults to None.
-
- Returns:
- GetResult: a GetResult object containing the requested embeddings, metadatas and documents
-
- """
- pass
-
- @abstractmethod
- def _delete(
- self,
- collection_id: UUID,
- ids: Optional[IDs],
- where: Optional[Where] = {},
- where_document: Optional[WhereDocument] = {},
- ) -> IDs:
- """Deletes embeddings from the database
- ⚠️ This method should not be used directly.
-
- Args:
- where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
-
- Returns:
- List: The list of internal UUIDs of the deleted embeddings
- """
- pass
-
- @abstractmethod
- def _query(
- self,
- collection_id: UUID,
- query_embeddings: Embeddings,
- n_results: int = 10,
- where: Where = {},
- where_document: WhereDocument = {},
- include: Include = ["embeddings", "metadatas", "documents", "distances"],
- ) -> QueryResult:
- """Gets the nearest neighbors of a single embedding
- ⚠️ This method should not be used directly.
-
- Args:
- embedding: The embedding to find the nearest neighbors of
- n_results: The number of nearest neighbors to return. Defaults to 10.
- where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
- """
- pass
-
- @override
- @abstractmethod
- def reset(self) -> None:
- """Resets the database
- ⚠️ This is destructive and will delete all data in the database.
- Args:
- None
-
- Returns:
- None
- """
- pass
-
- @abstractmethod
- def raw_sql(self, sql: str) -> pd.DataFrame:
- """Runs a raw SQL query against the database
- ⚠️ This method should not be used directly.
-
- Args:
- sql: The SQL query to run
-
- Returns:
- pd.DataFrame: A pandas dataframe containing the results of the query
- """
- pass
-
- @abstractmethod
- def create_index(self, collection_name: str) -> bool:
- """Creates an index for the given collection
- ⚠️ This method should not be used directly.
-
- Args:
- collection_name: The collection to create the index for. Uses the client's collection if None. Defaults to None.
-
- Returns:
- bool: True if the index was created successfully
-
- """
- pass
-
- @abstractmethod
- def persist(self) -> bool:
- """Persist the database to disk"""
- pass
-
- @abstractmethod
- def get_version(self) -> str:
- """Get the version of Chroma.
-
- Returns:
- str: The version of Chroma
-
- """
- pass
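
A minimal sketch of the public client flow that this abstract API backs; the collection name, documents and query text are illustrative.

    import chromadb

    client = chromadb.Client()
    collection = client.create_collection(name="demo")
    collection.add(
        ids=["doc-1", "doc-2"],
        documents=["a light-bodied red wine", "a crisp white wine"],
        metadatas=[{"color": "red"}, {"color": "white"}],
    )
    hits = collection.query(query_texts=["something crisp"], n_results=1)
    print(hits["documents"])
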
diff --git a/spaces/Superlang/ImageProcessor/annotator/manga_line/__init__.py b/spaces/Superlang/ImageProcessor/annotator/manga_line/__init__.py
deleted file mode 100644
index 7750765f60a2e91d89da9d439757d4919d1879dc..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/manga_line/__init__.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import os
-import torch
-import torch.nn as nn
-from PIL import Image
-import fnmatch
-import cv2
-
-import sys
-
-import numpy as np
-from einops import rearrange
-from annotator.base_annotator import BaseProcessor
-
-
-class _bn_relu_conv(nn.Module):
- def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
- super(_bn_relu_conv, self).__init__()
- self.model = nn.Sequential(
- nn.BatchNorm2d(in_filters, eps=1e-3),
- nn.LeakyReLU(0.2),
- nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2), padding_mode='zeros')
- )
-
- def forward(self, x):
- return self.model(x)
-
- # the following are for debugs
- print("****", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape)
- for i,layer in enumerate(self.model):
- if i != 2:
- x = layer(x)
- else:
- x = layer(x)
- #x = nn.functional.pad(x, (1, 1, 1, 1), mode='constant', value=0)
- print("____", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape)
- print(x[0])
- return x
-
-class _u_bn_relu_conv(nn.Module):
- def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
- super(_u_bn_relu_conv, self).__init__()
- self.model = nn.Sequential(
- nn.BatchNorm2d(in_filters, eps=1e-3),
- nn.LeakyReLU(0.2),
- nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2)),
- nn.Upsample(scale_factor=2, mode='nearest')
- )
-
- def forward(self, x):
- return self.model(x)
-
-
-
-class _shortcut(nn.Module):
- def __init__(self, in_filters, nb_filters, subsample=1):
- super(_shortcut, self).__init__()
- self.process = False
- self.model = None
- if in_filters != nb_filters or subsample != 1:
- self.process = True
- self.model = nn.Sequential(
- nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample)
- )
-
- def forward(self, x, y):
- #print(x.size(), y.size(), self.process)
- if self.process:
- y0 = self.model(x)
- #print("merge+", torch.max(y0+y), torch.min(y0+y),torch.mean(y0+y), torch.std(y0+y), y0.shape)
- return y0 + y
- else:
- #print("merge", torch.max(x+y), torch.min(x+y),torch.mean(x+y), torch.std(x+y), y.shape)
- return x + y
-
-class _u_shortcut(nn.Module):
- def __init__(self, in_filters, nb_filters, subsample):
- super(_u_shortcut, self).__init__()
- self.process = False
- self.model = None
- if in_filters != nb_filters:
- self.process = True
- self.model = nn.Sequential(
- nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample, padding_mode='zeros'),
- nn.Upsample(scale_factor=2, mode='nearest')
- )
-
- def forward(self, x, y):
- if self.process:
- return self.model(x) + y
- else:
- return x + y
-
-
-class basic_block(nn.Module):
- def __init__(self, in_filters, nb_filters, init_subsample=1):
- super(basic_block, self).__init__()
- self.conv1 = _bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
- self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3)
- self.shortcut = _shortcut(in_filters, nb_filters, subsample=init_subsample)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.residual(x1)
- return self.shortcut(x, x2)
-
-class _u_basic_block(nn.Module):
- def __init__(self, in_filters, nb_filters, init_subsample=1):
- super(_u_basic_block, self).__init__()
- self.conv1 = _u_bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
- self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3)
- self.shortcut = _u_shortcut(in_filters, nb_filters, subsample=init_subsample)
-
- def forward(self, x):
- y = self.residual(self.conv1(x))
- return self.shortcut(x, y)
-
-
-class _residual_block(nn.Module):
- def __init__(self, in_filters, nb_filters, repetitions, is_first_layer=False):
- super(_residual_block, self).__init__()
- layers = []
- for i in range(repetitions):
- init_subsample = 1
- if i == repetitions - 1 and not is_first_layer:
- init_subsample = 2
- if i == 0:
- l = basic_block(in_filters=in_filters, nb_filters=nb_filters, init_subsample=init_subsample)
- else:
- l = basic_block(in_filters=nb_filters, nb_filters=nb_filters, init_subsample=init_subsample)
- layers.append(l)
-
- self.model = nn.Sequential(*layers)
-
- def forward(self, x):
- return self.model(x)
-
-
-class _upsampling_residual_block(nn.Module):
- def __init__(self, in_filters, nb_filters, repetitions):
- super(_upsampling_residual_block, self).__init__()
- layers = []
- for i in range(repetitions):
- l = None
- if i == 0:
- l = _u_basic_block(in_filters=in_filters, nb_filters=nb_filters)#(input)
- else:
- l = basic_block(in_filters=nb_filters, nb_filters=nb_filters)#(input)
- layers.append(l)
-
- self.model = nn.Sequential(*layers)
-
- def forward(self, x):
- return self.model(x)
-
-
-class res_skip(nn.Module):
-
- def __init__(self):
- super(res_skip, self).__init__()
- self.block0 = _residual_block(in_filters=1, nb_filters=24, repetitions=2, is_first_layer=True)#(input)
- self.block1 = _residual_block(in_filters=24, nb_filters=48, repetitions=3)#(block0)
- self.block2 = _residual_block(in_filters=48, nb_filters=96, repetitions=5)#(block1)
- self.block3 = _residual_block(in_filters=96, nb_filters=192, repetitions=7)#(block2)
- self.block4 = _residual_block(in_filters=192, nb_filters=384, repetitions=12)#(block3)
-
- self.block5 = _upsampling_residual_block(in_filters=384, nb_filters=192, repetitions=7)#(block4)
- self.res1 = _shortcut(in_filters=192, nb_filters=192)#(block3, block5, subsample=(1,1))
-
- self.block6 = _upsampling_residual_block(in_filters=192, nb_filters=96, repetitions=5)#(res1)
- self.res2 = _shortcut(in_filters=96, nb_filters=96)#(block2, block6, subsample=(1,1))
-
- self.block7 = _upsampling_residual_block(in_filters=96, nb_filters=48, repetitions=3)#(res2)
- self.res3 = _shortcut(in_filters=48, nb_filters=48)#(block1, block7, subsample=(1,1))
-
- self.block8 = _upsampling_residual_block(in_filters=48, nb_filters=24, repetitions=2)#(res3)
- self.res4 = _shortcut(in_filters=24, nb_filters=24)#(block0,block8, subsample=(1,1))
-
- self.block9 = _residual_block(in_filters=24, nb_filters=16, repetitions=2, is_first_layer=True)#(res4)
- self.conv15 = _bn_relu_conv(in_filters=16, nb_filters=1, fh=1, fw=1, subsample=1)#(block7)
-
- def forward(self, x):
- x0 = self.block0(x)
- x1 = self.block1(x0)
- x2 = self.block2(x1)
- x3 = self.block3(x2)
- x4 = self.block4(x3)
-
- x5 = self.block5(x4)
- res1 = self.res1(x3, x5)
-
- x6 = self.block6(res1)
- res2 = self.res2(x2, x6)
-
- x7 = self.block7(res2)
- res3 = self.res3(x1, x7)
-
- x8 = self.block8(res3)
- res4 = self.res4(x0, x8)
-
- x9 = self.block9(res4)
- y = self.conv15(x9)
-
- return y
-
-
-class MangaLineExtration(BaseProcessor):
- def __init__(self, **kwargs):
- super().__init__(**kwargs)
- self.model = None
- self.model_dir = os.path.join(self.models_path, "manga_line")
- # self.device = devices.get_device_for("controlnet")
-
- def load_model(self):
- remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/erika.pth"
- modelpath = os.path.join(self.model_dir, "erika.pth")
- if not os.path.exists(modelpath):
- from basicsr.utils.download_util import load_file_from_url
- load_file_from_url(remote_model_path, model_dir=self.model_dir)
- #norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
- net = res_skip()
- ckpt = torch.load(modelpath)
- for key in list(ckpt.keys()):
- if 'module.' in key:
- ckpt[key.replace('module.', '')] = ckpt[key]
- del ckpt[key]
- net.load_state_dict(ckpt)
- net.eval()
- self.model = net.to(self.device)
-
- def unload_model(self):
- if self.model is not None:
- self.model.cpu()
-
- def __call__(self, input_image):
- if self.model is None:
- self.load_model()
- self.model.to(self.device)
- img = cv2.cvtColor(input_image, cv2.COLOR_RGB2GRAY)
- img = np.ascontiguousarray(img.copy()).copy()
- with torch.no_grad():
- image_feed = torch.from_numpy(img).float().to(self.device)
- image_feed = rearrange(image_feed, 'h w -> 1 1 h w')
- line = self.model(image_feed)
- line = 255 - line.cpu().numpy()[0, 0]
- return line.clip(0, 255).astype(np.uint8)
-
-
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/datasets/cityscapes_panoptic.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/datasets/cityscapes_panoptic.py
deleted file mode 100644
index 7ce9ec48f673dadf3f5b4ae0592fc82415d9f925..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/datasets/cityscapes_panoptic.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import json
-import logging
-import os
-
-from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
-from annotator.oneformer.detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
-from annotator.oneformer.detectron2.utils.file_io import PathManager
-
-"""
-This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
-"""
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
- files = []
- # scan through the directory
- cities = PathManager.ls(image_dir)
- logger.info(f"{len(cities)} cities found in '{image_dir}'.")
- image_dict = {}
- for city in cities:
- city_img_dir = os.path.join(image_dir, city)
- for basename in PathManager.ls(city_img_dir):
- image_file = os.path.join(city_img_dir, basename)
-
- suffix = "_leftImg8bit.png"
- assert basename.endswith(suffix), basename
- basename = os.path.basename(basename)[: -len(suffix)]
-
- image_dict[basename] = image_file
-
- for ann in json_info["annotations"]:
- image_file = image_dict.get(ann["image_id"], None)
- assert image_file is not None, "No image {} found for annotation {}".format(
- ann["image_id"], ann["file_name"]
- )
- label_file = os.path.join(gt_dir, ann["file_name"])
- segments_info = ann["segments_info"]
-
- files.append((image_file, label_file, segments_info))
-
- assert len(files), "No images found in {}".format(image_dir)
- assert PathManager.isfile(files[0][0]), files[0][0]
- assert PathManager.isfile(files[0][1]), files[0][1]
- return files
-
-
-def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
- """
- Args:
- image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
- gt_dir (str): path to the raw annotations. e.g.,
- "~/cityscapes/gtFine/cityscapes_panoptic_train".
- gt_json (str): path to the json file. e.g.,
- "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
- meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
- and "stuff_dataset_id_to_contiguous_id" to map category ids to
- contiguous ids for training.
-
- Returns:
- list[dict]: a list of dicts in Detectron2 standard format. (See
- `Using Custom Datasets </tutorials/datasets.html>`_ )
- """
-
- def _convert_category_id(segment_info, meta):
- if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
- segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
- segment_info["category_id"]
- ]
- else:
- segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
- segment_info["category_id"]
- ]
- return segment_info
-
- assert os.path.exists(
- gt_json
- ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
- with open(gt_json) as f:
- json_info = json.load(f)
- files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
- ret = []
- for image_file, label_file, segments_info in files:
- sem_label_file = (
- image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
- )
- segments_info = [_convert_category_id(x, meta) for x in segments_info]
- ret.append(
- {
- "file_name": image_file,
- "image_id": "_".join(
- os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
- ),
- "sem_seg_file_name": sem_label_file,
- "pan_seg_file_name": label_file,
- "segments_info": segments_info,
- }
- )
- assert len(ret), f"No images found in {image_dir}!"
- assert PathManager.isfile(
- ret[0]["sem_seg_file_name"]
- ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
- assert PathManager.isfile(
- ret[0]["pan_seg_file_name"]
- ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
- return ret
-
-
-_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
- "cityscapes_fine_panoptic_train": (
- "cityscapes/leftImg8bit/train",
- "cityscapes/gtFine/cityscapes_panoptic_train",
- "cityscapes/gtFine/cityscapes_panoptic_train.json",
- ),
- "cityscapes_fine_panoptic_val": (
- "cityscapes/leftImg8bit/val",
- "cityscapes/gtFine/cityscapes_panoptic_val",
- "cityscapes/gtFine/cityscapes_panoptic_val.json",
- ),
- # "cityscapes_fine_panoptic_test": not supported yet
-}
-
-
-def register_all_cityscapes_panoptic(root):
- meta = {}
- # The following metadata maps contiguous id from [0, #thing categories +
- # #stuff categories) to their names and colors. We keep two copies of the
- # same name and color under "thing_*" and "stuff_*" because the current
- # visualization function in D2 handles thing and stuff classes differently
- # due to some heuristic used in Panoptic FPN. We keep the same naming to
- # enable reusing existing visualization functions.
- thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
- thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
- stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
- stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
-
- meta["thing_classes"] = thing_classes
- meta["thing_colors"] = thing_colors
- meta["stuff_classes"] = stuff_classes
- meta["stuff_colors"] = stuff_colors
-
- # There are three types of ids in cityscapes panoptic segmentation:
- # (1) category id: like semantic segmentation, it is the class id for each
- # pixel. Since there are some classes not used in evaluation, the category
- # id is not always contiguous and thus we have two set of category ids:
- # - original category id: category id in the original dataset, mainly
- # used for evaluation.
- # - contiguous category id: [0, #classes), in order to train the classifier
- # (2) instance id: this id is used to differentiate different instances from
- # the same category. For "stuff" classes, the instance id is always 0; for
- # "thing" classes, the instance id starts from 1 and 0 is reserved for
- # ignored instances (e.g. crowd annotation).
- # (3) panoptic id: this is the compact id that encodes both category and
- # instance id by: category_id * 1000 + instance_id.
- thing_dataset_id_to_contiguous_id = {}
- stuff_dataset_id_to_contiguous_id = {}
-
- for k in CITYSCAPES_CATEGORIES:
- if k["isthing"] == 1:
- thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
- else:
- stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
-
- meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
- meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
-
- for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
- image_dir = os.path.join(root, image_dir)
- gt_dir = os.path.join(root, gt_dir)
- gt_json = os.path.join(root, gt_json)
-
- DatasetCatalog.register(
- key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
- )
- MetadataCatalog.get(key).set(
- panoptic_root=gt_dir,
- image_root=image_dir,
- panoptic_json=gt_json,
- gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
- evaluator_type="cityscapes_panoptic_seg",
- ignore_label=255,
- label_divisor=1000,
- **meta,
- )
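
A small worked example of the panoptic id encoding described in the comments above (category_id * 1000 + instance_id); category 26 ("car") is illustrative.

    label_divisor = 1000
    panoptic_id = 26 * label_divisor + 3         # category 26 ("car"), instance 3 -> 26003
    category_id = panoptic_id // label_divisor   # 26
    instance_id = panoptic_id % label_divisor    # 3 (0 is reserved for stuff / ignored instances)
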
diff --git a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py b/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
deleted file mode 100644
index 7368ae8031188a9f946d9d3f29633c96e791e68e..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# MIT License
-
-# Copyright (c) 2022 Intelligent Systems Lab Org
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# File author: Shariq Farooq Bhat
-
-import itertools
-
-import torch
-import torch.nn as nn
-
-from zoedepth.models.depth_model import DepthModel
-from zoedepth.models.base_models.midas import MidasCore
-from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed
-from zoedepth.models.layers.dist_layers import ConditionalLogBinomial
-from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor,
- SeedBinRegressorUnnormed)
-from zoedepth.models.layers.patch_transformer import PatchTransformerEncoder
-from zoedepth.models.model_io import load_state_from_resource
-
-
-class ZoeDepthNK(DepthModel):
- def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128,
- n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp',
- min_temp=5, max_temp=50,
- memory_efficient=False, train_midas=True,
- is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
- """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts.
-
- Args:
- core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
-
- bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys:
- "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float)
-
- The length of this list determines the number of metric heads.
- bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
- For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed".
- bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
-
- n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
- attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
- attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
- attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
- attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
-
- min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
- max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
-
- memory_efficient (bool, optional): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to False.
-
- train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
- is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True.
- midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 1.
- encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
- pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
-
- """
-
- super().__init__()
-
- self.core = core
- self.bin_conf = bin_conf
- self.min_temp = min_temp
- self.max_temp = max_temp
- self.memory_efficient = memory_efficient
- self.train_midas = train_midas
- self.is_midas_pretrained = is_midas_pretrained
- self.midas_lr_factor = midas_lr_factor
- self.encoder_lr_factor = encoder_lr_factor
- self.pos_enc_lr_factor = pos_enc_lr_factor
- self.inverse_midas = inverse_midas
-
- N_MIDAS_OUT = 32
- btlnck_features = self.core.output_channels[0]
- num_out_features = self.core.output_channels[1:]
- # self.scales = [16, 8, 4, 2] # spatial scale factors
-
- self.conv2 = nn.Conv2d(
- btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0)
-
- # Transformer classifier on the bottleneck
- self.patch_transformer = PatchTransformerEncoder(
- btlnck_features, 1, 128, use_class_token=True)
- self.mlp_classifier = nn.Sequential(
- nn.Linear(128, 128),
- nn.ReLU(),
- nn.Linear(128, 2)
- )
-
- if bin_centers_type == "normed":
- SeedBinRegressorLayer = SeedBinRegressor
- Attractor = AttractorLayer
- elif bin_centers_type == "softplus":
- SeedBinRegressorLayer = SeedBinRegressorUnnormed
- Attractor = AttractorLayerUnnormed
- elif bin_centers_type == "hybrid1":
- SeedBinRegressorLayer = SeedBinRegressor
- Attractor = AttractorLayerUnnormed
- elif bin_centers_type == "hybrid2":
- SeedBinRegressorLayer = SeedBinRegressorUnnormed
- Attractor = AttractorLayer
- else:
- raise ValueError(
- "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
- self.bin_centers_type = bin_centers_type
- # We have bins for each bin conf.
- # Create a map (ModuleDict) of 'name' -> seed_bin_regressor
- self.seed_bin_regressors = nn.ModuleDict(
- {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim//2, min_depth=conf["min_depth"], max_depth=conf["max_depth"])
- for conf in bin_conf}
- )
-
- self.seed_projector = Projector(
- btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
- self.projectors = nn.ModuleList([
- Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
- for num_out in num_out_features
- ])
-
- # Create a map (ModuleDict) of 'name' -> attractors (ModuleList)
- self.attractors = nn.ModuleDict(
- {conf['name']: nn.ModuleList([
- Attractor(bin_embedding_dim, n_attractors[i],
- mlp_dim=bin_embedding_dim, alpha=attractor_alpha,
- gamma=attractor_gamma, kind=attractor_kind,
- attractor_type=attractor_type, memory_efficient=memory_efficient,
- min_depth=conf["min_depth"], max_depth=conf["max_depth"])
- for i in range(len(n_attractors))
- ])
- for conf in bin_conf}
- )
-
- last_in = N_MIDAS_OUT
- # conditional log binomial for each bin conf
- self.conditional_log_binomial = nn.ModuleDict(
- {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp)
- for conf in bin_conf}
- )
-
- def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
- """
- Args:
- x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain.
- return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False.
- denorm (bool, optional): Whether to denormalize the input image. Defaults to False.
- return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False.
-
- Returns:
- dict: Dictionary of outputs with keys:
- - "rel_depth": Relative depth map of shape (B, 1, H, W)
- - "metric_depth": Metric depth map of shape (B, 1, H, W)
- - "domain_logits": Domain logits of shape (B, 2)
- - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True
- - "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True
- """
- b, c, h, w = x.shape
- self.orig_input_width = w
- self.orig_input_height = h
- rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
-
- outconv_activation = out[0]
- btlnck = out[1]
- x_blocks = out[2:]
-
- x_d0 = self.conv2(btlnck)
- x = x_d0
-
- # Predict which path to take
- embedding = self.patch_transformer(x)[0] # N, E
- domain_logits = self.mlp_classifier(embedding) # N, 2
- domain_vote = torch.softmax(domain_logits.sum(
- dim=0, keepdim=True), dim=-1) # 1, 2
-
- # Get the path
- bin_conf_name = ["nyu", "kitti"][torch.argmax(
- domain_vote, dim=-1).squeeze().item()]
-
- try:
- conf = [c for c in self.bin_conf if c["name"] == bin_conf_name][0]
- except IndexError:
- raise ValueError(
- f"bin_conf_name {bin_conf_name} not found in bin_confs")
-
- min_depth = conf['min_depth']
- max_depth = conf['max_depth']
-
- seed_bin_regressor = self.seed_bin_regressors[bin_conf_name]
- _, seed_b_centers = seed_bin_regressor(x)
- if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
- b_prev = (seed_b_centers - min_depth)/(max_depth - min_depth)
- else:
- b_prev = seed_b_centers
- prev_b_embedding = self.seed_projector(x)
-
- attractors = self.attractors[bin_conf_name]
- for projector, attractor, x in zip(self.projectors, attractors, x_blocks):
- b_embedding = projector(x)
- b, b_centers = attractor(
- b_embedding, b_prev, prev_b_embedding, interpolate=True)
- b_prev = b
- prev_b_embedding = b_embedding
-
- last = outconv_activation
-
- b_centers = nn.functional.interpolate(
- b_centers, last.shape[-2:], mode='bilinear', align_corners=True)
- b_embedding = nn.functional.interpolate(
- b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
-
- clb = self.conditional_log_binomial[bin_conf_name]
- x = clb(last, b_embedding)
-
- # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor
- # print(x.shape, b_centers.shape)
- # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
- out = torch.sum(x * b_centers, dim=1, keepdim=True)
-
- output = dict(domain_logits=domain_logits, metric_depth=out)
- if return_final_centers or return_probs:
- output['bin_centers'] = b_centers
-
- if return_probs:
- output['probs'] = x
- return output
-
- def get_lr_params(self, lr):
- """
- Learning rate configuration for different layers of the model
-
- Args:
- lr (float) : Base learning rate
- Returns:
- list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
- """
- param_conf = []
- if self.train_midas:
- def get_rel_pos_params():
- for name, p in self.core.core.pretrained.named_parameters():
- if "relative_position" in name:
- yield p
-
- def get_enc_params_except_rel_pos():
- for name, p in self.core.core.pretrained.named_parameters():
- if "relative_position" not in name:
- yield p
-
- encoder_params = get_enc_params_except_rel_pos()
- rel_pos_params = get_rel_pos_params()
- midas_params = self.core.core.scratch.parameters()
- midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0
- param_conf.extend([
- {'params': encoder_params, 'lr': lr / self.encoder_lr_factor},
- {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor},
- {'params': midas_params, 'lr': lr / midas_lr_factor}
- ])
-
- remaining_modules = []
- for name, child in self.named_children():
- if name != 'core':
- remaining_modules.append(child)
- remaining_params = itertools.chain(
- *[child.parameters() for child in remaining_modules])
- param_conf.append({'params': remaining_params, 'lr': lr})
- return param_conf
-
- def get_conf_parameters(self, conf_name):
- """
- Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
- """
- params = []
- for name, child in self.named_children():
- if isinstance(child, nn.ModuleDict):
- for bin_conf_name, module in child.items():
- if bin_conf_name == conf_name:
- params += list(module.parameters())
- return params
-
- def freeze_conf(self, conf_name):
- """
- Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
- """
- for p in self.get_conf_parameters(conf_name):
- p.requires_grad = False
-
- def unfreeze_conf(self, conf_name):
- """
- Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
- """
- for p in self.get_conf_parameters(conf_name):
- p.requires_grad = True
-
- def freeze_all_confs(self):
- """
- Freezes all the parameters of all the ModuleDicts children
- """
- for name, child in self.named_children():
- if isinstance(child, nn.ModuleDict):
- for bin_conf_name, module in child.items():
- for p in module.parameters():
- p.requires_grad = False
-
- @staticmethod
- def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
- core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
- train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
- model = ZoeDepthNK(core, **kwargs)
- if pretrained_resource:
- assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
- model = load_state_from_resource(model, pretrained_resource)
- return model
-
- @staticmethod
- def build_from_config(config):
- return ZoeDepthNK.build(**config)
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/_virtualenv.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/_virtualenv.py
deleted file mode 100644
index 17f73b1d48e11ded185780973123d825a7e219d7..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/_virtualenv.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Patches that are applied at runtime to the virtual environment."""
-
-from __future__ import annotations
-
-import os
-import sys
-from contextlib import suppress
-
-VIRTUALENV_PATCH_FILE = os.path.join(__file__)
-
-
-def patch_dist(dist):
- """
- Distutils allows users to configure some arguments via a configuration file:
- https://docs.python.org/3/install/index.html#distutils-configuration-files.
-
- Some of these arguments don't make sense in the context of the virtual environment files, so let's fix them up.
- """ # noqa: D205
- # we cannot allow some install config as that would get packages installed outside of the virtual environment
- old_parse_config_files = dist.Distribution.parse_config_files
-
- def parse_config_files(self, *args, **kwargs):
- result = old_parse_config_files(self, *args, **kwargs)
- install = self.get_option_dict("install")
-
- if "prefix" in install: # the prefix governs where to install the libraries
- install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
- for base in ("purelib", "platlib", "headers", "scripts", "data"):
- key = f"install_{base}"
- if key in install: # do not allow global configs to hijack venv paths
- install.pop(key, None)
- return result
-
- dist.Distribution.parse_config_files = parse_config_files
-
-
-# Import hook that patches some modules to ignore configuration values that break package installation in case
-# of virtual environments.
-_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
-# https://docs.python.org/3/library/importlib.html#setting-up-an-importer
-
-
-class _Finder:
- """A meta path finder that allows patching the imported distutils modules."""
-
- fullname = None
-
- # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
- # because there are gevent-based applications that need to be first to import threading by themselves.
- # See https://github.com/pypa/virtualenv/issues/1895 for details.
- lock = [] # noqa: RUF012
-
- def find_spec(self, fullname, path, target=None): # noqa: ARG002
- if fullname in _DISTUTILS_PATCH and self.fullname is None:
- # initialize lock[0] lazily
- if len(self.lock) == 0:
- import threading
-
- lock = threading.Lock()
- # there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
- # observing .lock as empty, and further going into hereby initialization. However due to the GIL,
- # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
- # - that every thread will use - into .lock[0].
- # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
- self.lock.append(lock)
-
- from functools import partial
- from importlib.util import find_spec
-
- with self.lock[0]:
- self.fullname = fullname
- try:
- spec = find_spec(fullname, path)
- if spec is not None:
- # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
- is_new_api = hasattr(spec.loader, "exec_module")
- func_name = "exec_module" if is_new_api else "load_module"
- old = getattr(spec.loader, func_name)
- func = self.exec_module if is_new_api else self.load_module
- if old is not func:
- with suppress(AttributeError): # C-Extension loaders are r/o such as zipimporter with <3.7
- setattr(spec.loader, func_name, partial(func, old))
- return spec
- finally:
- self.fullname = None
- return None
-
- @staticmethod
- def exec_module(old, module):
- old(module)
- if module.__name__ in _DISTUTILS_PATCH:
- patch_dist(module)
-
- @staticmethod
- def load_module(old, name):
- module = old(name)
- if module.__name__ in _DISTUTILS_PATCH:
- patch_dist(module)
- return module
-
-
-sys.meta_path.insert(0, _Finder())
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/ordered_set.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/ordered_set.py
deleted file mode 100644
index 14876000de895a609d5b9f3de39c3c8fc44ef1fc..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/ordered_set.py
+++ /dev/null
@@ -1,488 +0,0 @@
-"""
-An OrderedSet is a custom MutableSet that remembers its order, so that every
-entry has an index that can be looked up.
-
-Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
-and released under the MIT license.
-"""
-import itertools as it
-from collections import deque
-
-try:
- # Python 3
- from collections.abc import MutableSet, Sequence
-except ImportError:
- # Python 2.7
- from collections import MutableSet, Sequence
-
-SLICE_ALL = slice(None)
-__version__ = "3.1"
-
-
-def is_iterable(obj):
- """
- Are we being asked to look up a list of things, instead of a single thing?
- We check for the `__iter__` attribute so that this can cover types that
- don't have to be known by this module, such as NumPy arrays.
-
- Strings, however, should be considered as atomic values to look up, not
- iterables. The same goes for tuples, since they are immutable and therefore
- valid entries.
-
- We don't need to check for the Python 2 `unicode` type, because it doesn't
- have an `__iter__` attribute anyway.
- """
- return (
- hasattr(obj, "__iter__")
- and not isinstance(obj, str)
- and not isinstance(obj, tuple)
- )
-
-
-class OrderedSet(MutableSet, Sequence):
- """
- An OrderedSet is a custom MutableSet that remembers its order, so that
- every entry has an index that can be looked up.
-
- Example:
- >>> OrderedSet([1, 1, 2, 3, 2])
- OrderedSet([1, 2, 3])
- """
-
- def __init__(self, iterable=None):
- self.items = []
- self.map = {}
- if iterable is not None:
- self |= iterable
-
- def __len__(self):
- """
- Returns the number of unique elements in the ordered set
-
- Example:
- >>> len(OrderedSet([]))
- 0
- >>> len(OrderedSet([1, 2]))
- 2
- """
- return len(self.items)
-
- def __getitem__(self, index):
- """
- Get the item at a given index.
-
- If `index` is a slice, you will get back that slice of items, as a
- new OrderedSet.
-
- If `index` is a list or a similar iterable, you'll get a list of
- items corresponding to those indices. This is similar to NumPy's
- "fancy indexing". The result is not an OrderedSet because you may ask
- for duplicate indices, and the number of elements returned should be
- the number of elements asked for.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset[1]
- 2
- """
- if isinstance(index, slice) and index == SLICE_ALL:
- return self.copy()
- elif is_iterable(index):
- return [self.items[i] for i in index]
- elif hasattr(index, "__index__") or isinstance(index, slice):
- result = self.items[index]
- if isinstance(result, list):
- return self.__class__(result)
- else:
- return result
- else:
- raise TypeError("Don't know how to index an OrderedSet by %r" % index)
-
- def copy(self):
- """
- Return a shallow copy of this object.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> other = this.copy()
- >>> this == other
- True
- >>> this is other
- False
- """
- return self.__class__(self)
-
- def __getstate__(self):
- if len(self) == 0:
- # The state can't be an empty list.
- # We need to return a truthy value, or else __setstate__ won't be run.
- #
- # This could have been done more gracefully by always putting the state
- # in a tuple, but this way is backwards- and forwards- compatible with
- # previous versions of OrderedSet.
- return (None,)
- else:
- return list(self)
-
- def __setstate__(self, state):
- if state == (None,):
- self.__init__([])
- else:
- self.__init__(state)
-
- def __contains__(self, key):
- """
- Test if the item is in this ordered set
-
- Example:
- >>> 1 in OrderedSet([1, 3, 2])
- True
- >>> 5 in OrderedSet([1, 3, 2])
- False
- """
- return key in self.map
-
- def add(self, key):
- """
- Add `key` as an item to this OrderedSet, then return its index.
-
- If `key` is already in the OrderedSet, return the index it already
- had.
-
- Example:
- >>> oset = OrderedSet()
- >>> oset.append(3)
- 0
- >>> print(oset)
- OrderedSet([3])
- """
- if key not in self.map:
- self.map[key] = len(self.items)
- self.items.append(key)
- return self.map[key]
-
- append = add
-
- def update(self, sequence):
- """
- Update the set with the given iterable sequence, then return the index
- of the last element inserted.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.update([3, 1, 5, 1, 4])
- 4
- >>> print(oset)
- OrderedSet([1, 2, 3, 5, 4])
- """
- item_index = None
- try:
- for item in sequence:
- item_index = self.add(item)
- except TypeError:
- raise ValueError(
- "Argument needs to be an iterable, got %s" % type(sequence)
- )
- return item_index
-
- def index(self, key):
- """
- Get the index of a given entry, raising an IndexError if it's not
- present.
-
- `key` can be an iterable of entries that is not a string, in which case
- this returns a list of indices.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.index(2)
- 1
- """
- if is_iterable(key):
- return [self.index(subkey) for subkey in key]
- return self.map[key]
-
- # Provide some compatibility with pd.Index
- get_loc = index
- get_indexer = index
-
- def pop(self):
- """
- Remove and return the last element from the set.
-
- Raises KeyError if the set is empty.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.pop()
- 3
- """
- if not self.items:
- raise KeyError("Set is empty")
-
- elem = self.items[-1]
- del self.items[-1]
- del self.map[elem]
- return elem
-
- def discard(self, key):
- """
- Remove an element. Do not raise an exception if absent.
-
- The MutableSet mixin uses this to implement the .remove() method, which
- *does* raise an error when asked to remove a non-existent item.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- """
- if key in self:
- i = self.map[key]
- del self.items[i]
- del self.map[key]
- for k, v in self.map.items():
- if v >= i:
- self.map[k] = v - 1
-
- def clear(self):
- """
- Remove all items from this OrderedSet.
- """
- del self.items[:]
- self.map.clear()
-
- def __iter__(self):
- """
- Example:
- >>> list(iter(OrderedSet([1, 2, 3])))
- [1, 2, 3]
- """
- return iter(self.items)
-
- def __reversed__(self):
- """
- Example:
- >>> list(reversed(OrderedSet([1, 2, 3])))
- [3, 2, 1]
- """
- return reversed(self.items)
-
- def __repr__(self):
- if not self:
- return "%s()" % (self.__class__.__name__,)
- return "%s(%r)" % (self.__class__.__name__, list(self))
-
- def __eq__(self, other):
- """
- Returns true if the containers have the same items. If `other` is a
- Sequence, then order is checked, otherwise it is ignored.
-
- Example:
- >>> oset = OrderedSet([1, 3, 2])
- >>> oset == [1, 3, 2]
- True
- >>> oset == [1, 2, 3]
- False
- >>> oset == [2, 3]
- False
- >>> oset == OrderedSet([3, 2, 1])
- False
- """
- # In Python 2 deque is not a Sequence, so treat it as one for
- # consistent behavior with Python 3.
- if isinstance(other, (Sequence, deque)):
- # Check that this OrderedSet contains the same elements, in the
- # same order, as the other object.
- return list(self) == list(other)
- try:
- other_as_set = set(other)
- except TypeError:
- # If `other` can't be converted into a set, it's not equal.
- return False
- else:
- return set(self) == other_as_set
-
- def union(self, *sets):
- """
- Combines all unique items.
- Each items order is defined by its first appearance.
-
- Example:
- >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
- >>> print(oset)
- OrderedSet([3, 1, 4, 5, 2, 0])
- >>> oset.union([8, 9])
- OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
- >>> oset | {10}
- OrderedSet([3, 1, 4, 5, 2, 0, 10])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- containers = map(list, it.chain([self], sets))
- items = it.chain.from_iterable(containers)
- return cls(items)
-
- def __and__(self, other):
- # the parent implementation of this is backwards
- return self.intersection(other)
-
- def intersection(self, *sets):
- """
- Returns elements in common between all sets. Order is defined only
- by the first set.
-
- Example:
- >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
- >>> print(oset)
- OrderedSet([1, 2, 3])
- >>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
- OrderedSet([2])
- >>> oset.intersection()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- if sets:
- common = set.intersection(*map(set, sets))
- items = (item for item in self if item in common)
- else:
- items = self
- return cls(items)
-
- def difference(self, *sets):
- """
- Returns all elements that are in this set but not the others.
-
- Example:
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
- OrderedSet([1])
- >>> OrderedSet([1, 2, 3]) - OrderedSet([2])
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__
- if sets:
- other = set.union(*map(set, sets))
- items = (item for item in self if item not in other)
- else:
- items = self
- return cls(items)
-
- def issubset(self, other):
- """
- Report whether another set contains this set.
-
- Example:
- >>> OrderedSet([1, 2, 3]).issubset({1, 2})
- False
- >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
- True
- >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
- False
- """
- if len(self) > len(other): # Fast check for obvious cases
- return False
- return all(item in other for item in self)
-
- def issuperset(self, other):
- """
- Report whether this set contains another set.
-
- Example:
- >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
- False
- >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
- True
- >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
- False
- """
- if len(self) < len(other): # Fast check for obvious cases
- return False
- return all(item in self for item in other)
-
- def symmetric_difference(self, other):
- """
- Return the symmetric difference of two OrderedSets as a new set.
- That is, the new set will contain all elements that are in exactly
- one of the sets.
-
- Their order will be preserved, with elements from `self` preceding
- elements from `other`.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference(other)
- OrderedSet([4, 5, 9, 2])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- diff1 = cls(self).difference(other)
- diff2 = cls(other).difference(self)
- return diff1.union(diff2)
-
- def _update_items(self, items):
- """
- Replace the 'items' list of this OrderedSet with a new one, updating
- self.map accordingly.
- """
- self.items = items
- self.map = {item: idx for (idx, item) in enumerate(items)}
-
- def difference_update(self, *sets):
- """
- Update this OrderedSet to remove items from one or more other sets.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> this.difference_update(OrderedSet([2, 4]))
- >>> print(this)
- OrderedSet([1, 3])
-
- >>> this = OrderedSet([1, 2, 3, 4, 5])
- >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
- >>> print(this)
- OrderedSet([3, 5])
- """
- items_to_remove = set()
- for other in sets:
- items_to_remove |= set(other)
- self._update_items([item for item in self.items if item not in items_to_remove])
-
- def intersection_update(self, other):
- """
- Update this OrderedSet to keep only items in another set, preserving
- their order in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.intersection_update(other)
- >>> print(this)
- OrderedSet([1, 3, 7])
- """
- other = set(other)
- self._update_items([item for item in self.items if item in other])
-
- def symmetric_difference_update(self, other):
- """
- Update this OrderedSet to remove items from another set, then
- add items from the other set that were not present in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference_update(other)
- >>> print(this)
- OrderedSet([4, 5, 9, 2])
- """
- items_to_add = [item for item in other if item not in self]
- items_to_remove = set(other)
- self._update_items(
- [item for item in self.items if item not in items_to_remove] + items_to_add
- )
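
The `OrderedSet` module removed above behaves like a regular set that also keeps insertion order and exposes positional lookups. A minimal usage sketch, assuming the standalone `ordered-set` package from PyPI (the copy deleted here is vendored under `setuptools._vendor` and is not meant to be imported directly):

```python
# Sketch of typical OrderedSet usage, mirroring the docstring examples above.
# The top-level `ordered_set` import is an assumption (install `ordered-set`).
from ordered_set import OrderedSet

letters = OrderedSet("abracadabra")        # duplicates dropped, first-seen order kept
print(letters)                             # OrderedSet(['a', 'b', 'r', 'c', 'd'])
print(letters.index("r"))                  # 2 -- every entry has a stable index
print(letters | OrderedSet("simsalabim"))  # union preserves order of first appearance
```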
diff --git a/spaces/TencentARC/T2I-Adapter-SDXL-Sketch/app.py b/spaces/TencentARC/T2I-Adapter-SDXL-Sketch/app.py
deleted file mode 100644
index 7152631b17f750e34f06887fb179fbbe13952671..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/T2I-Adapter-SDXL-Sketch/app.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import random
-
-import gradio as gr
-import numpy as np
-import PIL.Image
-import torch
-import torchvision.transforms.functional as TF
-from diffusers import (
- AutoencoderKL,
- EulerAncestralDiscreteScheduler,
- StableDiffusionXLAdapterPipeline,
- T2IAdapter,
-)
-
-DESCRIPTION = '''# Doodly - T2I-Adapter-SDXL **Sketch**
-To try out all the [6 T2I-Adapter](https://huggingface.co/collections/TencentARC/t2i-adapter-sdxl-64fac9cbf393f30370eeb02f) released for SDXL, [click here](https://huggingface.co/spaces/TencentARC/T2I-Adapter-SDXL)
-'''
-
-if not torch.cuda.is_available():
-    DESCRIPTION += "\nRunning on CPU 🥶 This demo does not work on CPU."
-
-style_list = [
- {
- "name": "(No style)",
- "prompt": "{prompt}",
- "negative_prompt": "",
- },
- {
- "name": "Cinematic",
- "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
- "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
- },
- {
- "name": "3D Model",
- "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
- "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
- },
- {
- "name": "Anime",
- "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
- "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
- },
- {
- "name": "Digital Art",
- "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
- "negative_prompt": "photo, photorealistic, realism, ugly",
- },
- {
- "name": "Photographic",
- "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
- "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
- },
- {
- "name": "Pixel art",
- "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
- "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
- },
- {
- "name": "Fantasy art",
- "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
- "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
- },
- {
- "name": "Neonpunk",
- "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
- "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
- },
- {
- "name": "Manga",
- "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
- "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
- },
-]
-
-styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
-STYLE_NAMES = list(styles.keys())
-DEFAULT_STYLE_NAME = "(No style)"
-
-
-def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
- p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
- return p.replace("{prompt}", positive), n + negative
-
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-if torch.cuda.is_available():
- model_id = "stabilityai/stable-diffusion-xl-base-1.0"
- adapter = T2IAdapter.from_pretrained(
- "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
- )
- scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
- pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
- model_id,
- vae=AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16),
- adapter=adapter,
- scheduler=scheduler,
- torch_dtype=torch.float16,
- variant="fp16",
- )
- pipe.to(device)
-else:
- pipe = None
-
-MAX_SEED = np.iinfo(np.int32).max
-
-
-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
- if randomize_seed:
- seed = random.randint(0, MAX_SEED)
- return seed
-
-
-def run(
- image: PIL.Image.Image,
- prompt: str,
- negative_prompt: str,
- style_name: str = DEFAULT_STYLE_NAME,
- num_steps: int = 25,
- guidance_scale: float = 5,
- adapter_conditioning_scale: float = 0.8,
- adapter_conditioning_factor: float = 0.8,
- seed: int = 0,
- progress=gr.Progress(track_tqdm=True),
-) -> PIL.Image.Image:
- image = image.convert("RGB")
- image = TF.to_tensor(image) > 0.5
- image = TF.to_pil_image(image.to(torch.float32))
-
- prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
-
- generator = torch.Generator(device=device).manual_seed(seed)
- out = pipe(
- prompt=prompt,
- negative_prompt=negative_prompt,
- image=image,
- num_inference_steps=num_steps,
- generator=generator,
- guidance_scale=guidance_scale,
- adapter_conditioning_scale=adapter_conditioning_scale,
- adapter_conditioning_factor=adapter_conditioning_factor,
- ).images[0]
- return out
-
-
-with gr.Blocks(css="style.css") as demo:
- gr.Markdown(DESCRIPTION, elem_id="description")
- gr.DuplicateButton(
- value="Duplicate Space for private use",
- elem_id="duplicate-button",
- visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
- )
-
- with gr.Row():
- with gr.Column():
- with gr.Group():
- image = gr.Image(
- source="canvas",
- tool="sketch",
- type="pil",
- image_mode="L",
- invert_colors=True,
- shape=(1024, 1024),
- brush_radius=4,
- height=440,
- )
- prompt = gr.Textbox(label="Prompt")
- style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
- run_button = gr.Button("Run")
- with gr.Accordion("Advanced options", open=False):
- negative_prompt = gr.Textbox(
- label="Negative prompt",
- value=" extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
- )
- num_steps = gr.Slider(
- label="Number of steps",
- minimum=1,
- maximum=50,
- step=1,
- value=25,
- )
- guidance_scale = gr.Slider(
- label="Guidance scale",
- minimum=0.1,
- maximum=10.0,
- step=0.1,
- value=5,
- )
- adapter_conditioning_scale = gr.Slider(
- label="Adapter conditioning scale",
- minimum=0.5,
- maximum=1,
- step=0.1,
- value=0.8,
- )
- adapter_conditioning_factor = gr.Slider(
- label="Adapter conditioning factor",
- info="Fraction of timesteps for which adapter should be applied",
- minimum=0.5,
- maximum=1,
- step=0.1,
- value=0.8,
- )
- seed = gr.Slider(
- label="Seed",
- minimum=0,
- maximum=MAX_SEED,
- step=1,
- value=0,
- )
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
- with gr.Column():
- result = gr.Image(label="Result", height=400)
-
- inputs = [
- image,
- prompt,
- negative_prompt,
- style,
- num_steps,
- guidance_scale,
- adapter_conditioning_scale,
- adapter_conditioning_factor,
- seed,
- ]
- prompt.submit(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- queue=False,
- api_name=False,
- ).then(
- fn=run,
- inputs=inputs,
- outputs=result,
- api_name=False,
- )
- negative_prompt.submit(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- queue=False,
- api_name=False,
- ).then(
- fn=run,
- inputs=inputs,
- outputs=result,
- api_name=False,
- )
- run_button.click(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- queue=False,
- api_name=False,
- ).then(
- fn=run,
- inputs=inputs,
- outputs=result,
- api_name=False,
- )
-
-if __name__ == "__main__":
- demo.queue(max_size=20).launch()
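
The Gradio demo deleted above wires a sketch canvas into `StableDiffusionXLAdapterPipeline`. Stripped of the UI, the inference path reduces to roughly the following sketch; the checkpoints and call arguments are taken from the code above, the input path is hypothetical, and a CUDA GPU is assumed:

```python
# Minimal sketch of the T2I-Adapter SDXL sketch pipeline used in the app above.
import torch
import PIL.Image
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLAdapterPipeline,
    T2IAdapter,
)

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
)
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    model_id,
    vae=AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16),
    adapter=adapter,
    scheduler=EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler"),
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

sketch = PIL.Image.open("sketch.png").convert("L")  # hypothetical black-on-white sketch
result = pipe(
    prompt="a house in the forest",
    negative_prompt="low quality, blurry",
    image=sketch,
    num_inference_steps=25,
    guidance_scale=5.0,
    adapter_conditioning_scale=0.8,
    adapter_conditioning_factor=0.8,
).images[0]
result.save("result.png")
```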
diff --git a/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/text_decoder.py b/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/text_decoder.py
deleted file mode 100644
index 071baa7a52d21d7132cc492f070cba066d17aa43..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/text_decoder.py
+++ /dev/null
@@ -1,672 +0,0 @@
-# Modified by Jialian Wu from
-# https://github.com/microsoft/GenerativeImage2Text/blob/main/generativeimage2text/layers/decoder.py
-# and https://github.com/kdexd/virtex
-from torch import nn
-import torch
-import functools
-from torch.nn import functional as F
-import warnings
-
-
-class TextualHead(nn.Module):
- def __init__(self,
- visual_feature_size: int, vocab_size: int, hidden_size: int):
- super().__init__()
- self.visual_feature_size = visual_feature_size
- self.vocab_size = vocab_size
- self.hidden_size = hidden_size
-
- @property
- def textual_feature_size(self):
- return self.hidden_size
-
-
-class WordAndPositionalEmbedding(nn.Module):
- def __init__(
- self,
- vocab_size: int,
- hidden_size: int,
- dropout: float = 0.0,
- max_caption_length: int = 30,
- padding_idx: int = 0,
- ):
- super().__init__()
- self.vocab_size = vocab_size
- self.padding_idx = padding_idx
-
- #self.words = nn.Embedding(vocab_size, hidden_size, padding_idx=padding_idx)
- self.words = nn.Embedding(vocab_size, hidden_size)
-
- # We provide no "padding index" for positional embeddings. We zero out
- # the positional embeddings of padded positions as a post-processing.
- self.positions = nn.Embedding(max_caption_length, hidden_size)
- self.layer_norm = nn.LayerNorm(
- hidden_size, eps=1e-8, elementwise_affine=True
- )
- self.dropout = nn.Dropout(p=dropout)
-
- def forward(self, tokens: torch.Tensor):
- position_indices = self._create_position_indices(tokens)
-
- # shape: (batch_size, max_caption_length, hidden_size)
- word_embeddings = self.words(tokens)
- position_embeddings = self.positions(position_indices)
-
- # shape: (batch_size, max_caption_length, hidden_size)
- embeddings = self.layer_norm(word_embeddings + position_embeddings)
- embeddings = self.dropout(embeddings)
-
- return embeddings
-
- @functools.lru_cache(maxsize=128)
- def _create_position_indices(self, tokens: torch.Tensor):
-
- # Create position indices of the same size as token indices.
- batch_size, max_caption_length = tokens.size()
- positions = torch.arange(
- max_caption_length, dtype=tokens.dtype, device=tokens.device
- )
- # shape: (batch_size, max_caption_length)
- positions = positions.unsqueeze(0).expand(batch_size, max_caption_length)
- return positions
-
-
-class BertEncoderAsDecoder(nn.Module):
- def __init__(self, encoder):
- super().__init__()
- self.encoder = encoder
-
- def forward(self, tgt, memory,
- tgt_mask=None,
- tgt_key_padding_mask=None,
- memory_key_padding_mask=None,
- tgt_bi_valid_mask=None,
- encoder_history_states=None,
- ):
- assert tgt_key_padding_mask is None, 'not supported'
- assert tgt_mask.dim() == 2
- assert tgt_mask.shape[0] == tgt_mask.shape[1]
- # tgt_mask should always be 0/negative infinity
- tgt = tgt.transpose(0, 1)
- memory = memory.transpose(0, 1)
-
- hidden_states = torch.cat((memory, tgt), dim=1)
- num_tgt = tgt.shape[1]
- num_memory = memory.shape[1]
- device = tgt.device
- dtype = tgt.dtype
- top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
- top_right = torch.full((num_memory, num_tgt), float('-inf'), device=tgt.device, dtype=dtype,)
- bottom_left = torch.zeros((num_tgt, num_memory), dtype=dtype, device=tgt_mask.device,)
- left = torch.cat((top_left, bottom_left), dim=0)
- right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
-
- full_attention_mask = torch.cat((left, right), dim=1)[None, :]
-
- if memory_key_padding_mask is None:
- memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
-        # if it is False, it means the position is valid, i.e. not padding
- assert memory_key_padding_mask.dtype == torch.bool
- zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
- zero_negative_infinity[memory_key_padding_mask] = float('-inf')
- full_attention_mask = full_attention_mask.expand((memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + num_tgt))
- full_attention_mask = full_attention_mask.clone()
- origin_left = full_attention_mask[:, :, :num_memory]
- update = zero_negative_infinity[:, None, :]
- full_attention_mask[:, :, :num_memory] = origin_left + update
-
- if tgt_bi_valid_mask is not None:
- # verify the correctness
- bs = full_attention_mask.shape[0]
- # during inference, tgt_bi_valid_mask's length is not changed, but
- # num_tgt can be increased
- max_valid_target = tgt_bi_valid_mask.shape[1]
- mask = tgt_bi_valid_mask[:, None, :].expand((bs, num_memory+num_tgt, max_valid_target))
- full_attention_mask[:, :, num_memory:(num_memory+max_valid_target)][mask] = 0
-
- # add axis for multi-head
- full_attention_mask = full_attention_mask[:, None, :, :]
-
- if encoder_history_states is None:
- result = self.encoder(
- hidden_states=hidden_states,
- attention_mask=full_attention_mask,
- encoder_history_states=encoder_history_states,
- )
- result = list(result)
- result[0] = result[0][:, num_memory:].transpose(0, 1)
- if self.encoder.output_hidden_states:
- return result[0], result[1]
- else:
- # make it back-compatible
- return result[0]
- else:
- encoder_out = self.encoder(
- hidden_states=hidden_states[:, -1:],
- attention_mask=full_attention_mask[:, :, -1:],
- encoder_history_states=encoder_history_states,
- )
- result = encoder_out[0].transpose(0, 1)
- if self.encoder.output_hidden_states:
- return result, encoder_out[1]
- else:
- return result
-
-
-def create_transformer(decoder_type, norm_type,
- textual_feature_size,
- attention_heads,
- feedforward_size,
- dropout,
- num_layers,
- output_hidden_states=False,
- use_mlp_wrapper=None,
- use_act_checkpoint=True,
- ):
- assert norm_type in ['post', 'pre']
- if decoder_type is None:
- LayerClass = (
- nn.TransformerDecoderLayer
- if norm_type == "post"
- else PreNormTransformerDecoderLayer
- )
- _layer = LayerClass(
- textual_feature_size,
- attention_heads,
- dim_feedforward=feedforward_size,
- dropout=dropout,
- activation="gelu",
- )
- return nn.TransformerDecoder(_layer, num_layers)
- elif decoder_type == 'bert_en':
- from .modeling_bert import BertConfig, BertEncoder
- config = BertConfig(
- vocab_size_or_config_json_file=30522,
- hidden_size=textual_feature_size,
- num_hidden_layers=num_layers,
- num_attention_heads=attention_heads,
- intermediate_size=feedforward_size,
- hidden_act="gelu",
- hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- layer_norm_eps=1e-12,
- )
- config.pre_norm = (norm_type == 'pre')
- config.use_mlp_wrapper = use_mlp_wrapper
- config.output_hidden_states = output_hidden_states
- encoder = BertEncoder(config, use_act_checkpoint=use_act_checkpoint)
- return BertEncoderAsDecoder(encoder)
-
-
-class PreNormTransformerDecoderLayer(nn.TransformerDecoderLayer):
- def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
- tgt_key_padding_mask=None, memory_key_padding_mask=None):
- # fmt: off
- # We use the members (modules) from super-class, just the order of
- # operations is changed here. First layernorm, then attention.
- tgt2 = self.norm1(tgt)
- tgt2, _ = self.self_attn(
- tgt2, tgt2, tgt2, attn_mask=tgt_mask,
- key_padding_mask=tgt_key_padding_mask
- )
- tgt = tgt + self.dropout1(tgt2)
-
- # Layernorm first, then decoder attention.
- tgt2 = self.norm2(tgt)
- tgt2, _ = self.multihead_attn(
- tgt2, memory, memory, attn_mask=memory_mask,
- key_padding_mask=memory_key_padding_mask
- )
- tgt = tgt + self.dropout2(tgt2)
-
- # Layernorm first, then transformation through feedforward network.
- tgt2 = self.norm3(tgt)
- tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
- tgt = tgt + self.dropout3(tgt2)
- return tgt
-
-
-class TransformerDecoderTextualHead(TextualHead):
- def __init__(
- self,
- object_feature_size: int,
- vocab_size: int,
- hidden_size: int,
- num_layers: int,
- attention_heads: int,
- feedforward_size: int,
- dropout: float = 0.1,
- norm_type: str = "post",
- mask_future_positions: bool = True,
- max_caption_length: int = 1024,
- padding_idx: int = 0,
- decoder_type=None,
- not_tie_weight=None,
- output_hidden_states=None,
- use_mlp_wrapper=None,
- use_act_checkpoint=True,
- ):
- super().__init__(object_feature_size, vocab_size, hidden_size)
- self.num_layers = num_layers
- self.attention_heads = attention_heads
- self.feedforward_size = feedforward_size
- self.dropout = dropout
- assert mask_future_positions
- self.padding_idx = padding_idx
-
- self.object_feature_projection = nn.Sequential(
- nn.Linear(object_feature_size, self.textual_feature_size),
- nn.LayerNorm(self.textual_feature_size))
-
- self.embedding = WordAndPositionalEmbedding(
- self.vocab_size,
- self.textual_feature_size,
- dropout=dropout,
- max_caption_length=max_caption_length,
- padding_idx=padding_idx,
- )
- self.transformer = create_transformer(
- decoder_type=decoder_type,
- norm_type=norm_type,
- textual_feature_size=self.textual_feature_size,
- attention_heads=self.attention_heads,
- feedforward_size=self.feedforward_size,
- dropout=dropout,
- num_layers=self.num_layers,
- output_hidden_states=output_hidden_states,
- use_mlp_wrapper=use_mlp_wrapper,
- use_act_checkpoint=use_act_checkpoint,
- )
- self.apply(self._init_weights)
-
- # Create an output linear layer and tie the input and output word
-        # embeddings to reduce parameters.
- self.output = nn.Linear(self.textual_feature_size, vocab_size)
- if not not_tie_weight:
- self.output.weight = self.embedding.words.weight
-
- @staticmethod
- def _init_weights(module):
- """Initialize weights like BERT - N(0.0, 0.02), bias = 0."""
-
- if isinstance(module, nn.Linear):
- module.weight.data.normal_(mean=0.0, std=0.02)
- elif isinstance(module, nn.MultiheadAttention):
- module.in_proj_weight.data.normal_(mean=0.0, std=0.02)
- module.out_proj.weight.data.normal_(mean=0.0, std=0.02)
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=0.02)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
-
- def forward(
- self,
- hidden_states,
- text_tokens,
- ):
- projected_object_features = self.object_feature_projection(hidden_states) if hidden_states is not None else None
- batch_size, max_text_length = text_tokens.size()
- text_embeddings = self.embedding(text_tokens)
-
- # An additive mask for masking the future (one direction).
- uni_mask_zero_neg = self._generate_future_mask(
- max_text_length, text_embeddings.dtype, text_embeddings.device
- )
-
- # We transpose the first two dimensions of tokens embeddings and visual
- # features, as required by decoder.
- text_embeddings = text_embeddings.transpose(0, 1)
-
- projected_object_features = projected_object_features.transpose(0, 1)
-
- # if transformer here is the pytorch/decoder, there is no chance, the
- # output is always tensor
- trans_out = self.transformer(
- text_embeddings,
- projected_object_features,
- tgt_mask=uni_mask_zero_neg,
- )
- if isinstance(trans_out, tuple):
- textual_features = trans_out[0]
- else:
- assert isinstance(trans_out, torch.Tensor)
- textual_features = trans_out
- # Undo the transpose and bring batch to dim 0.
- # shape: (batch_size, max_caption_length, hidden_size)
- textual_features = textual_features.transpose(0, 1)
-
- # shape: (batch_size, max_caption_length, vocab_size)
- output_logits = self.output(textual_features)
- if isinstance(trans_out, tuple):
- return output_logits, trans_out[1]
- else:
- return output_logits
-
- def _generate_future_mask(
- self, size: int, dtype: torch.dtype, device: torch.device
- ):
- # Default mask is for forward direction. Flip for backward direction.
- mask = torch.triu(
- torch.ones(size, size, device=device, dtype=dtype), diagonal=1
- )
- mask = mask.masked_fill(mask == 1, float("-inf"))
- return mask
-
-
-class AutoRegressiveBeamSearch(object):
- def __init__(
- self,
- end_token_id: int,
- max_steps: int = 50,
- beam_size: int = 5,
- objectdet=True,
- per_node_beam_size: int = 2,
- ):
- self._eos_index = end_token_id
- self.max_steps = max_steps
- self.beam_size = beam_size
- self.objectdet = objectdet
- self.per_node_beam_size = per_node_beam_size or beam_size
-
- def search(self, begin_tokens, step):
- if self.beam_size > 1 and self.objectdet:
- only_return_best = False
- else:
- only_return_best = True
-
- batch_size = begin_tokens.size()[0]
-
- predictions = begin_tokens.unsqueeze(1).expand((batch_size, self.beam_size, begin_tokens.shape[-1]))
- # Calculate the first timestep. This is done outside the main loop
- # because we are going from a single decoder input (the output from the
- # encoder) to the top `beam_size` decoder outputs. On the other hand,
- # within the main loop we are going from the `beam_size` elements of the
- # beam to `beam_size`^2 candidates from which we will select the top
- # `beam_size` elements for the next iteration.
- # shape: (batch_size, num_classes)
- start_class_logits = step(begin_tokens)
-
- # Convert logits to logprobs.
- # shape: (batch_size * beam_size, vocab_size)
- start_class_logprobs = F.log_softmax(start_class_logits, dim=1)
-
- num_classes = start_class_logprobs.size()[1]
-
- # shape: (batch_size, beam_size), (batch_size, beam_size)
- start_top_logprobs, start_predicted_classes = start_class_logprobs.topk(
- self.beam_size
- )
-
- if (
- self.beam_size == 1
- and (start_predicted_classes == self._eos_index).all()
- ):
- warnings.warn(
- "Empty object description predicted. You may want to increase beam"
- "size or ensure your step function is working properly.",
- RuntimeWarning,
- )
- if only_return_best:
- return start_predicted_classes, start_top_logprobs
- else:
- return start_predicted_classes.unsqueeze(-1), start_top_logprobs
-
- # The log probs for the last time step.
- # shape: (batch_size, beam_size)
- last_logprobs = start_top_logprobs
-
- # shape: (batch_size, beam_size, sequence_length)
- predictions = torch.cat([predictions, start_predicted_classes.unsqueeze(-1)], dim=-1)
-
- # Log probability tensor that mandates that the end token is selected.
- # shape: (batch_size * beam_size, num_classes)
- logprobs_after_end = start_class_logprobs.new_full(
- (batch_size * self.beam_size, num_classes), float("-inf")
- )
- logprobs_after_end[:, self._eos_index] = 0.0
-
- logits_after_end = start_class_logprobs.new_full(
- (batch_size * self.beam_size, num_classes), float("-inf")
- )
- logits_after_end[:, self._eos_index] = 0
-
- while predictions.shape[-1] < self.max_steps:
- # shape: (batch_size * beam_size,)
- last_predictions = predictions[:, :, -1].reshape(batch_size * self.beam_size)
-
- # If every predicted token from the last step is `self._eos_index`,
- # then we can stop early.
- if (last_predictions == self._eos_index).all():
- break
-
- predictions_so_far = predictions.view(
- batch_size * self.beam_size, -1
- )
- # shape: (batch_size * beam_size, num_classes)
- class_logits = step(predictions_so_far)
-
- # Set logprobs of last predicted tokens as high negative value to avoid
- # repetition in description.
- class_logits = class_logits.scatter(1, predictions_so_far[:, -1].view((-1, 1)), -10000)
-
- # shape: (batch_size * beam_size, num_classes)
- last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
- batch_size * self.beam_size, num_classes
- )
-
- # Here we are finding any beams where we predicted the end token in
- # the previous timestep and replacing the distribution with a
- # one-hot distribution, forcing the beam to predict the end token
- # this timestep as well.
- class_logits = torch.where(
- last_predictions_expanded == self._eos_index,
- logits_after_end,
- class_logits,
- )
-
- # Convert logits to logprobs.
- # shape: (batch_size * beam_size, vocab_size)
- class_logprobs = F.log_softmax(class_logits, dim=1)
-
- # shape (both): (batch_size * beam_size, per_node_beam_size)
- top_logprobs, predicted_classes = class_logprobs.topk(
- self.per_node_beam_size
- )
-
- # Here we expand the last log probs to `(batch_size * beam_size,
- # per_node_beam_size)` so that we can add them to the current log
- # probs for this timestep. This lets us maintain the log
- # probability of each element on the beam.
- # shape: (batch_size * beam_size, per_node_beam_size)
- expanded_last_logprobs = (
- last_logprobs.unsqueeze(2)
- .expand(batch_size, self.beam_size, self.per_node_beam_size)
- .reshape(batch_size * self.beam_size, self.per_node_beam_size)
- )
- # shape: (batch_size * beam_size, per_node_beam_size)
- summed_top_logprobs = top_logprobs + expanded_last_logprobs
-
- # shape: (batch_size, beam_size * per_node_beam_size)
- reshaped_summed = summed_top_logprobs.reshape(
- batch_size, self.beam_size * self.per_node_beam_size
- )
- # shape: (batch_size, beam_size * per_node_beam_size)
- reshaped_predicted_classes = predicted_classes.reshape(
- batch_size, self.beam_size * self.per_node_beam_size
- )
- # Append the predictions to the current beam.
- reshaped_beam = (
- predictions.view(batch_size * self.beam_size, 1, -1)
- .repeat(1, self.per_node_beam_size, 1)
- .reshape(batch_size, self.beam_size * self.per_node_beam_size, -1)
- )
-            # batch_size, (beam_size * per_node_beam_size), #token
- reshaped_beam = torch.cat([reshaped_beam, reshaped_predicted_classes.unsqueeze(-1)], dim=-1)
-
- # Keep only the top `beam_size` beam indices.
- # shape: (batch_size, beam_size), (batch_size, beam_size)
- restricted_beam_logprobs, restricted_beam_indices = reshaped_summed.topk(
- self.beam_size
- )
- predictions = reshaped_beam.gather(
- 1, restricted_beam_indices.unsqueeze(-1).repeat(1,1,reshaped_beam.shape[-1])
- )
-
- # shape: (batch_size, beam_size)
- last_logprobs = restricted_beam_logprobs
-
- if not torch.isfinite(last_logprobs).all():
- warnings.warn(
- "Infinite log probs encountered. Some final descriptions may not "
- "make sense. This can happen when the beam size is larger than"
- " the number of valid (non-zero probability) transitions that "
- "the step function produces.",
- RuntimeWarning,
- )
-
- # Optionally select best beam and its logprobs.
- if only_return_best:
- # shape: (batch_size, sequence_length)
- predictions = predictions[:, 0, :]
- last_logprobs = last_logprobs[:, 0]
- num_valid = (predictions != self._eos_index).sum(dim=-1)
- num_valid += (predictions == self._eos_index).sum(dim=-1) > 0
- num_valid = num_valid - begin_tokens.shape[1]
- num_valid = num_valid.clip(min=1)
-
- last_logprobs = last_logprobs / num_valid
-
- return predictions, last_logprobs
-
-
-class GRiTTextDecoder(nn.Module):
- def __init__(
- self,
- transformer,
- begin_token_id=101,
- beamsearch_decode=None,
- loss_type=None,
- tokenizer=None,
- ):
- super().__init__()
- self.textual = transformer
- self.padding_idx = self.textual.padding_idx
-
- self.begin_token_id = begin_token_id
- self.beamsearch_decode = beamsearch_decode
- self.tokenizer = tokenizer
-
- if loss_type is None:
- self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_idx)
- elif loss_type == 'smooth':
- self.loss = SmoothLabelCrossEntropyLoss(ignore_index=self.padding_idx)
- else:
- raise NotImplementedError(loss_type)
-
- def forward(self, batch):
- object_features = batch['object_features']
-
- if self.training:
- caption_token_input = batch["text_tokens"]
-
- output_logits = self.textual(
- object_features,
- caption_token_input,
- )
-
- if 'need_predict' in batch:
- # in place should also be good, but we do not choose that for
- # safety as we may use it in prediction results in future
- target = batch["text_tokens"].clone()
- target[batch['need_predict'] == 0] = self.padding_idx
- else:
- target = batch["text_tokens"]
-
- feat = output_logits[:, :-1].contiguous()
- target = target[:, 1:].contiguous()
- feat = feat.view(-1, self.textual.vocab_size)
- target = target.view(-1)
-
- valid_mask = target != self.padding_idx
- target = target[valid_mask]
- feat = feat[valid_mask]
- loss = self.loss(feat, target)
-
- return loss
- else:
- output_dict = self.infer(object_features)
- return output_dict
-
- def infer(self, object_features):
- batch_size = object_features.size(0)
- begin_tokens = object_features.new_full(
- (batch_size, 1), self.begin_token_id
- ).long()
-
- decoding_step = functools.partial(
- self.decoding_step, object_features
- )
-
- object_description_tokens, logprobs = self.beamsearch_decode.search(
- begin_tokens, decoding_step
- )
-
- output_dict = {
- 'predictions': object_description_tokens,
- 'logprobs': logprobs,
- }
-
- return output_dict
-
- def decoding_step(self, object_features, partial_text):
- batch_size = object_features.shape[0]
- beam_size = int(partial_text.size(0) / batch_size)
- if beam_size > 1:
- batch_size, num_token, channels = object_features.size()
- object_features = object_features.unsqueeze(1).repeat(1, beam_size, 1, 1)
- object_features = object_features.view(
- batch_size * beam_size, num_token, channels
- )
-
- text_lengths = torch.ones_like(partial_text)
- if len(text_lengths.size()) != 2:
- partial_text = partial_text.unsqueeze(1)
-
- # shape: (batch_size * beam_size, partial_caption_length, vocab_size)
- logits = self.textual(
- object_features,
- partial_text,
- )
-
- return logits[:, -1, :].float()
-
-
-class SmoothLabelCrossEntropyLoss(nn.Module):
- def __init__(self, eps=0.1, log_prefix='', ignore_index=None):
- super().__init__()
- self.eps = eps
- self.log_soft = nn.LogSoftmax(dim=1)
- self.kl = nn.KLDivLoss(reduction='none')
-
- self.iter = 0
- self.max_loss = 0
- self.min_loss = 0
- self.log_prefix = log_prefix
- self.ignore_index = ignore_index
-
- def forward(self, feature, target):
- feature = feature.float()
- if self.ignore_index is not None:
- valid_mask = target != self.ignore_index
- target = target[valid_mask]
- feature = feature[valid_mask]
- assert target.numel() > 0
- self.iter += 1
- eps = self.eps
- n_class = feature.size(1)
- one_hot = torch.zeros_like(feature).scatter(1, target.view(-1, 1), 1)
- one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
- log_prb = self.log_soft(feature)
- loss = self.kl(log_prb, one_hot)
- return loss.sum(dim=1).mean()
-
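
The decoder above enforces left-to-right generation with the additive mask built in `_generate_future_mask`: zeros on and below the diagonal, `-inf` strictly above it, so each token can only attend to earlier positions. A tiny self-contained check of that construction:

```python
# Reproduces the additive causal mask from _generate_future_mask for size 4.
import torch

size = 4
mask = torch.triu(torch.ones(size, size), diagonal=1)
mask = mask.masked_fill(mask == 1, float("-inf"))
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
```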
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/README_D2.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/README_D2.md
deleted file mode 100644
index a88ad7e21ce1d8651ec0d73848ce6dcd17f19d00..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/README_D2.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-Detectron2 is Facebook AI Research's next generation software system
-that implements state-of-the-art object detection algorithms.
-It is a ground-up rewrite of the previous version,
-[Detectron](https://github.com/facebookresearch/Detectron/),
-and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/).
-
-
-
-
-
-### What's New
-* It is powered by the [PyTorch](https://pytorch.org) deep learning framework.
-* Includes more features such as panoptic segmentation, Densepose, Cascade R-CNN, rotated bounding boxes, PointRend,
- DeepLab, etc.
-* Can be used as a library to support [different projects](projects/) on top of it.
- We'll open source more research projects in this way.
-* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html).
-* Models can be exported to TorchScript format or Caffe2 format for deployment.
-
-See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/)
-to see more demos and learn about detectron2.
-
-## Installation
-
-See [INSTALL.md](INSTALL.md).
-
-## Getting Started
-
-Follow the [installation instructions](https://detectron2.readthedocs.io/tutorials/install.html) to
-install detectron2.
-
-See [Getting Started with Detectron2](https://detectron2.readthedocs.io/tutorials/getting_started.html),
-and the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-to learn about basic usage.
-
-Learn more at our [documentation](https://detectron2.readthedocs.org).
-And see [projects/](projects/) for some projects that are built on top of detectron2.
-
-## Model Zoo and Baselines
-
-We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md).
-
-
-## License
-
-Detectron2 is released under the [Apache 2.0 license](LICENSE).
-
-## Citing Detectron2
-
-If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry.
-
-```BibTeX
-@misc{wu2019detectron2,
- author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and
- Wan-Yen Lo and Ross Girshick},
- title = {Detectron2},
- howpublished = {\url{https://github.com/facebookresearch/detectron2}},
- year = {2019}
-}
-```
diff --git a/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
deleted file mode 100644
index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000
--- a/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import parselmouth
-import numpy as np
-
-
-class PMF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
-        Interpolate F0 over unvoiced frames.
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
-                ip_data[i] = data[i]  # this copy may be unnecessary
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def compute_f0(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0
-
- def compute_f0_uv(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0, uv
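
The predictor above wraps Praat's autocorrelation pitch tracker (via parselmouth) and then fills unvoiced frames by interpolation. A usage sketch, assuming the same package layout as the deleted repository and a synthetic one-second 220 Hz tone as input:

```python
# Sketch: run PMF0Predictor on a synthetic tone. The import path mirrors the
# deleted repo layout and is an assumption outside that project.
import numpy as np
from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

sr = 44100
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)      # one second of a 220 Hz tone

predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)          # interpolated F0 and a voiced/unvoiced flag per frame
print(f0.shape, uv.shape)                      # roughly len(wav) // hop_length frames
print(f0[uv > 0][:5])                          # voiced frames should sit near 220 Hz
```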
diff --git a/spaces/TornikeO/dis-background-removal/hce_metric_main.py b/spaces/TornikeO/dis-background-removal/hce_metric_main.py
deleted file mode 100644
index 1b102e6defa72f9cde378a353542fe6234b0ba3b..0000000000000000000000000000000000000000
--- a/spaces/TornikeO/dis-background-removal/hce_metric_main.py
+++ /dev/null
@@ -1,188 +0,0 @@
-## hce_metric.py
-import numpy as np
-from skimage import io
-import matplotlib.pyplot as plt
-import cv2 as cv
-from skimage.morphology import skeletonize
-from skimage.morphology import erosion, dilation, disk
-from skimage.measure import label
-
-import os
-import sys
-from tqdm import tqdm
-from glob import glob
-import pickle as pkl
-
-def filter_bdy_cond(bdy_, mask, cond):
-
- cond = cv.dilate(cond.astype(np.uint8),disk(1))
- labels = label(mask) # find the connected regions
- lbls = np.unique(labels) # the indices of the connected regions
-    indep = np.ones(lbls.shape[0]) # the label of each connected region
-    indep[0] = 0 # 0 indicates the background region
-
- boundaries = []
- h,w = cond.shape[0:2]
- ind_map = np.zeros((h,w))
- indep_cnt = 0
-
- for i in range(0,len(bdy_)):
- tmp_bdies = []
- tmp_bdy = []
- for j in range(0,bdy_[i].shape[0]):
- r, c = bdy_[i][j,0,1],bdy_[i][j,0,0]
-
- if(np.sum(cond[r,c])==0 or ind_map[r,c]!=0):
- if(len(tmp_bdy)>0):
- tmp_bdies.append(tmp_bdy)
- tmp_bdy = []
- continue
- tmp_bdy.append([c,r])
- ind_map[r,c] = ind_map[r,c] + 1
- indep[labels[r,c]] = 0 # indicates part of the boundary of this region needs human correction
- if(len(tmp_bdy)>0):
- tmp_bdies.append(tmp_bdy)
-
- # check if the first and the last boundaries are connected
- # if yes, invert the first boundary and attach it after the last boundary
- if(len(tmp_bdies)>1):
- first_x, first_y = tmp_bdies[0][0]
- last_x, last_y = tmp_bdies[-1][-1]
- if((abs(first_x-last_x)==1 and first_y==last_y) or
- (first_x==last_x and abs(first_y-last_y)==1) or
- (abs(first_x-last_x)==1 and abs(first_y-last_y)==1)
- ):
- tmp_bdies[-1].extend(tmp_bdies[0][::-1])
- del tmp_bdies[0]
-
- for k in range(0,len(tmp_bdies)):
- tmp_bdies[k] = np.array(tmp_bdies[k])[:,np.newaxis,:]
- if(len(tmp_bdies)>0):
- boundaries.extend(tmp_bdies)
-
- return boundaries, np.sum(indep)
-
-# this function approximates each boundary with the DP algorithm
-# https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
-def approximate_RDP(boundaries,epsilon=1.0):
-
- boundaries_ = []
- boundaries_len_ = []
- pixel_cnt_ = 0
-
- # polygon approximate of each boundary
- for i in range(0,len(boundaries)):
- boundaries_.append(cv.approxPolyDP(boundaries[i],epsilon,False))
-
- # count the control points number of each boundary and the total control points number of all the boundaries
- for i in range(0,len(boundaries_)):
- boundaries_len_.append(len(boundaries_[i]))
- pixel_cnt_ = pixel_cnt_ + len(boundaries_[i])
-
- return boundaries_, boundaries_len_, pixel_cnt_
-
-
-def relax_HCE(gt, rs, gt_ske, relax=5, epsilon=2.0):
- # print("max(gt_ske): ", np.amax(gt_ske))
- # gt_ske = gt_ske>128
- # print("max(gt_ske): ", np.amax(gt_ske))
-
- # Binarize gt
- if(len(gt.shape)>2):
- gt = gt[:,:,0]
-
- epsilon_gt = 128#(np.amin(gt)+np.amax(gt))/2.0
- gt = (gt>epsilon_gt).astype(np.uint8)
-
- # Binarize rs
- if(len(rs.shape)>2):
- rs = rs[:,:,0]
- epsilon_rs = 128#(np.amin(rs)+np.amax(rs))/2.0
- rs = (rs>epsilon_rs).astype(np.uint8)
-
- Union = np.logical_or(gt,rs)
- TP = np.logical_and(gt,rs)
- FP = rs - TP
- FN = gt - TP
-
- # relax the Union of gt and rs
- Union_erode = Union.copy()
- Union_erode = cv.erode(Union_erode.astype(np.uint8),disk(1),iterations=relax)
-
- # --- get the relaxed False Positive regions for computing the human efforts in correcting them ---
- FP_ = np.logical_and(FP,Union_erode) # get the relaxed FP
- for i in range(0,relax):
- FP_ = cv.dilate(FP_.astype(np.uint8),disk(1))
- FP_ = np.logical_and(FP_, 1-np.logical_or(TP,FN))
- FP_ = np.logical_and(FP, FP_)
-
- # --- get the relaxed False Negative regions for computing the human efforts in correcting them ---
- FN_ = np.logical_and(FN,Union_erode) # preserve the structural components of FN
- ## recover the FN, where pixels are not close to the TP borders
- for i in range(0,relax):
- FN_ = cv.dilate(FN_.astype(np.uint8),disk(1))
- FN_ = np.logical_and(FN_,1-np.logical_or(TP,FP))
- FN_ = np.logical_and(FN,FN_)
- FN_ = np.logical_or(FN_, np.logical_xor(gt_ske,np.logical_and(TP,gt_ske))) # preserve the structural components of FN
-
- ## 2. =============Find exact polygon control points and independent regions==============
- ## find contours from FP_
- ctrs_FP, hier_FP = cv.findContours(FP_.astype(np.uint8), cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
- ## find control points and independent regions for human correction
- bdies_FP, indep_cnt_FP = filter_bdy_cond(ctrs_FP, FP_, np.logical_or(TP,FN_))
- ## find contours from FN_
- ctrs_FN, hier_FN = cv.findContours(FN_.astype(np.uint8), cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
- ## find control points and independent regions for human correction
- bdies_FN, indep_cnt_FN = filter_bdy_cond(ctrs_FN, FN_, 1-np.logical_or(np.logical_or(TP,FP_),FN_))
-
- poly_FP, poly_FP_len, poly_FP_point_cnt = approximate_RDP(bdies_FP,epsilon=epsilon)
- poly_FN, poly_FN_len, poly_FN_point_cnt = approximate_RDP(bdies_FN,epsilon=epsilon)
-
- return poly_FP_point_cnt, indep_cnt_FP, poly_FN_point_cnt, indep_cnt_FN
-
-def compute_hce(pred_root,gt_root,gt_ske_root):
-
- gt_name_list = glob(pred_root+'/*.png')
- gt_name_list = sorted([x.split('/')[-1] for x in gt_name_list])
-
- hces = []
- for gt_name in tqdm(gt_name_list, total=len(gt_name_list)):
- gt_path = os.path.join(gt_root, gt_name)
- pred_path = os.path.join(pred_root, gt_name)
-
- gt = cv.imread(gt_path, cv.IMREAD_GRAYSCALE)
- pred = cv.imread(pred_path, cv.IMREAD_GRAYSCALE)
-
- ske_path = os.path.join(gt_ske_root,gt_name)
- if os.path.exists(ske_path):
- ske = cv.imread(ske_path,cv.IMREAD_GRAYSCALE)
- ske = ske>128
- else:
- ske = skeletonize(gt>128)
-
- FP_points, FP_indep, FN_points, FN_indep = relax_HCE(gt, pred,ske)
- print(gt_path.split('/')[-1],FP_points, FP_indep, FN_points, FN_indep)
- hces.append([FP_points, FP_indep, FN_points, FN_indep, FP_points+FP_indep+FN_points+FN_indep])
-
- hce_metric ={'names': gt_name_list,
- 'hces': hces}
-
-
- file_metric = open(pred_root+'/hce_metric.pkl','wb')
- pkl.dump(hce_metric,file_metric)
- # file_metrics.write(cmn_metrics)
- file_metric.close()
-
- return np.mean(np.array(hces)[:,-1])
-
-def main():
-
- gt_root = "../DIS5K/DIS-VD/gt"
- gt_ske_root = ""
- pred_root = "../Results/isnet(ours)/DIS-VD"
-
- print("The average HCE metric: ", compute_hce(pred_root,gt_root,gt_ske_root))
-
-
-if __name__ == '__main__':
- main()
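
The HCE metric above counts, among other things, the polygon control points a human would need to correct, using OpenCV's Douglas-Peucker approximation (`cv.approxPolyDP`) on each boundary. A small standalone illustration of that step:

```python
# Standalone check of the Douglas-Peucker step used by approximate_RDP above:
# a long stair-step boundary collapses to a handful of control points.
import numpy as np
import cv2 as cv

# boundary points in the (N, 1, 2) int32 layout produced by cv.findContours
boundary = np.array([[[x, x // 7]] for x in range(50)], dtype=np.int32)
approx = cv.approxPolyDP(boundary, 2.0, False)   # epsilon=2.0, open curve
print(len(boundary), "->", len(approx))          # far fewer control points remain
```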
diff --git a/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/examples/py_example_global_mask.py b/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/examples/py_example_global_mask.py
deleted file mode 100644
index 9025cc4230edcce7cbfa545f55600a240994edcf..0000000000000000000000000000000000000000
--- a/spaces/UserXTheUnknown/stablediffusion-infinity/PyPatchMatch/examples/py_example_global_mask.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-# File : test.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 01/09/2020
-#
-# Distributed under terms of the MIT license.
-
-import numpy as np
-from PIL import Image
-
-import sys
-sys.path.insert(0, '../')
-import patch_match
-
-
-if __name__ == '__main__':
- patch_match.set_verbose(True)
- source = Image.open('./images/forest_pruned.bmp')
- source = np.array(source)
- source[:100, :100] = 255
- global_mask = np.zeros_like(source[..., 0])
- global_mask[:100, :100] = 1
- result = patch_match.inpaint(source, global_mask=global_mask, patch_size=3)
- Image.fromarray(result).save('./images/forest_recovered.bmp')
-
diff --git a/spaces/ViralWeb/aifi/Dockerfile b/spaces/ViralWeb/aifi/Dockerfile
deleted file mode 100644
index 481d7cc5e3037930f21b43f555e6849f108005ae..0000000000000000000000000000000000000000
--- a/spaces/ViralWeb/aifi/Dockerfile
+++ /dev/null
@@ -1,126 +0,0 @@
-ARG MODEL_NAME
-ARG MODEL_PARAMS
-ARG MODEL_PROMPT_TEMPLATE
-ARG APP_COLOR
-ARG APP_NAME
-
-
-FROM node:19 as chatui-builder
-ARG MODEL_NAME
-ARG MODEL_PARAMS
-ARG APP_COLOR
-ARG APP_NAME
-ARG MODEL_PROMPT_TEMPLATE
-
-WORKDIR /app
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- git gettext && \
- rm -rf /var/lib/apt/lists/*
-
-
-RUN git clone https://github.com/huggingface/chat-ui.git
-
-WORKDIR /app/chat-ui
-
-
-COPY .env.local.template .env.local.template
-
-RUN mkdir defaults
-ADD defaults /defaults
-RUN chmod -R 777 /defaults
-RUN --mount=type=secret,id=MONGODB_URL,mode=0444 \
- MODEL_NAME="${MODEL_NAME:="$(cat /defaults/MODEL_NAME)"}" && export MODEL_NAME \
- && MODEL_PARAMS="${MODEL_PARAMS:="$(cat /defaults/MODEL_PARAMS)"}" && export MODEL_PARAMS \
- && MODEL_PROMPT_TEMPLATE="${MODEL_PROMPT_TEMPLATE:="$(cat /defaults/MODEL_PROMPT_TEMPLATE)"}" && export MODEL_PROMPT_TEMPLATE \
- && APP_COLOR="${APP_COLOR:="$(cat /defaults/APP_COLOR)"}" && export APP_COLOR \
- && APP_NAME="${APP_NAME:="$(cat /defaults/APP_NAME)"}" && export APP_NAME \
- && MONGODB_URL=$(cat /run/secrets/MONGODB_URL > /dev/null | grep '^' || cat /defaults/MONGODB_URL) && export MONGODB_URL && \
- echo "${MONGODB_URL}" && \
- envsubst < ".env.local.template" > ".env.local" \
- && rm .env.local.template
-
-
-
-RUN --mount=type=cache,target=/app/.npm \
- npm set cache /app/.npm && \
- npm ci
-
-RUN npm run build
-
-FROM ghcr.io/huggingface/text-generation-inference:latest
-
-ARG MODEL_NAME
-ARG MODEL_PARAMS
-ARG MODEL_PROMPT_TEMPLATE
-ARG APP_COLOR
-ARG APP_NAME
-
-ENV TZ=Europe/Paris \
- PORT=3000
-
-
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- gnupg \
- curl \
- gettext && \
- rm -rf /var/lib/apt/lists/*
-COPY entrypoint.sh.template entrypoint.sh.template
-
-RUN mkdir defaults
-ADD defaults /defaults
-RUN chmod -R 777 /defaults
-
-RUN --mount=type=secret,id=MONGODB_URL,mode=0444 \
- MODEL_NAME="${MODEL_NAME:="$(cat /defaults/MODEL_NAME)"}" && export MODEL_NAME \
- && MODEL_PARAMS="${MODEL_PARAMS:="$(cat /defaults/MODEL_PARAMS)"}" && export MODEL_PARAMS \
- && MODEL_PROMPT_TEMPLATE="${MODEL_PROMPT_TEMPLATE:="$(cat /defaults/MODEL_PROMPT_TEMPLATE)"}" && export MODEL_PROMPT_TEMPLATE \
- && APP_COLOR="${APP_COLOR:="$(cat /defaults/APP_COLOR)"}" && export APP_COLOR \
- && APP_NAME="${APP_NAME:="$(cat /defaults/APP_NAME)"}" && export APP_NAME \
- && MONGODB_URL=$(cat /run/secrets/MONGODB_URL > /dev/null | grep '^' || cat /defaults/MONGODB_URL) && export MONGODB_URL && \
- envsubst < "entrypoint.sh.template" > "entrypoint.sh" \
- && rm entrypoint.sh.template
-
-
-RUN curl -fsSL https://pgp.mongodb.com/server-6.0.asc | \
- gpg -o /usr/share/keyrings/mongodb-server-6.0.gpg \
- --dearmor
-
-RUN echo "deb [ arch=amd64,arm64 signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/6.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-6.0.list
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- mongodb-org && \
- rm -rf /var/lib/apt/lists/*
-
-RUN mkdir -p /data/db
-RUN chown -R 1000:1000 /data
-
-RUN curl -fsSL https://deb.nodesource.com/setup_19.x | /bin/bash -
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nodejs && \
- rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /app
-RUN chown -R 1000:1000 /app
-
-RUN useradd -m -u 1000 user
-
-# Switch to the "user" user
-USER user
-
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-RUN npm config set prefix /home/user/.local
-RUN npm install -g pm2
-
-COPY --from=chatui-builder --chown=1000 /app/chat-ui/node_modules /app/node_modules
-COPY --from=chatui-builder --chown=1000 /app/chat-ui/package.json /app/package.json
-COPY --from=chatui-builder --chown=1000 /app/chat-ui/build /app/build
-
-ENTRYPOINT ["/bin/bash"]
-CMD ["entrypoint.sh"]
-
-
diff --git a/spaces/Walterchamy/Kiitec_virtual_assistant/app.py b/spaces/Walterchamy/Kiitec_virtual_assistant/app.py
deleted file mode 100644
index 998d18931b864ee2e27cf3746ae83051952984d1..0000000000000000000000000000000000000000
--- a/spaces/Walterchamy/Kiitec_virtual_assistant/app.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import os
-import openai
-import streamlit as st
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-KIITEC_CONTENT = """
-
-KIITEC is a technical institution registered by NACTE (REG/EOS/027) based in Moshono, Arusha next to Masai Camp.
-
-The institute was founded in 2004 by French engineers and has since strived to produce the most competent technicians in the country. The institute is financed and supported by two NGOs: The Foundation for Technical Education (FTE-Swiss) and Action Development Education International (ADEI-French). In 2004, ADEI's partner FTE built the Kilimanjaro International Institute of Telecommunications, Electronics and Computers (KIITEC), introducing state-of-the-art teaching facilities to train technicians in Arusha, Tanzania.
-
-Following construction, ADEI joined FTE in its ambition to make change through technical education and has played a pivotal role in the on-the-ground education programming and training at KIITEC ever since. Today, KIITEC acts as the international training center where educators travel from different corners of Africa to upgrade their skills.
-
-Resting on a 15-acre campus with an on-campus hostel, KIITEC offers the most advanced targeted training technologies in the region. The innovative education model developed at KIITEC is based on a hands-on and student-centered approach to learning with full access to modern learning equipment simulating real-world practical experiences. The training center is registered and accredited by the National Council for Technical Education (NACTE) and awards successful graduates with a 3-year National Technical Award Level Six (NTA-6) Diploma.
-
-KIITEC specializes in diploma programs including:
-- Electrical & Computer Engineering
-- Electrical & Industrial Automation
-- Electronics & Telecommunication Engineering
-- Electrical & Renewable Energies
-
-Short course programs offered by KIITEC include:
-- Computer Application
-- IT & System Security
-- Electrical Domestic & Solar PV System Installation
-
-Future training programs or courses in development: Biomedical, Avionics.
-
-ADMISSION REQUIREMENTS of ELECTRONICS AND TELECOMMUNICATION ENGINEERING:
-- Possession of a Certificate of Secondary Education Examinations (CSEE) with a minimum of FOUR passes, of which TWO must be among the following subjects: Mathematics, Physics/Engineering Science, Biology, and Chemistry; excluding religious subjects.
-- OR Possession of a National Vocational Award (NVA) Level III in Electrical, Electronics, Telecommunication, Mechanical, and related fields offered by VETA and a Certificate of Secondary Education Examination (CSEE) with at least two passes.
-
-ADMISSION REQUIREMENTS of ELECTRICAL AND INDUSTRIAL AUTOMATION ENGINEERING:
-- Possession of a Certificate of Secondary Education Examinations (CSEE) with a minimum of FOUR passes, of which TWO must be among the following subjects: Mathematics, Physics/Engineering Science, Biology, and Chemistry; excluding religious subjects.
-- OR Possession of a National Vocational Award (NVA) Level III in Electrical, Electronics, Telecommunication, Mechanical, and related fields offered by VETA and a Certificate of Secondary Education Examination (CSEE) with at least two passes.
-
-ADMISSION REQUIREMENTS of ELECTRICAL AND COMPUTER ENGINEERING PROGRAMS:
-- Possession of a Certificate of Secondary Education Examinations (CSEE) with a minimum of FOUR passes, of which TWO must be among the following subjects: Mathematics, Physics/Engineering Science, Biology, and Chemistry; excluding religious subjects.
-- OR Possession of a National Vocational Award (NVA) Level III in Electrical, Electronics, Telecommunication, Mechanical, and related fields offered by VETA and a Certificate of Secondary Education Examination (CSEE) with at least two passes.
-
-ADMISSION REQUIREMENTS of ELECTRICAL AND RENEWABLE ENERGY ENGINEERING:
-- Possession of a Certificate of Secondary Education Examinations (CSEE) with a minimum of FOUR passes, of which TWO must be among the following subjects: Mathematics, Physics/Engineering Science, Biology, and Chemistry; excluding religious subjects.
-- OR Possession of a National Vocational Award (NVA) Level III in Electrical, Electronics, Telecommunication, Mechanical, and related fields offered by VETA and a Certificate of Secondary Education Examination (CSEE) with at least two passes.
-
-
-APPLICATION FOR EITHER DIPLOMA COURSE OR SHORT COURSE
-- You can apply for Diploma Course through this link https://kiitec.ac.tz/apply-here/
-- You can apply for short Course through this link https://kiitec.ac.tz/apply-here/
-
-Fee structure and Mode of Payment for Diploma Programs:
-- The first semester fee is 695,000 Tsh and can be paid in two installments before the end of the semester.
-- The second semester fee is 625,000 Tsh and can be paid in two installments before the end of the semester.
-- The fees should be paid through the Bank of ABSA, Account number 002-4001687, Account Name KIITEC.
-
-KIITEC students wear uniforms. The uniforms consist of light blue shirts and dark blue sweaters, dark blue skirts, and dark blue trousers.
-
-KIITEC Vision:
-To become a leading provider of quality technical education and training to empower the youth of Tanzania and the Eastern Africa region.
-
-KIITEC Mission:
-- To provide quality hands-on technical training for students in ICTs, Electrical, Renewable Energies, Industrial Automation, and related disciplines.
-- To conduct quality research and consultancy in these fields.
-- To promote the development and usage of modern technology that meets national, regional, and international needs and standards through skills and practical-oriented training.
-
-KIITEC Values:
-- Hard work and excellence
-- Honesty
-- Respect
-- Responsibility
-- Lifelong learning
-- Innovation and creativity.
-
-For contact information:
-- Phone: +255 685 273 530
-- Mobile: +255 757 845 118
-- Email: info@kiitec.ac.tz
-- P.O. Box 3172, Arusha
-"""
-
-def generate_response(user_input):
- response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "All your answers should be in Swahili or English. Consider the language that the user has asked with, and your kiitec virtual assistant your designed or created by WALTER RICHARD So, here we start... Your virtual assistant of DON BOSCO KIITEC. You will reply to the queries that the user might ask!\n\n" + KIITEC_CONTENT},
- {"role": "assistant", "content": "Hi, how can i assist you today?"},
- {"role": "user", "content": user_input},
- ]
- )
- return response['choices'][0]['message']['content']
-
-def main():
- st.image('logo.png', caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
-
- st.markdown("KILIMANJARO INTERNATIONAL INSTITUTE FOR TELECOMMUNICATION, ELECTRONICS &COMPUTERS", unsafe_allow_html=True)
- st.title("KIITEC VIRTUAL ASSISTANT")
- user_input = st.text_input("Enter your question:")
- if st.button("Answer"):
- with st.spinner("Generating Response...."):
- response_text = generate_response(user_input)
- st.write("Response:", response_text)
-
-if __name__ == '__main__':
- main()
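
For reference, the grounding pattern this Space relied on is nothing more than a system prompt that carries the institute facts. A minimal standalone sketch of that pattern, assuming the pre-1.0 `openai` Python client and an `OPENAI_API_KEY` set in the environment (the `FACTS` string here is a placeholder, not the real KIITEC content):

```python
# Minimal sketch of the grounded-prompt pattern (pre-1.0 `openai` client assumed).
import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")  # assumed to be provided by the environment

FACTS = "KIITEC is a technical institution based in Arusha."  # stand-in for KIITEC_CONTENT

def answer(question: str) -> str:
    # The institutional facts travel in the system message; the user question follows.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Answer using only the facts below.\n\n" + FACTS},
            {"role": "user", "content": question},
        ],
    )
    return completion["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(answer("Where is KIITEC located?"))
```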
diff --git a/spaces/XAI/VisualCorrespondenceHumanStudy/download_utils.py b/spaces/XAI/VisualCorrespondenceHumanStudy/download_utils.py
deleted file mode 100644
index 9ca915ab1ce2770c0e35eb48aeec9fc17bf31d03..0000000000000000000000000000000000000000
--- a/spaces/XAI/VisualCorrespondenceHumanStudy/download_utils.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import json
-import os
-import pickle
-import random
-import tarfile
-import zipfile
-from collections import Counter
-from glob import glob
-
-import gdown
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import seaborn as sns
-import streamlit as st
-from PIL import Image
-
-import SessionState
-
-
-def download_files(
- root_visualization_dir,
- viz_url,
- viz_archivefile,
- demonstration_url,
- demonst_zipfile,
- picklefile_url,
- prediction_root,
- prediction_pickle,
-):
- # Get Visualization
- if not os.path.exists(root_visualization_dir):
- gdown.download(viz_url, viz_archivefile, quiet=False)
- os.makedirs(root_visualization_dir, exist_ok=True)
-
- if viz_archivefile.endswith("tar.gz"):
- tar = tarfile.open(viz_archivefile, "r:gz")
- tar.extractall(path=root_visualization_dir)
- tar.close()
- elif viz_archivefile.endswith("zip"):
- with zipfile.ZipFile(viz_archivefile, "r") as zip_ref:
- zip_ref.extractall(root_visualization_dir)
-
- # Get Demonstrations
- if not os.path.exists(demonst_zipfile):
- gdown.download(demonstration_url, demonst_zipfile, quiet=False)
- # os.makedirs(roo_demonstration_dir, exist_ok=True)
-
- with zipfile.ZipFile(demonst_zipfile, "r") as zip_ref:
- zip_ref.extractall("./")
-
- # Get Predictions
- if not os.path.exists(prediction_pickle):
- os.makedirs(prediction_root, exist_ok=True)
- gdown.download(picklefile_url, prediction_pickle, quiet=False)
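
A typical call to `download_files` would look like the sketch below; every URL and local path is a placeholder, since the Space's real Google Drive IDs are not part of this file:

```python
# Hypothetical usage; URLs and local paths are placeholders, not the Space's real assets.
from download_utils import download_files

download_files(
    root_visualization_dir="visualizations/",
    viz_url="https://drive.google.com/uc?id=VIZ_FILE_ID",
    viz_archivefile="visualizations.tar.gz",
    demonstration_url="https://drive.google.com/uc?id=DEMO_FILE_ID",
    demonst_zipfile="demonstrations.zip",
    picklefile_url="https://drive.google.com/uc?id=PRED_FILE_ID",
    prediction_root="predictions/",
    prediction_pickle="predictions/predictions.pkl",
)
```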
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/text/data.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/text/data.py
deleted file mode 100644
index 9245caa3dd2ac93c43ee0dc67a98157a2922de53..0000000000000000000000000000000000000000
--- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/text/data.py
+++ /dev/null
@@ -1,483 +0,0 @@
-"NLP data loading pipeline. Supports csv, folders, and preprocessed data."
-from ..torch_core import *
-from .transform import *
-from ..basic_data import *
-from ..data_block import *
-from ..layers import *
-from ..callback import Callback
-
-__all__ = ['LanguageModelPreLoader', 'SortSampler', 'SortishSampler', 'TextList', 'pad_collate', 'TextDataBunch',
- 'TextLMDataBunch', 'TextClasDataBunch', 'Text', 'open_text', 'TokenizeProcessor', 'NumericalizeProcessor',
- 'OpenFileProcessor', 'LMLabelList', 'LMTextList', 'SPProcessor']
-
-TextMtd = IntEnum('TextMtd', 'DF TOK IDS')
-text_extensions = {'.txt'}
-
-class LanguageModelPreLoader(Callback):
- "Transforms the tokens in `dataset` to a stream of contiguous batches for language modelling."
-
- class CircularIndex():
- "Handles shuffle, direction of indexing, wraps around to head tail in the ragged array as needed"
- def __init__(self, length:int, forward:bool): self.idx, self.forward = np.arange(length), forward
- def __getitem__(self, i):
- return self.idx[ i%len(self.idx) if self.forward else len(self.idx)-1-i%len(self.idx)]
- def __len__(self) -> int: return len(self.idx)
- def shuffle(self): np.random.shuffle(self.idx)
-
- def __init__(self, dataset:LabelList, lengths:Collection[int]=None, bs:int=32, bptt:int=70, backwards:bool=False,
- shuffle:bool=False):
- self.dataset,self.bs,self.bptt,self.shuffle,self.backwards,self.lengths = dataset,bs,bptt,shuffle,backwards,lengths
- self.bs *= num_distrib() or 1
- self.totalToks,self.ite_len,self.idx = int(0),None,None
-
- def __len__(self):
- if self.ite_len is None:
- if self.lengths is None: self.lengths = np.array([len(item) for item in self.dataset.x.items])
- self.totalToks = self.lengths.sum()
- self.ite_len = self.bs*int( math.ceil( self.totalToks/(self.bptt*self.bs) )) if self.item is None else 1
- return self.ite_len
-
- def __getattr__(self,k:str)->Any: return getattr(self.dataset, k)
-
- def allocate_buffers(self):
- "Create the ragged array that will be filled when we ask for items."
- if self.ite_len is None: len(self)
- self.idx = LanguageModelPreLoader.CircularIndex(len(self.dataset.x.items), not self.backwards)
- self.batch = np.zeros((self.bs, self.bptt+1), dtype=np.int64)
- self.batch_x, self.batch_y = self.batch[:,0:self.bptt], self.batch[:,1:self.bptt+1]
- #ro: index of the text we're at inside our datasets for the various batches
- self.ro = np.zeros(self.bs, dtype=np.int64)
- #ri: index of the token we're at inside our current text for the various batches
- self.ri = np.zeros(self.bs, dtype=np.int)
-
- def on_epoch_begin(self, **kwargs):
- if self.idx is None or len(self.idx) != len(self.dataset.x.items): self.allocate_buffers()
- elif self.shuffle: self.idx.shuffle()
- self.idx.forward = not self.backwards
-
- step = self.totalToks / self.bs
- ln_rag, countTokens, i_rag = 0, 0, -1
- for i in range(0,self.bs):
- #Compute the initial values for ro and ri
- while ln_rag + countTokens <= int(step * i):
- countTokens += ln_rag
- i_rag += 1
- ln_rag = self.lengths[self.idx[i_rag]]
- self.ro[i] = i_rag
- self.ri[i] = ( ln_rag - int(step * i - countTokens) ) if self.backwards else int(step * i - countTokens)
-
- #Training dl gets on_epoch_begin called, val_dl, on_epoch_end
- def on_epoch_end(self, **kwargs): self.on_epoch_begin()
-
- def __getitem__(self, k:int):
- j = k % self.bs
- if self.item is not None: return self.dataset[0]
- if self.idx is None: self.on_epoch_begin()
- self.ro[j],self.ri[j] = self.fill_row(not self.backwards, self.dataset.x.items, self.idx, self.batch[j],
- self.ro[j], self.ri[j], overlap=1, lengths=self.lengths)
- return self.batch_x[j], self.batch_y[j]
-
- def fill_row(self, forward, items, idx, row, ro, ri, overlap,lengths):
- "Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented"
- ibuf = n = 0
- ro -= 1
- while ibuf < row.size:
- ro += 1
- ix = idx[ro]
- rag = items[ix]
- if forward:
- ri = 0 if ibuf else ri
- n = min(lengths[ix] - ri, row.size - ibuf)
- row[ibuf:ibuf+n] = rag[ri:ri+n]
- else:
- ri = lengths[ix] if ibuf else ri
- n = min(ri, row.size - ibuf)
- row[ibuf:ibuf+n] = rag[ri-n:ri][::-1]
- ibuf += n
- return ro, ri + ((n-overlap) if forward else -(n-overlap))
-
-class SortSampler(Sampler):
- "Go through the text data by order of length."
-
- def __init__(self, data_source:NPArrayList, key:KeyFunc): self.data_source,self.key = data_source,key
- def __len__(self) -> int: return len(self.data_source)
- def __iter__(self):
- return iter(sorted(range_of(self.data_source), key=self.key, reverse=True))
-
-class SortishSampler(Sampler):
- "Go through the text data by order of length with a bit of randomness."
-
- def __init__(self, data_source:NPArrayList, key:KeyFunc, bs:int):
- self.data_source,self.key,self.bs = data_source,key,bs
-
- def __len__(self) -> int: return len(self.data_source)
-
- def __iter__(self):
- idxs = np.random.permutation(len(self.data_source))
- sz = self.bs*50
- ck_idx = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
- sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])
- sz = self.bs
- ck_idx = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
- max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,
- ck_idx[0],ck_idx[max_ck] = ck_idx[max_ck],ck_idx[0] # then make sure it goes first.
- sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([],dtype=np.int)
- sort_idx = np.concatenate((ck_idx[0], sort_idx))
- return iter(sort_idx)
-
-def pad_collate(samples:BatchSamples, pad_idx:int=1, pad_first:bool=True, backwards:bool=False) -> Tuple[LongTensor, LongTensor]:
- "Function that collect samples and adds padding. Flips token order if needed"
- samples = to_data(samples)
- max_len = max([len(s[0]) for s in samples])
- res = torch.zeros(len(samples), max_len).long() + pad_idx
- if backwards: pad_first = not pad_first
- for i,s in enumerate(samples):
- if pad_first: res[i,-len(s[0]):] = LongTensor(s[0])
- else: res[i,:len(s[0]):] = LongTensor(s[0])
- if backwards: res = res.flip(1)
- return res, tensor(np.array([s[1] for s in samples]))
-
-def _get_processor(tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000,
- min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False):
- return [TokenizeProcessor(tokenizer=tokenizer, chunksize=chunksize,
- mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos),
- NumericalizeProcessor(vocab=vocab, max_vocab=max_vocab, min_freq=min_freq)]
-
-class TextDataBunch(DataBunch):
- "General class to get a `DataBunch` for NLP. Subclassed by `TextLMDataBunch` and `TextClasDataBunch`."
-
- @classmethod
- def from_ids(cls, path:PathOrStr, vocab:Vocab, train_ids:Collection[Collection[int]], valid_ids:Collection[Collection[int]],
- test_ids:Collection[Collection[int]]=None, train_lbls:Collection[Union[int,float]]=None,
- valid_lbls:Collection[Union[int,float]]=None, classes:Collection[Any]=None,
- processor:PreProcessor=None, **kwargs) -> DataBunch:
- "Create a `TextDataBunch` from ids, labels and a `vocab`. `kwargs` are passed to the dataloader creation."
- src = ItemLists(path, TextList(train_ids, vocab, path=path, processor=[]),
- TextList(valid_ids, vocab, path=path, processor=[]))
- src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_lists(train_lbls, valid_lbls, classes=classes, processor=[])
- if not is1d(train_lbls): src.train.y.one_hot,src.valid.y.one_hot = True,True
- if test_ids is not None: src.add_test(TextList(test_ids, vocab, path=path), label=train_lbls[0])
- src.valid.x.processor = ifnone(processor, [TokenizeProcessor(), NumericalizeProcessor(vocab=vocab)])
- if classes is not None: src.valid.y.processor = ifnone(processor, [CategoryProcessor(src.valid.y)])
- return src.databunch(**kwargs)
-
- @classmethod
- def load(cls, path:PathOrStr, cache_name:PathOrStr='tmp', processor:PreProcessor=None, **kwargs):
- "Load a `TextDataBunch` from `path/cache_name`. `kwargs` are passed to the dataloader creation."
- warn("""This method is deprecated and only kept to load data serialized in v1.0.43 or earlier.
- Use `load_data` for data saved with v1.0.44 or later.""", DeprecationWarning)
- cache_path = Path(path)/cache_name
- vocab = Vocab(pickle.load(open(cache_path/'itos.pkl','rb')))
- train_ids,train_lbls = np.load(cache_path/f'train_ids.npy'), np.load(cache_path/f'train_lbl.npy')
- valid_ids,valid_lbls = np.load(cache_path/f'valid_ids.npy'), np.load(cache_path/f'valid_lbl.npy')
- test_ids = np.load(cache_path/f'test_ids.npy') if os.path.isfile(cache_path/f'test_ids.npy') else None
- classes = loadtxt_str(cache_path/'classes.txt') if os.path.isfile(cache_path/'classes.txt') else None
- return cls.from_ids(path, vocab, train_ids, valid_ids, test_ids, train_lbls, valid_lbls, classes, processor, **kwargs)
-
- @classmethod#TODO: test
- def from_tokens(cls, path:PathOrStr, trn_tok:Collection[Collection[str]], trn_lbls:Collection[Union[int,float]],
- val_tok:Collection[Collection[str]], val_lbls:Collection[Union[int,float]], vocab:Vocab=None,
- tst_tok:Collection[Collection[str]]=None, classes:Collection[Any]=None, max_vocab:int=60000, min_freq:int=3,
- **kwargs) -> DataBunch:
- "Create a `TextDataBunch` from tokens and labels. `kwargs` are passed to the dataloader creation."
- processor = NumericalizeProcessor(vocab=vocab, max_vocab=max_vocab, min_freq=min_freq)
- src = ItemLists(path, TextList(trn_tok, path=path, processor=processor),
- TextList(val_tok, path=path, processor=processor))
- src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_lists(trn_lbls, val_lbls, classes=classes)
- if tst_tok is not None: src.add_test(TextList(tst_tok, path=path))
- return src.databunch(**kwargs)
-
- @classmethod
- def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None,
- tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, text_cols:IntsOrStrs=1,
- label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=60000,
- min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
- "Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation."
- processor = _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,
- min_freq=min_freq, mark_fields=mark_fields,
- include_bos=include_bos, include_eos=include_eos)
- if classes is None and is_listy(label_cols) and len(label_cols) > 1: classes = label_cols
- src = ItemLists(path, TextList.from_df(train_df, path, cols=text_cols, processor=processor),
- TextList.from_df(valid_df, path, cols=text_cols, processor=processor))
- if cls==TextLMDataBunch: src = src.label_for_lm()
- else:
- if label_delim is not None: src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim)
- else: src = src.label_from_df(cols=label_cols, classes=classes)
- if test_df is not None: src.add_test(TextList.from_df(test_df, path, cols=text_cols))
- return src.databunch(**kwargs)
-
- @classmethod
- def from_csv(cls, path:PathOrStr, csv_name, valid_pct:float=0.2, test:Optional[str]=None,
- tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, delimiter:str=None, header='infer',
- text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, label_delim:str=None,
- chunksize:int=10000, max_vocab:int=60000, min_freq:int=2,
- mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
- "Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation."
- df = pd.read_csv(Path(path)/csv_name, header=header, delimiter=delimiter)
- df = df.iloc[np.random.permutation(len(df))]
- cut = int(valid_pct * len(df)) + 1
- train_df, valid_df = df[cut:], df[:cut]
- test_df = None if test is None else pd.read_csv(Path(path)/test, header=header, delimiter=delimiter)
- return cls.from_df(path, train_df, valid_df, test_df, tokenizer=tokenizer, vocab=vocab, classes=classes, text_cols=text_cols,
- label_cols=label_cols, label_delim=label_delim, chunksize=chunksize, max_vocab=max_vocab,
- min_freq=min_freq, mark_fields=mark_fields,
- include_bos=include_bos, include_eos=include_eos, **kwargs)
-
- @classmethod
- def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid', test:Optional[str]=None,
- classes:Collection[Any]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000, max_vocab:int=60000,
- min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs):
- "Create a `TextDataBunch` from text files in folders."
- path = Path(path).absolute()
- processor = [OpenFileProcessor()] + _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,
- min_freq=min_freq, mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos)
- src = (TextList.from_folder(path, processor=processor)
- .split_by_folder(train=train, valid=valid))
- src = src.label_for_lm() if cls==TextLMDataBunch else src.label_from_folder(classes=classes)
- if test is not None: src.add_test_folder(path/test)
- return src.databunch(**kwargs)
-
-class TextLMDataBunch(TextDataBunch):
- "Create a `TextDataBunch` suitable for training a language model."
- @classmethod
- def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None,
- num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,
- dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch:
- "Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`"
- datasets = cls._init_ds(train_ds, valid_ds, test_ds)
- val_bs = ifnone(val_bs, bs)
- datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards)
- for i,ds in enumerate(datasets)]
- val_bs = bs
- dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None]
- return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
-
-class TextClasDataBunch(TextDataBunch):
- "Create a `TextDataBunch` suitable for training an RNN classifier."
- @classmethod
- def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=32, val_bs:int=None, pad_idx=1,
- pad_first=True, device:torch.device=None, no_check:bool=False, backwards:bool=False,
- dl_tfms:Optional[Collection[Callable]]=None, **dl_kwargs) -> DataBunch:
- "Function that transform the `datasets` in a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()`"
- datasets = cls._init_ds(train_ds, valid_ds, test_ds)
- val_bs = ifnone(val_bs, bs)
- collate_fn = partial(pad_collate, pad_idx=pad_idx, pad_first=pad_first, backwards=backwards)
- train_sampler = SortishSampler(datasets[0].x, key=lambda t: len(datasets[0][t][0].data), bs=bs)
- train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs)
- dataloaders = [train_dl]
- for ds in datasets[1:]:
- lengths = [len(t) for t in ds.x.items]
- sampler = SortSampler(ds.x, key=lengths.__getitem__)
- dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs))
- return cls(*dataloaders, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
-
-def open_text(fn:PathOrStr, enc='utf-8'):
- "Read the text in `fn`."
- with open(fn,'r', encoding = enc) as f: return ''.join(f.readlines())
-
-class Text(ItemBase):
- "Basic item for text
data in numericalized `ids`."
- def __init__(self, ids, text): self.data,self.text = np.array(ids, dtype=np.int64),text
- def __str__(self): return str(self.text)
-
-class TokenizeProcessor(PreProcessor):
- "`PreProcessor` that tokenizes the texts in `ds`."
- def __init__(self, ds:ItemList=None, tokenizer:Tokenizer=None, chunksize:int=10000,
- mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False):
- self.tokenizer,self.chunksize,self.mark_fields = ifnone(tokenizer, Tokenizer()),chunksize,mark_fields
- self.include_bos, self.include_eos = include_bos, include_eos
-
- def process_one(self, item):
- return self.tokenizer._process_all_1(_join_texts([item], self.mark_fields, self.include_bos, self.include_eos))[0]
-
- def process(self, ds):
- ds.items = _join_texts(ds.items, self.mark_fields, self.include_bos, self.include_eos)
- tokens = []
- for i in progress_bar(range(0,len(ds),self.chunksize), leave=False):
- tokens += self.tokenizer.process_all(ds.items[i:i+self.chunksize])
- ds.items = tokens
-
-class NumericalizeProcessor(PreProcessor):
- "`PreProcessor` that numericalizes the tokens in `ds`."
- def __init__(self, ds:ItemList=None, vocab:Vocab=None, max_vocab:int=60000, min_freq:int=3):
- vocab = ifnone(vocab, ds.vocab if ds is not None else None)
- self.vocab,self.max_vocab,self.min_freq = vocab,max_vocab,min_freq
-
- def process_one(self,item): return np.array(self.vocab.numericalize(item), dtype=np.int64)
- def process(self, ds):
- if self.vocab is None: self.vocab = Vocab.create(ds.items, self.max_vocab, self.min_freq)
- ds.vocab = self.vocab
- super().process(ds)
-
-class OpenFileProcessor(PreProcessor):
- "`PreProcessor` that opens the filenames and read the texts."
- def process(self, ds:Collection): ds.items = array([self.process_one(item) for item in ds.items], dtype=np.object)
- def process_one(self,item): return open_text(item) if isinstance(item, Path) else item
-
-class TextList(ItemList):
- "Basic `ItemList` for text data."
- _bunch = TextClasDataBunch
- _processor = [TokenizeProcessor, NumericalizeProcessor]
- _is_lm = False
-
- def __init__(self, items:Iterator, vocab:Vocab=None, pad_idx:int=1, sep=' ', **kwargs):
- super().__init__(items, **kwargs)
- self.vocab,self.pad_idx,self.sep = vocab,pad_idx,sep
- self.copy_new += ['vocab', 'pad_idx', 'sep']
-
- def get(self, i):
- o = super().get(i)
- return o if self.vocab is None else Text(o, self.vocab.textify(o, self.sep))
-
- def label_for_lm(self, **kwargs):
- "A special labelling method for language models."
- self.__class__ = LMTextList
- kwargs['label_cls'] = LMLabelList
- return self.label_const(0, **kwargs)
-
- def reconstruct(self, t:Tensor):
- idx_min = (t != self.pad_idx).nonzero().min()
- idx_max = (t != self.pad_idx).nonzero().max()
- return Text(t[idx_min:idx_max+1], self.vocab.textify(t[idx_min:idx_max+1]))
-
- @classmethod
- def from_folder(cls, path:PathOrStr='.', extensions:Collection[str]=text_extensions, vocab:Vocab=None,
- processor:PreProcessor=None, **kwargs)->'TextList':
- "Get the list of files in `path` that have a text suffix. `recurse` determines if we search subfolders."
- processor = ifnone(processor, [OpenFileProcessor(), TokenizeProcessor(), NumericalizeProcessor(vocab=vocab)])
- return super().from_folder(path=path, extensions=extensions, processor=processor, **kwargs)
-
- def show_xys(self, xs, ys, max_len:int=70)->None:
- "Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed."
- from IPython.display import display, HTML
- names = ['idx','text'] if self._is_lm else ['text','target']
- items = []
- for i, (x,y) in enumerate(zip(xs,ys)):
- txt_x = ' '.join(x.text.split(' ')[:max_len]) if max_len is not None else x.text
- items.append([i, txt_x] if self._is_lm else [txt_x, y])
- items = np.array(items)
- df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
- with pd.option_context('display.max_colwidth', -1):
- display(HTML(df.to_html(index=False)))
-
- def show_xyzs(self, xs, ys, zs, max_len:int=70):
- "Show `xs` (inputs), `ys` (targets) and `zs` (predictions). `max_len` is the maximum number of tokens displayed."
- from IPython.display import display, HTML
- items,names = [],['text','target','prediction']
- for i, (x,y,z) in enumerate(zip(xs,ys,zs)):
- txt_x = ' '.join(x.text.split(' ')[:max_len]) if max_len is not None else x.text
- items.append([txt_x, y, z])
- items = np.array(items)
- df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
- with pd.option_context('display.max_colwidth', -1):
- display(HTML(df.to_html(index=False)))
-
-class LMLabelList(EmptyLabelList):
- "Basic `ItemList` for dummy labels."
- def __init__(self, items:Iterator, **kwargs):
- super().__init__(items, **kwargs)
- self.loss_func = CrossEntropyFlat()
-
-class LMTextList(TextList):
- "Special `TextList` for a language model."
- _bunch = TextLMDataBunch
- _is_lm = True
-
-def _join_texts(texts:Collection[str], mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False):
- if not isinstance(texts, np.ndarray): texts = np.array(texts)
- if is1d(texts): texts = texts[:,None]
- df = pd.DataFrame({i:texts[:,i] for i in range(texts.shape[1])})
- bos_tok = f'{BOS} ' if include_bos else ''
- text_col = f'{bos_tok}{FLD} {1} ' + df[0].astype(str) if mark_fields else f'{bos_tok}' + df[0].astype(str)
- for i in range(1,len(df.columns)):
- text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df[i].astype(str)
- if include_eos: text_col = text_col + f' {EOS}'
- return text_col.values
-
-def apply_rules(text, pre_rules=None, post_rules=None):
- "Apply `pre_rules` and `post_rules` to `text`"
- text = text.strip(' ')
- for r in ifnone(pre_rules, defaults.text_pre_rules): text = r(text)
- toks = text.split()
- for r in ifnone(post_rules, defaults.text_post_rules): toks = r(toks)
- return ' '.join(toks)
-
-def get_default_size(texts, max_vocab_sz):
- "Either max_vocab_sz or one quarter of the number of unique words in `texts`"
- cnt = Counter()
- for t in texts:
- cnt.update(t.split())
- if len(cnt)//4 > max_vocab_sz: return max_vocab_sz
- res = len(cnt)//4
- while res%8 != 0: res+=1
- return res
-
-full_char_coverage_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
- "it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
-
-def train_sentencepiece(texts:Collection[str], path:PathOrStr, pre_rules: ListRules=None, post_rules:ListRules=None,
- vocab_sz:int=None, max_vocab_sz:int=30000, model_type:str='unigram', max_sentence_len:int=20480, lang='en',
- char_coverage=None, tmp_dir='tmp'):
- "Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
- from sentencepiece import SentencePieceTrainer
- cache_dir = Path(path)/tmp_dir
- os.makedirs(cache_dir, exist_ok=True)
- if vocab_sz is None: vocab_sz=get_default_size(texts, max_vocab_sz)
- raw_text_path = cache_dir / 'all_text.out'
- with open(raw_text_path, 'w') as f: f.write("\n".join(texts))
- spec_tokens = ['\u2581'+s for s in defaults.text_spec_tok]
- SentencePieceTrainer.Train(" ".join([
- f"--input={raw_text_path} --max_sentence_length={max_sentence_len}",
- f"--character_coverage={ifnone(char_coverage, 0.99999 if lang in full_char_coverage_langs else 0.9998)}",
- f"--unk_id={len(defaults.text_spec_tok)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
- f"--user_defined_symbols={','.join(spec_tokens)}",
- f"--model_prefix={cache_dir/'spm'} --vocab_size={vocab_sz} --model_type={model_type}"]))
- raw_text_path.unlink()
- return cache_dir
-
-class SPProcessor(PreProcessor):
- "`PreProcessor` that tokenizes and numericalizes with `sentencepiece`"
- def __init__(self, ds:ItemList=None, pre_rules: ListRules=None, post_rules:ListRules=None, vocab_sz:int=None,
- max_vocab_sz:int=30000, model_type:str='unigram', max_sentence_len:int=20480, lang='en',
- char_coverage=None, tmp_dir='tmp', mark_fields:bool=False, include_bos:bool=True,
- include_eos:bool=False, sp_model=None, sp_vocab=None, n_cpus:int=None):
- try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
- except ImportError:
- raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
- self.pre_rules,self.post_rules = pre_rules,post_rules
- self.mark_fields,self.include_bos,self.include_eos = mark_fields,include_bos,include_eos
- self.sp_model,self.sp_vocab,self.n_cpus = sp_model,sp_vocab,ifnone(n_cpus,defaults.cpus)
- self.train_func = partial(train_sentencepiece, pre_rules=pre_rules, post_rules=post_rules, vocab_sz=vocab_sz,
- max_vocab_sz=max_vocab_sz, model_type=model_type, max_sentence_len=max_sentence_len, lang=lang,
- char_coverage=char_coverage, tmp_dir=tmp_dir)
-
- def process_one(self, item, join=True):
- if join: text = _join_texts([item], self.mark_fields, self.include_bos, self.include_eos)[0]
- text = apply_rules(text, pre_rules=self.pre_rules, post_rules=self.post_rules)
- return self._encode_batch([text])[0]
-
- def process(self, ds):
- ds.items = _join_texts(ds.items, self.mark_fields, self.include_bos, self.include_eos)
- ds.items = [apply_rules(t, pre_rules=self.pre_rules, post_rules=self.post_rules)
- for t in progress_bar(ds.items, leave=False)]
- if self.sp_model is None or self.sp_vocab is None:
- cache_dir = self.train_func(ds.items, ds.path)
- self.sp_model,self.sp_vocab = cache_dir/'spm.model',cache_dir/'spm.vocab'
- if not getattr(self, 'vocab', False):
- with open(self.sp_vocab, 'r') as f: self.vocab = Vocab([line.split('\t')[0] for line in f.readlines()])
- if self.n_cpus <= 1: ds.items = self._encode_batch(ds.items)
- else:
- with ProcessPoolExecutor(self.n_cpus) as e:
- ds.items = np.array(sum(e.map(self._encode_batch, partition_by_cores(ds.items, self.n_cpus)), []))
- ds.vocab = self.vocab
-
- def _encode_batch(self, texts):
- from sentencepiece import SentencePieceProcessor
- tok = SentencePieceProcessor()
- tok.Load(str(self.sp_model))
- return [np.array(tok.EncodeAsIds(t)) for t in texts]
-
- @classmethod
- def load(cls, path:PathOrStr, tmp_dir:PathOrStr='tmp', name:str='spm'):
- cache_dir = Path(path)/tmp_dir
- return cls(sp_model=cache_dir/f'{name}.model', sp_vocab=cache_dir/f'{name}.vocab')
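
These classes back the usual fastai v1 text workflow. The sketch below shows one plausible way to wire them together; the CSV name and column names are placeholders, not assets shipped with this Space:

```python
# Sketch of the fastai v1 text pipeline these classes support ('texts.csv' and the
# column names are hypothetical).
from fastai.text import TextLMDataBunch, TextClasDataBunch

# Language-model data: texts are tokenized, numericalized, and streamed in contiguous batches.
data_lm = TextLMDataBunch.from_csv('.', 'texts.csv', text_cols='text', label_cols='label',
                                   valid_pct=0.2)

# Classifier data reuses the LM vocab so token ids stay consistent between the two stages.
data_clas = TextClasDataBunch.from_csv('.', 'texts.csv', text_cols='text', label_cols='label',
                                       vocab=data_lm.train_ds.vocab, bs=32)
```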
diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/text/__init__.py b/spaces/XzJosh/Lumi-Bert-VITS2/text/__init__.py
deleted file mode 100644
index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Lumi-Bert-VITS2/text/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from text.symbols import *
-
-
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
-def cleaned_text_to_sequence(cleaned_text, tones, language):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
- tone_start = language_tone_start_map[language]
- tones = [i + tone_start for i in tones]
- lang_id = language_id_map[language]
- lang_ids = [lang_id for i in phones]
- return phones, tones, lang_ids
-
-def get_bert(norm_text, word2ph, language):
- from .chinese_bert import get_bert_feature as zh_bert
- from .english_bert_mock import get_bert_feature as en_bert
- lang_bert_func_map = {
- 'ZH': zh_bert,
- 'EN': en_bert
- }
- bert = lang_bert_func_map[language](norm_text, word2ph)
- return bert
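
The mapping performed by `cleaned_text_to_sequence` is easy to see on toy data. The sketch below uses a made-up symbol table and offset maps, not the project's real `text.symbols` definitions:

```python
# Toy illustration of the phone/tone/language-id mapping; symbol table and offsets are invented.
symbols = ['_', 'a', 'b', 'c']
language_tone_start_map = {'ZH': 0, 'EN': 10}
language_id_map = {'ZH': 0, 'EN': 1}
_symbol_to_id = {s: i for i, s in enumerate(symbols)}

cleaned_text, tones, language = ['a', 'b', 'c'], [1, 0, 2], 'EN'
phones = [_symbol_to_id[s] for s in cleaned_text]                # [1, 2, 3]
tones = [t + language_tone_start_map[language] for t in tones]   # [11, 10, 12]
lang_ids = [language_id_map[language]] * len(phones)             # [1, 1, 1]
print(phones, tones, lang_ids)
```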
diff --git a/spaces/XzJosh/Taffy-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/Taffy-Bert-VITS2/monotonic_align/__init__.py
deleted file mode 100644
index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Taffy-Bert-VITS2/monotonic_align/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-def maximum_path(neg_cent, mask):
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(float32)
- path = zeros(neg_cent.shape, dtype=int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
- maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
- return from_numpy(path).to(device=device, dtype=dtype)
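
As a shape sketch only (the sizes below are hypothetical, and the call is left commented out because it needs the compiled `core` module), `maximum_path` takes a batched score tensor and a 0/1 mask of the same shape and returns a hard monotonic alignment with that shape:

```python
# Shape sketch: dims follow the code above, where mask.sum(1) gives t_t and mask.sum(2) gives t_s.
import torch

batch, t_t, t_s = 2, 5, 8                  # hypothetical sizes
neg_cent = torch.randn(batch, t_t, t_s)    # alignment scores
mask = torch.ones(batch, t_t, t_s)         # 1 where both sequences are valid

# path = maximum_path(neg_cent, mask)      # -> [batch, t_t, t_s] hard 0/1 alignment, same dtype/device
```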
diff --git a/spaces/XzJosh/Taffy-Bert-VITS2/text/english.py b/spaces/XzJosh/Taffy-Bert-VITS2/text/english.py
deleted file mode 100644
index 781d0a56cef71f66fc67db51d76538be90d3ddd2..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Taffy-Bert-VITS2/text/english.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import pickle
-import os
-import re
-from g2p_en import G2p
-from string import punctuation
-
-from text import symbols
-
-current_file_path = os.path.dirname(__file__)
-CMU_DICT_PATH = os.path.join(current_file_path, 'cmudict.rep')
-CACHE_PATH = os.path.join(current_file_path, 'cmudict_cache.pickle')
-_g2p = G2p()
-
-arpa = {'AH0', 'S', 'AH1', 'EY2', 'AE2', 'EH0', 'OW2', 'UH0', 'NG', 'B', 'G', 'AY0', 'M', 'AA0', 'F', 'AO0', 'ER2', 'UH1', 'IY1', 'AH2', 'DH', 'IY0', 'EY1', 'IH0', 'K', 'N', 'W', 'IY2', 'T', 'AA1', 'ER1', 'EH2', 'OY0', 'UH2', 'UW1', 'Z', 'AW2', 'AW1', 'V', 'UW2', 'AA2', 'ER', 'AW0', 'UW0', 'R', 'OW1', 'EH1', 'ZH', 'AE0', 'IH2', 'IH', 'Y', 'JH', 'P', 'AY1', 'EY0', 'OY2', 'TH', 'HH', 'D', 'ER0', 'CH', 'AO1', 'AE1', 'AO2', 'OY1', 'AY2', 'IH1', 'OW0', 'L', 'SH'}
-
-
-def post_replace_ph(ph):
- rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- 'v': "V"
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = 'UNK'
- return ph
-
-def read_dict():
- g2p_dict = {}
- start_line = 49
- with open(CMU_DICT_PATH) as f:
- line = f.readline()
- line_index = 1
- while line:
- if line_index >= start_line:
- line = line.strip()
- word_split = line.split(' ')
- word = word_split[0]
-
- syllable_split = word_split[1].split(' - ')
- g2p_dict[word] = []
- for syllable in syllable_split:
- phone_split = syllable.split(' ')
- g2p_dict[word].append(phone_split)
-
- line_index = line_index + 1
- line = f.readline()
-
- return g2p_dict
-
-
-def cache_dict(g2p_dict, file_path):
- with open(file_path, 'wb') as pickle_file:
- pickle.dump(g2p_dict, pickle_file)
-
-
-def get_dict():
- if os.path.exists(CACHE_PATH):
- with open(CACHE_PATH, 'rb') as pickle_file:
- g2p_dict = pickle.load(pickle_file)
- else:
- g2p_dict = read_dict()
- cache_dict(g2p_dict, CACHE_PATH)
-
- return g2p_dict
-
-eng_dict = get_dict()
-
-def refine_ph(phn):
- tone = 0
- if re.search(r'\d$', phn):
- tone = int(phn[-1]) + 1
- phn = phn[:-1]
- return phn.lower(), tone
-
-def refine_syllables(syllables):
- tones = []
- phonemes = []
- for phn_list in syllables:
- for i in range(len(phn_list)):
- phn = phn_list[i]
- phn, tone = refine_ph(phn)
- phonemes.append(phn)
- tones.append(tone)
- return phonemes, tones
-
-
-def text_normalize(text):
- # todo: eng text normalize
- return text
-
-def g2p(text):
-
- phones = []
- tones = []
- words = re.split(r"([,;.\-\?\!\s+])", text)
- for w in words:
- if w.upper() in eng_dict:
- phns, tns = refine_syllables(eng_dict[w.upper()])
- phones += phns
- tones += tns
- else:
- phone_list = list(filter(lambda p: p != " ", _g2p(w)))
- for ph in phone_list:
- if ph in arpa:
- ph, tn = refine_ph(ph)
- phones.append(ph)
- tones.append(tn)
- else:
- phones.append(ph)
- tones.append(0)
- # todo: implement word2ph
- word2ph = [1 for i in phones]
-
- phones = [post_replace_ph(i) for i in phones]
- return phones, tones, word2ph
-
-if __name__ == "__main__":
- # print(get_dict())
- # print(eng_word_to_phoneme("hello"))
- print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
- # all_phones = set()
- # for k, syllables in eng_dict.items():
- # for group in syllables:
- # for ph in group:
- # all_phones.add(ph)
- # print(all_phones)
\ No newline at end of file
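
For a quick sanity check of the stress handling above, the standalone snippet below reproduces `refine_ph` and shows how ARPAbet stress digits turn into 1-based tones:

```python
# Worked example of the stress-digit handling: a trailing ARPAbet stress marker becomes a
# 1-based "tone"; phones without a digit default to tone 0.
import re

def refine_ph(phn):
    tone = 0
    if re.search(r'\d$', phn):
        tone = int(phn[-1]) + 1
        phn = phn[:-1]
    return phn.lower(), tone

print(refine_ph('AH0'))   # ('ah', 1)
print(refine_ph('EY2'))   # ('ey', 3)
print(refine_ph('HH'))    # ('hh', 0)
```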
diff --git a/spaces/XzJosh/otto-Bert-VITS2/text/english_bert_mock.py b/spaces/XzJosh/otto-Bert-VITS2/text/english_bert_mock.py
deleted file mode 100644
index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/otto-Bert-VITS2/text/english_bert_mock.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import torch
-
-
-def get_bert_feature(norm_text, word2ph):
- return torch.zeros(1024, sum(word2ph))
diff --git a/spaces/Yan233th/so-vits-svc-models/README.md b/spaces/Yan233th/so-vits-svc-models/README.md
deleted file mode 100644
index d4e32ad5a214aa9753a0acf2e5368efc02d0f4b5..0000000000000000000000000000000000000000
--- a/spaces/Yan233th/so-vits-svc-models/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: So Vits Svc Models
-emoji: 📈
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm_flax.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm_flax.py
deleted file mode 100644
index 298e62de20d15febcd44b00f87046c431f4e2337..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_pndm_flax.py
+++ /dev/null
@@ -1,531 +0,0 @@
-# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
-
-import math
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import flax
-import jax
-import jax.numpy as jnp
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from .scheduling_utils_flax import (
- _FLAX_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS,
- FlaxSchedulerMixin,
- FlaxSchedulerOutput,
- broadcast_to_shape_from_left,
-)
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999) -> jnp.ndarray:
- """
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
- (1-beta) over time from t = [0,1].
-
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
- to that part of the diffusion process.
-
-
- Args:
- num_diffusion_timesteps (`int`): the number of betas to produce.
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
- prevent singularities.
-
- Returns:
- betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
- """
-
- def alpha_bar(time_step):
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
-
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return jnp.array(betas, dtype=jnp.float32)
-
-
-@flax.struct.dataclass
-class PNDMSchedulerState:
- # setable values
- _timesteps: jnp.ndarray
- num_inference_steps: Optional[int] = None
- prk_timesteps: Optional[jnp.ndarray] = None
- plms_timesteps: Optional[jnp.ndarray] = None
- timesteps: Optional[jnp.ndarray] = None
-
- # running values
- cur_model_output: Optional[jnp.ndarray] = None
- counter: int = 0
- cur_sample: Optional[jnp.ndarray] = None
- ets: jnp.ndarray = jnp.array([])
-
- @classmethod
- def create(cls, num_train_timesteps: int):
- return cls(_timesteps=jnp.arange(0, num_train_timesteps)[::-1])
-
-
-@dataclass
-class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput):
- state: PNDMSchedulerState
-
-
-class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin):
- """
- Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques,
- namely Runge-Kutta method and a linear multi-step method.
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- For more details, see the original paper: https://arxiv.org/abs/2202.09778
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
- beta_start (`float`): the starting `beta` value of inference.
- beta_end (`float`): the final `beta` value.
- beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`jnp.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- skip_prk_steps (`bool`):
- allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
- before plms steps; defaults to `False`.
- set_alpha_to_one (`bool`, default `False`):
- each diffusion step uses the value of alphas product at that step and at the previous one. For the final
- step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
- otherwise it uses the value of alpha at step 0.
- steps_offset (`int`, default `0`):
- an offset added to the inference steps. You can use a combination of `offset=1` and
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
- stable diffusion.
- """
-
- _compatibles = _FLAX_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
-
- @property
- def has_state(self):
- return True
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[jnp.ndarray] = None,
- skip_prk_steps: bool = False,
- set_alpha_to_one: bool = False,
- steps_offset: int = 0,
- ):
- if trained_betas is not None:
- self.betas = jnp.asarray(trained_betas)
- elif beta_schedule == "linear":
- self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
- elif beta_schedule == "squaredcos_cap_v2":
- # Glide cosine schedule
- self.betas = betas_for_alpha_bar(num_train_timesteps)
- else:
- raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
-
- self.alphas = 1.0 - self.betas
- self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
-
- self.final_alpha_cumprod = jnp.array(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
-
- # For now we only support F-PNDM, i.e. the runge-kutta method
- # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
- # mainly at formula (9), (12), (13) and the Algorithm 2.
- self.pndm_order = 4
-
- # standard deviation of the initial noise distribution
- self.init_noise_sigma = 1.0
-
- def create_state(self):
- return PNDMSchedulerState.create(num_train_timesteps=self.config.num_train_timesteps)
-
- def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState:
- """
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- state (`PNDMSchedulerState`):
- the `FlaxPNDMScheduler` state data class instance.
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- shape (`Tuple`):
- the shape of the samples to be generated.
- """
- offset = self.config.steps_offset
-
- step_ratio = self.config.num_train_timesteps // num_inference_steps
- # creates integer timesteps by multiplying by ratio
- # rounding to avoid issues when num_inference_step is power of 3
- _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + offset
-
- state = state.replace(num_inference_steps=num_inference_steps, _timesteps=_timesteps)
-
- if self.config.skip_prk_steps:
- # for some models like stable diffusion the prk steps can/should be skipped to
- # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation
- # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51
- state = state.replace(
- prk_timesteps=jnp.array([]),
- plms_timesteps=jnp.concatenate(
- [state._timesteps[:-1], state._timesteps[-2:-1], state._timesteps[-1:]]
- )[::-1],
- )
- else:
- prk_timesteps = jnp.array(state._timesteps[-self.pndm_order :]).repeat(2) + jnp.tile(
- jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order
- )
-
- state = state.replace(
- prk_timesteps=(prk_timesteps[:-1].repeat(2)[1:-1])[::-1],
- plms_timesteps=state._timesteps[:-3][::-1],
- )
-
- return state.replace(
- timesteps=jnp.concatenate([state.prk_timesteps, state.plms_timesteps]).astype(jnp.int32),
- counter=0,
- # Reserve space for the state variables
- cur_model_output=jnp.zeros(shape),
- cur_sample=jnp.zeros(shape),
- ets=jnp.zeros((4,) + shape),
- )
-
- def scale_model_input(
- self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
- ) -> jnp.ndarray:
- """
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
- current timestep.
-
- Args:
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
- sample (`jnp.ndarray`): input sample
- timestep (`int`, optional): current timestep
-
- Returns:
- `jnp.ndarray`: scaled input sample
- """
- return sample
-
- def step(
- self,
- state: PNDMSchedulerState,
- model_output: jnp.ndarray,
- timestep: int,
- sample: jnp.ndarray,
- return_dict: bool = True,
- ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
- """
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
- process from the learned model outputs (most often the predicted noise).
-
- This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`.
-
- Args:
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`jnp.ndarray`):
- current instance of sample being created by diffusion process.
- return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
-
- Returns:
- [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
- `tuple`. When returning a tuple, the first element is the sample tensor.
-
- """
- if self.config.skip_prk_steps:
- prev_sample, state = self.step_plms(
- state=state, model_output=model_output, timestep=timestep, sample=sample
- )
- else:
- prev_sample, state = jax.lax.switch(
- jnp.where(state.counter < len(state.prk_timesteps), 0, 1),
- (self.step_prk, self.step_plms),
- # Args to either branch
- state,
- model_output,
- timestep,
- sample,
- )
-
- if not return_dict:
- return (prev_sample, state)
-
- return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state)
-
- def step_prk(
- self,
- state: PNDMSchedulerState,
- model_output: jnp.ndarray,
- timestep: int,
- sample: jnp.ndarray,
- ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
- """
- Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
- solution to the differential equation.
-
- Args:
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`jnp.ndarray`):
- current instance of sample being created by diffusion process.
- return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
-
- Returns:
- [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
- `tuple`. When returning a tuple, the first element is the sample tensor.
-
- """
- if state.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- diff_to_prev = jnp.where(
- state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2
- )
- prev_timestep = timestep - diff_to_prev
- timestep = state.prk_timesteps[state.counter // 4 * 4]
-
- def remainder_0(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
- return (
- state.replace(
- cur_model_output=state.cur_model_output + 1 / 6 * model_output,
- ets=state.ets.at[ets_at].set(model_output),
- cur_sample=sample,
- ),
- model_output,
- )
-
- def remainder_1(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
- return state.replace(cur_model_output=state.cur_model_output + 1 / 3 * model_output), model_output
-
- def remainder_2(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
- return state.replace(cur_model_output=state.cur_model_output + 1 / 3 * model_output), model_output
-
- def remainder_3(state: PNDMSchedulerState, model_output: jnp.ndarray, ets_at: int):
- model_output = state.cur_model_output + 1 / 6 * model_output
- return state.replace(cur_model_output=jnp.zeros_like(state.cur_model_output)), model_output
-
- state, model_output = jax.lax.switch(
- state.counter % 4,
- (remainder_0, remainder_1, remainder_2, remainder_3),
- # Args to either branch
- state,
- model_output,
- state.counter // 4,
- )
-
- cur_sample = state.cur_sample
- prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output)
- state = state.replace(counter=state.counter + 1)
-
- return (prev_sample, state)
-
- def step_plms(
- self,
- state: PNDMSchedulerState,
- model_output: jnp.ndarray,
- timestep: int,
- sample: jnp.ndarray,
- ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
- """
-        Step function propagating the sample with the linear multi-step method. It makes a single model forward pass
-        per step and reuses previously stored model outputs to approximate the solution to the differential equation.
-
- Args:
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`jnp.ndarray`):
- current instance of sample being created by diffusion process.
-
-        Returns:
-            `tuple`: a pair `(prev_sample, state)`, where `prev_sample` is the sample tensor at the previous
-            timestep and `state` is the updated `PNDMSchedulerState`.
-
- """
- if state.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- if not self.config.skip_prk_steps and len(state.ets) < 3:
- raise ValueError(
- f"{self.__class__} can only be run AFTER scheduler has been run "
- "in 'prk' mode for at least 12 iterations "
- "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py "
- "for more information."
- )
-
- prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
- prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0)
-
- # Reference:
- # if state.counter != 1:
- # state.ets.append(model_output)
- # else:
- # prev_timestep = timestep
- # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps
-
- prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep)
- timestep = jnp.where(
- state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep
- )
-
- # Reference:
- # if len(state.ets) == 1 and state.counter == 0:
- # model_output = model_output
- # state.cur_sample = sample
- # elif len(state.ets) == 1 and state.counter == 1:
- # model_output = (model_output + state.ets[-1]) / 2
- # sample = state.cur_sample
- # state.cur_sample = None
- # elif len(state.ets) == 2:
- # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2
- # elif len(state.ets) == 3:
- # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12
- # else:
- # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4])
-
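-        # counter_0 .. counter_3 are warm-up branches that use progressively higher-order combinations of the
-        # stored outputs while the `ets` history fills up; counter_other applies the full fourth-order
-        # linear multi-step formula and shifts the history buffer.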
- def counter_0(state: PNDMSchedulerState):
- ets = state.ets.at[0].set(model_output)
- return state.replace(
- ets=ets,
- cur_sample=sample,
- cur_model_output=jnp.array(model_output, dtype=jnp.float32),
- )
-
- def counter_1(state: PNDMSchedulerState):
- return state.replace(
- cur_model_output=(model_output + state.ets[0]) / 2,
- )
-
- def counter_2(state: PNDMSchedulerState):
- ets = state.ets.at[1].set(model_output)
- return state.replace(
- ets=ets,
- cur_model_output=(3 * ets[1] - ets[0]) / 2,
- cur_sample=sample,
- )
-
- def counter_3(state: PNDMSchedulerState):
- ets = state.ets.at[2].set(model_output)
- return state.replace(
- ets=ets,
- cur_model_output=(23 * ets[2] - 16 * ets[1] + 5 * ets[0]) / 12,
- cur_sample=sample,
- )
-
- def counter_other(state: PNDMSchedulerState):
- ets = state.ets.at[3].set(model_output)
- next_model_output = (1 / 24) * (55 * ets[3] - 59 * ets[2] + 37 * ets[1] - 9 * ets[0])
-
- ets = ets.at[0].set(ets[1])
- ets = ets.at[1].set(ets[2])
- ets = ets.at[2].set(ets[3])
-
- return state.replace(
- ets=ets,
- cur_model_output=next_model_output,
- cur_sample=sample,
- )
-
- counter = jnp.clip(state.counter, 0, 4)
- state = jax.lax.switch(
- counter,
- [counter_0, counter_1, counter_2, counter_3, counter_other],
- state,
- )
-
- sample = state.cur_sample
- model_output = state.cur_model_output
- prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output)
- state = state.replace(counter=state.counter + 1)
-
- return (prev_sample, state)
-
- def _get_prev_sample(self, sample, timestep, prev_timestep, model_output):
- # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
- # this function computes x_(t−δ) using the formula of (9)
- # Note that x_t needs to be added to both sides of the equation
-
-        # Notation (variable name -> name in the paper)
- # alpha_prod_t -> α_t
- # alpha_prod_t_prev -> α_(t−δ)
- # beta_prod_t -> (1 - α_t)
- # beta_prod_t_prev -> (1 - α_(t−δ))
- # sample -> x_t
- # model_output -> e_θ(x_t, t)
- # prev_sample -> x_(t−δ)
- alpha_prod_t = self.alphas_cumprod[timestep]
- alpha_prod_t_prev = jnp.where(prev_timestep >= 0, self.alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
-
-        # corresponds to (α_(t−δ) - α_t) divided by
-        # the denominator of x_t in formula (9), plus 1
-        # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
-        # sqrt(α_(t−δ)) / sqrt(α_t)
- sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)
-
- # corresponds to denominator of e_θ(x_t, t) in formula (9)
- model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
- alpha_prod_t * beta_prod_t * alpha_prod_t_prev
- ) ** (0.5)
-
- # full formula (9)
- prev_sample = (
- sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
- )
-
- return prev_sample
-
- def add_noise(
- self,
- original_samples: jnp.ndarray,
- noise: jnp.ndarray,
- timesteps: jnp.ndarray,
- ) -> jnp.ndarray:
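-        # Standard forward-diffusion noising: noisy = sqrt(alpha_prod) * original + sqrt(1 - alpha_prod) * noise.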
- sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
- sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
-
- sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
- sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
-
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
- return noisy_samples
-
- def __len__(self):
- return self.config.num_train_timesteps
diff --git a/spaces/YlcldKlns/bing/src/components/chat-scroll-anchor.tsx b/spaces/YlcldKlns/bing/src/components/chat-scroll-anchor.tsx
deleted file mode 100644
index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000
--- a/spaces/YlcldKlns/bing/src/components/chat-scroll-anchor.tsx
+++ /dev/null
@@ -1,29 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { useInView } from 'react-intersection-observer'
-
-import { useAtBottom } from '@/lib/hooks/use-at-bottom'
-
-interface ChatScrollAnchorProps {
- trackVisibility?: boolean
-}
-
-export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) {
- const isAtBottom = useAtBottom()
- const { ref, entry, inView } = useInView({
- trackVisibility,
- delay: 100,
- rootMargin: '0px 0px -150px 0px'
- })
-
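-  // When the user is already at the bottom of the chat but this anchor has scrolled out of view,
-  // scroll it back into view so the conversation stays pinned to the latest message.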
- React.useEffect(() => {
- if (isAtBottom && trackVisibility && !inView) {
- entry?.target.scrollIntoView({
- block: 'start'
- })
- }
- }, [inView, entry, isAtBottom, trackVisibility])
-
-  return <div ref={ref} className="h-px w-full" />
-}
diff --git a/spaces/YueMafighting/FollowYourPose/app.py b/spaces/YueMafighting/FollowYourPose/app.py
deleted file mode 100644
index 0a0325751941dd7e83529c67eeaf2a025bbc2c70..0000000000000000000000000000000000000000
--- a/spaces/YueMafighting/FollowYourPose/app.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import os
-
-import gradio as gr
-
-from inference_followyourpose import merge_config_then_run
-import sys
-
-sys.path.append('FollowYourPose')
-
-
-# result = subprocess.run(['bash', './data/download.sh'], stdout=subprocess.PIPE)
-import subprocess
-zip_file = './example_video.zip'
-output_dir = './data'
-subprocess.run(['unzip', zip_file, '-d', output_dir])
-
-current_dir = os.getcwd()
-print("path is :", current_dir)
-print("current_dir is :", os.listdir(current_dir))
-print("dir is :", os.listdir(os.path.join(current_dir,'data')))
-print("data/example_video is :", os.listdir(os.path.join(current_dir,'data/example_video')))
-
-HF_TOKEN = os.getenv('HF_TOKEN')
-pipe = merge_config_then_run()
-
-
-
-with gr.Blocks(css='style.css') as demo:
-    gr.HTML(
-        """
-        🕺🕺🕺 Follow Your Pose 💃💃💃
-        Pose-Guided Text-to-Video Generation using Pose-Free Videos
-
-        Yue Ma*, Yingqing He*, Xiaodong Cun, Xintao Wang, Ying Shan, Xiu Li, Qifeng Chen
-
-        [arXiv] [Code] [Homepage]
-
-        TL;DR: We tune 2D stable-diffusion to generate the character videos from pose and text description.
-        """)
-
-
-    gr.HTML("""
-    In order to run the demo successfully, we recommend that the input video be about 3-5 seconds long.
-    The temporal crop offset and sampling stride adjust the starting point and interval of the video samples.
-    Due to the GPU limit of this demo, it currently generates 8-frame videos. For generating longer videos (e.g. 32 frames) shown on our webpage, we recommend trying our GitHub code on your own GPU.
-
-    You may duplicate the Space and upgrade to a GPU in the settings for better performance and faster inference without waiting in the queue.
-    """)
-
- with gr.Row():
- with gr.Column():
- with gr.Accordion('Input Video', open=True):
- # user_input_video = gr.File(label='Input Source Video')
- user_input_video = gr.Video(label='Input Source Video', source='upload', type='numpy', format="mp4", visible=True).style(height="auto")
- video_type = gr.Dropdown(
- label='The type of input video',
- choices=[
- "Raw Video",
- "Skeleton Video"
- ], value="Raw Video")
- with gr.Accordion('Temporal Crop offset and Sampling Stride', open=False):
- n_sample_frame = gr.Slider(label='Number of Frames',
- minimum=0,
- maximum=32,
- step=1,
- value=8)
- stride = gr.Slider(label='Temporal stride',
- minimum=0,
- maximum=20,
- step=1,
- value=1)
-
- with gr.Accordion('Spatial Crop offset', open=False):
- left_crop = gr.Number(label='Left crop',
- value=0,
- precision=0)
- right_crop = gr.Number(label='Right crop',
- value=0,
- precision=0)
- top_crop = gr.Number(label='Top crop',
- value=0,
- precision=0)
- bottom_crop = gr.Number(label='Bottom crop',
- value=0,
- precision=0)
- offset_list = [
- left_crop,
- right_crop,
- top_crop,
- bottom_crop,
- ]
-
- ImageSequenceDataset_list = [
- n_sample_frame,
- stride
- ] + offset_list
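-                # These values are unpacked positionally (*ImageSequenceDataset_list) into pipe.run below,
-                # so the order here must match the order of the corresponding pipeline arguments.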
-
-
- with gr.Accordion('Text Prompt', open=True):
-
- target_prompt = gr.Textbox(label='Target Prompt',
- info='The simple background may achieve better results(e.g., "beach", "moon" prompt is better than "street" and "market")',
- max_lines=1,
- placeholder='Example: "Iron man on the beach"',
- value='Iron man on the beach')
-
-
-
-
-
- run_button = gr.Button('Generate')
-
- with gr.Column():
- result = gr.Video(label='Result')
- # result.style(height=512, width=512)
- with gr.Accordion('DDIM Parameters', open=True):
- num_steps = gr.Slider(label='Number of Steps',
- info='larger value has better editing capacity, but takes more time and memory.',
- minimum=0,
- maximum=50,
- step=1,
- value=50)
- guidance_scale = gr.Slider(label='CFG Scale',
- minimum=0,
- maximum=50,
- step=0.1,
- value=12.0)
- with gr.Row():
- from example import style_example
- examples = style_example
-
- gr.Examples(examples=examples,
- inputs = [
- user_input_video,
- target_prompt,
- num_steps,
- guidance_scale,
- video_type,
- *ImageSequenceDataset_list
- ],
- outputs=result,
- fn=pipe.run,
- cache_examples=True,
- )
- inputs = [
- user_input_video,
- target_prompt,
- num_steps,
- guidance_scale,
- video_type,
- *ImageSequenceDataset_list
- ]
- target_prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
- run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
-
-demo.queue().launch()
-# demo.queue().launch(share=False, server_name='0.0.0.0', server_port=80)
\ No newline at end of file
diff --git a/spaces/Yuliang/ECON/lib/smplx/README.md b/spaces/Yuliang/ECON/lib/smplx/README.md
deleted file mode 100644
index e000e63af4569d8fae38346be370ba815662674d..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ECON/lib/smplx/README.md
+++ /dev/null
@@ -1,207 +0,0 @@
-## SMPL-X: A new joint 3D model of the human body, face and hands together
-
-[[Paper Page](https://smpl-x.is.tue.mpg.de)] [[Paper](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/497/SMPL-X.pdf)]
-[[Supp. Mat.](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/498/SMPL-X-supp.pdf)]
-
-
-
-## Table of Contents
- * [License](#license)
- * [Description](#description)
- * [News](#news)
- * [Installation](#installation)
- * [Downloading the model](#downloading-the-model)
- * [Loading SMPL-X, SMPL+H and SMPL](#loading-smpl-x-smplh-and-smpl)
- * [SMPL and SMPL+H setup](#smpl-and-smplh-setup)
- * [Model loading](https://github.com/vchoutas/smplx#model-loading)
- * [MANO and FLAME correspondences](#mano-and-flame-correspondences)
- * [Example](#example)
- * [Modifying the global pose of the model](#modifying-the-global-pose-of-the-model)
- * [Citation](#citation)
- * [Acknowledgments](#acknowledgments)
- * [Contact](#contact)
-
-## License
-
-Software Copyright License for **non-commercial scientific research purposes**.
-Please read carefully the [terms and conditions](https://github.com/vchoutas/smplx/blob/master/LICENSE) and any accompanying documentation before you download and/or use the SMPL-X/SMPLify-X model, data and software, (the "Model & Software"), including 3D meshes, blend weights, blend shapes, textures, software, scripts, and animations. By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use of this github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this [License](./LICENSE).
-
-## Disclaimer
-
-The original images used for figures 1 and 2 of the paper can be found at this link.
-The images in the paper are used under license from gettyimages.com.
-We have acquired the right to use them in the publication, but redistribution is not allowed.
-Please follow the instructions at the given link to acquire the right of usage.
-Our results are obtained on the 483 × 724 pixel resolution of the original images.
-
-## Description
-
-*SMPL-X* (SMPL eXpressive) is a unified body model with shape parameters trained jointly for the
-face, hands and body. *SMPL-X* uses standard vertex based linear blend skinning with learned corrective blend
-shapes, has N = 10,475 vertices and K = 54 joints,
-which include joints for the neck, jaw, eyeballs and fingers.
-SMPL-X is defined by a function M(θ, β, ψ), where θ denotes the pose parameters, β the shape parameters, and
-ψ the facial expression parameters.
-
-## News
-
-- 3 November 2020: We release the code to transfer between the models in the
- SMPL family. For more details on the code, go to this [readme
- file](./transfer_model/README.md). A detailed explanation on how the mappings
- were extracted can be found [here](./transfer_model/docs/transfer.md).
-- 23 September 2020: A UV map is now available for SMPL-X, please check the
- Downloads section of the website.
-- 20 August 2020: The full shape and expression space of SMPL-X are now available.
-
-## Installation
-
-To install the model please follow the next steps in the specified order:
-1. To install from PyPi simply run:
- ```Shell
- pip install smplx[all]
- ```
-2. Clone this repository and install it using the *setup.py* script:
-```Shell
-git clone https://github.com/vchoutas/smplx
-python setup.py install
-```
-
-## Downloading the model
-
-To download the *SMPL-X* model go to [this project website](https://smpl-x.is.tue.mpg.de) and register to get access to the downloads section.
-
-To download the *SMPL+H* model go to [this project website](http://mano.is.tue.mpg.de) and register to get access to the downloads section.
-
-To download the *SMPL* model go to [this](http://smpl.is.tue.mpg.de) (male and female models) and [this](http://smplify.is.tue.mpg.de) (gender neutral model) project website and register to get access to the downloads section.
-
-## Loading SMPL-X, SMPL+H and SMPL
-
-### SMPL and SMPL+H setup
-
-The loader gives the option to use any of the SMPL-X, SMPL+H, SMPL, and MANO models. Depending on the model you want to use, please follow the respective download instructions. To switch between MANO, SMPL, SMPL+H and SMPL-X just change the *model_path* or *model_type* parameters. For more details please check the docs of the model classes.
-Before using SMPL and SMPL+H you should follow the instructions in [tools/README.md](./tools/README.md) to remove the
-Chumpy objects from both model pkls, as well as merge the MANO parameters with SMPL+H.
-
-### Model loading
-
-You can either use the [create](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L54)
-function from [body_models](./smplx/body_models.py) or directly call the constructor for the
-[SMPL](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L106),
-[SMPL+H](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L395) and
-[SMPL-X](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L628) model. The path to the model can either be the path to the file with the parameters or a directory with the following structure:
-```bash
-models
-├── smpl
-│ ├── SMPL_FEMALE.pkl
-│ └── SMPL_MALE.pkl
-│ └── SMPL_NEUTRAL.pkl
-├── smplh
-│ ├── SMPLH_FEMALE.pkl
-│ └── SMPLH_MALE.pkl
-├── mano
-| ├── MANO_RIGHT.pkl
-| └── MANO_LEFT.pkl
-└── smplx
- ├── SMPLX_FEMALE.npz
- ├── SMPLX_FEMALE.pkl
- ├── SMPLX_MALE.npz
- ├── SMPLX_MALE.pkl
- ├── SMPLX_NEUTRAL.npz
- └── SMPLX_NEUTRAL.pkl
-```
-
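-As a rough sketch, assuming the directory layout above and the `smplx` Python package, loading a neutral
-SMPL-X model and running a forward pass could look like the following (the `models` path and the zero
-parameters are placeholder assumptions):
-
-```python
-import torch
-import smplx
-
-# Point this at the "models" directory shown above (placeholder path).
-model = smplx.create(model_path="models", model_type="smplx", gender="neutral", ext="npz")
-
-betas = torch.zeros(1, model.num_betas)                    # body shape coefficients
-expression = torch.zeros(1, model.num_expression_coeffs)   # facial expression coefficients
-
-output = model(betas=betas, expression=expression, return_verts=True)
-print(output.vertices.shape)  # (1, 10475, 3)
-print(output.joints.shape)
-```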
-
-## MANO and FLAME correspondences
-
-The vertex correspondences between SMPL-X and MANO, FLAME can be downloaded
-from [the project website](https://smpl-x.is.tue.mpg.de). If you have extracted
-the correspondence data in the folder *correspondences*, then use the following
-scripts to visualize them:
-
-1. To view MANO correspondences run the following command:
-
-```
-python examples/vis_mano_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/MANO_SMPLX_vertex_ids.pkl
-```
-
-2. To view FLAME correspondences run the following command:
-
-```
-python examples/vis_flame_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/SMPL-X__FLAME_vertex_ids.npy
-```
-
-## Example
-
-After installing the *smplx* package and downloading the model parameters you should be able to run the *demo.py*
-script to visualize the results. For this step you have to install the [pyrender](https://pyrender.readthedocs.io/en/latest/index.html) and [trimesh](https://trimsh.org/) packages.
-
-`python examples/demo.py --model-folder $SMPLX_FOLDER --plot-joints=True --gender="neutral"`
-
-
-
-## Modifying the global pose of the model
-
-If you want to modify the global pose of the model, i.e. the root rotation and
-translation, to a new coordinate system for example, you need to take into
-account that the model rotation uses the pelvis as the center of rotation. A
-more detailed description can be found in the following
-[link](https://www.dropbox.com/scl/fi/zkatuv5shs8d4tlwr8ecc/Change-parameters-to-new-coordinate-system.paper?dl=0&rlkey=lotq1sh6wzkmyttisc05h0in0).
-If something is not clear, please let me know so that I can update the
-description.
-
-## Citation
-
-Depending on which model is loaded for your project, i.e. SMPL-X or SMPL+H or SMPL, please cite the most relevant work below, listed in the same order:
-
-```
-@inproceedings{SMPL-X:2019,
- title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image},
- author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. and Tzionas, Dimitrios and Black, Michael J.},
- booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
- year = {2019}
-}
-```
-
-```
-@article{MANO:SIGGRAPHASIA:2017,
- title = {Embodied Hands: Modeling and Capturing Hands and Bodies Together},
- author = {Romero, Javier and Tzionas, Dimitrios and Black, Michael J.},
- journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)},
- volume = {36},
- number = {6},
- series = {245:1--245:17},
- month = nov,
- year = {2017},
- month_numeric = {11}
- }
-```
-
-```
-@article{SMPL:2015,
- author = {Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.},
- title = {{SMPL}: A Skinned Multi-Person Linear Model},
- journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)},
- month = oct,
- number = {6},
- pages = {248:1--248:16},
- publisher = {ACM},
- volume = {34},
- year = {2015}
-}
-```
-
-This repository was originally developed for SMPL-X / SMPLify-X (CVPR 2019), you might be interested in having a look: [https://smpl-x.is.tue.mpg.de](https://smpl-x.is.tue.mpg.de).
-
-## Acknowledgments
-
-### Facial Contour
-
-Special thanks to [Soubhik Sanyal](https://github.com/soubhiksanyal) for sharing the Tensorflow code used for the facial
-landmarks.
-
-## Contact
-The code of this repository was implemented by [Vassilis Choutas](vassilis.choutas@tuebingen.mpg.de).
-
-For questions, please contact [smplx@tue.mpg.de](smplx@tue.mpg.de).
-
-For commercial licensing (and all related questions for business applications), please contact [ps-licensing@tue.mpg.de](ps-licensing@tue.mpg.de).
diff --git a/spaces/aadnk/faster-whisper-webui/src/hooks/progressListener.py b/spaces/aadnk/faster-whisper-webui/src/hooks/progressListener.py
deleted file mode 100644
index a7852a24e237ae864bbce5f37674e1f7c817a1b3..0000000000000000000000000000000000000000
--- a/spaces/aadnk/faster-whisper-webui/src/hooks/progressListener.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from typing import Union
-
-class ProgressListener:
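-    """Minimal callback interface for progress reporting; subclasses override on_progress / on_finished."""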
- def on_progress(self, current: Union[int, float], total: Union[int, float]):
- self.total = total
-
- def on_finished(self):
- pass
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/wrappers.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
deleted file mode 100644
index 8aebf67bf52355a513f21756ee74fe510902d075..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
-
-Wrap some nn modules to support empty tensor input. Currently, these wrappers
-are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
-heads are trained on only positive RoIs.
-"""
-import math
-
-import torch
-import torch.nn as nn
-from torch.nn.modules.utils import _pair, _triple
-
-from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
-
-if torch.__version__ == 'parrots':
- TORCH_VERSION = torch.__version__
-else:
- # torch.__version__ could be 1.3.1+cu92, we only need the first two
- # for comparison
- TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
-
-
-def obsolete_torch_version(torch_version, version_threshold):
- return torch_version == 'parrots' or torch_version <= version_threshold
-
-
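-# Autograd-aware helper that creates an empty tensor of the requested shape while staying connected to the
-# graph, so that backward can return an (empty) gradient shaped like the original input.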
-class NewEmptyTensorOp(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, x, new_shape):
- ctx.shape = x.shape
- return x.new_empty(new_shape)
-
- @staticmethod
- def backward(ctx, grad):
- shape = ctx.shape
- return NewEmptyTensorOp.apply(grad, shape), None
-
-
-@CONV_LAYERS.register_module('Conv', force=True)
-class Conv2d(nn.Conv2d):
-
- def forward(self, x):
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
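-            # The input is empty: derive the output spatial sizes analytically with the standard convolution
-            # arithmetic and return an empty tensor of that shape instead of calling the conv kernel.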
- out_shape = [x.shape[0], self.out_channels]
- for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
- self.padding, self.stride, self.dilation):
- o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
- out_shape.append(o)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- if self.training:
- # produce dummy gradient to avoid DDP warning.
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
- return empty + dummy
- else:
- return empty
-
- return super().forward(x)
-
-
-@CONV_LAYERS.register_module('Conv3d', force=True)
-class Conv3d(nn.Conv3d):
-
- def forward(self, x):
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
- out_shape = [x.shape[0], self.out_channels]
- for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
- self.padding, self.stride, self.dilation):
- o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
- out_shape.append(o)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- if self.training:
- # produce dummy gradient to avoid DDP warning.
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
- return empty + dummy
- else:
- return empty
-
- return super().forward(x)
-
-
-@CONV_LAYERS.register_module()
-@CONV_LAYERS.register_module('deconv')
-@UPSAMPLE_LAYERS.register_module('deconv', force=True)
-class ConvTranspose2d(nn.ConvTranspose2d):
-
- def forward(self, x):
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
- out_shape = [x.shape[0], self.out_channels]
- for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
- self.padding, self.stride,
- self.dilation, self.output_padding):
- out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- if self.training:
- # produce dummy gradient to avoid DDP warning.
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
- return empty + dummy
- else:
- return empty
-
- return super().forward(x)
-
-
-@CONV_LAYERS.register_module()
-@CONV_LAYERS.register_module('deconv3d')
-@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
-class ConvTranspose3d(nn.ConvTranspose3d):
-
- def forward(self, x):
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
- out_shape = [x.shape[0], self.out_channels]
- for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
- self.padding, self.stride,
- self.dilation, self.output_padding):
- out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- if self.training:
- # produce dummy gradient to avoid DDP warning.
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
- return empty + dummy
- else:
- return empty
-
- return super().forward(x)
-
-
-class MaxPool2d(nn.MaxPool2d):
-
- def forward(self, x):
- # PyTorch 1.9 does not support empty tensor inference yet
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
- out_shape = list(x.shape[:2])
- for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
- _pair(self.padding), _pair(self.stride),
- _pair(self.dilation)):
- o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
- o = math.ceil(o) if self.ceil_mode else math.floor(o)
- out_shape.append(o)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- return empty
-
- return super().forward(x)
-
-
-class MaxPool3d(nn.MaxPool3d):
-
- def forward(self, x):
- # PyTorch 1.9 does not support empty tensor inference yet
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
- out_shape = list(x.shape[:2])
- for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size),
- _triple(self.padding),
- _triple(self.stride),
- _triple(self.dilation)):
- o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
- o = math.ceil(o) if self.ceil_mode else math.floor(o)
- out_shape.append(o)
- empty = NewEmptyTensorOp.apply(x, out_shape)
- return empty
-
- return super().forward(x)
-
-
-class Linear(torch.nn.Linear):
-
- def forward(self, x):
-        # empty tensor forward of the Linear layer is supported in PyTorch 1.6
- if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
- out_shape = [x.shape[0], self.out_features]
- empty = NewEmptyTensorOp.apply(x, out_shape)
- if self.training:
- # produce dummy gradient to avoid DDP warning.
- dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
- return empty + dummy
- else:
- return empty
-
- return super().forward(x)
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/config.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/config.py
deleted file mode 100644
index 17149353aefac6d737c67bb2f35a3a6cd2147b0a..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/config.py
+++ /dev/null
@@ -1,688 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import ast
-import copy
-import os
-import os.path as osp
-import platform
-import shutil
-import sys
-import tempfile
-import uuid
-import warnings
-from argparse import Action, ArgumentParser
-from collections import abc
-from importlib import import_module
-
-from addict import Dict
-from yapf.yapflib.yapf_api import FormatCode
-
-from .misc import import_modules_from_strings
-from .path import check_file_exist
-
-if platform.system() == 'Windows':
- import regex as re
-else:
- import re
-
-BASE_KEY = '_base_'
-DELETE_KEY = '_delete_'
-DEPRECATION_KEY = '_deprecation_'
-RESERVED_KEYS = ['filename', 'text', 'pretty_text']
-
-
-class ConfigDict(Dict):
-
- def __missing__(self, name):
- raise KeyError(name)
-
- def __getattr__(self, name):
- try:
- value = super(ConfigDict, self).__getattr__(name)
- except KeyError:
- ex = AttributeError(f"'{self.__class__.__name__}' object has no "
- f"attribute '{name}'")
- except Exception as e:
- ex = e
- else:
- return value
- raise ex
-
-
-def add_args(parser, cfg, prefix=''):
- for k, v in cfg.items():
- if isinstance(v, str):
- parser.add_argument('--' + prefix + k)
- elif isinstance(v, int):
- parser.add_argument('--' + prefix + k, type=int)
- elif isinstance(v, float):
- parser.add_argument('--' + prefix + k, type=float)
- elif isinstance(v, bool):
- parser.add_argument('--' + prefix + k, action='store_true')
- elif isinstance(v, dict):
- add_args(parser, v, prefix + k + '.')
- elif isinstance(v, abc.Iterable):
- parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')
- else:
- print(f'cannot parse key {prefix + k} of type {type(v)}')
- return parser
-
-
-class Config:
- """A facility for config and config files.
-
- It supports common file formats as configs: python/json/yaml. The interface
- is the same as a dict object and also allows access config values as
- attributes.
-
- Example:
- >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
- >>> cfg.a
- 1
- >>> cfg.b
- {'b1': [0, 1]}
- >>> cfg.b.b1
- [0, 1]
- >>> cfg = Config.fromfile('tests/data/config/a.py')
- >>> cfg.filename
- "/home/kchen/projects/mmcv/tests/data/config/a.py"
- >>> cfg.item4
- 'test'
- >>> cfg
- "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
- "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
- """
-
- @staticmethod
- def _validate_py_syntax(filename):
- with open(filename, 'r', encoding='utf-8') as f:
- # Setting encoding explicitly to resolve coding issue on windows
- content = f.read()
- try:
- ast.parse(content)
- except SyntaxError as e:
- raise SyntaxError('There are syntax errors in config '
- f'file {filename}: {e}')
-
- @staticmethod
- def _substitute_predefined_vars(filename, temp_config_name):
- file_dirname = osp.dirname(filename)
- file_basename = osp.basename(filename)
- file_basename_no_extension = osp.splitext(file_basename)[0]
- file_extname = osp.splitext(filename)[1]
- support_templates = dict(
- fileDirname=file_dirname,
- fileBasename=file_basename,
- fileBasenameNoExtension=file_basename_no_extension,
- fileExtname=file_extname)
- with open(filename, 'r', encoding='utf-8') as f:
- # Setting encoding explicitly to resolve coding issue on windows
- config_file = f.read()
- for key, value in support_templates.items():
- regexp = r'\{\{\s*' + str(key) + r'\s*\}\}'
- value = value.replace('\\', '/')
- config_file = re.sub(regexp, value, config_file)
- with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
- tmp_config_file.write(config_file)
-
- @staticmethod
- def _pre_substitute_base_vars(filename, temp_config_name):
-        """Substitute base variable placeholders with unique strings so that
-        parsing works."""
- with open(filename, 'r', encoding='utf-8') as f:
- # Setting encoding explicitly to resolve coding issue on windows
- config_file = f.read()
- base_var_dict = {}
- regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}'
- base_vars = set(re.findall(regexp, config_file))
- for base_var in base_vars:
- randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
- base_var_dict[randstr] = base_var
- regexp = r'\{\{\s*' + BASE_KEY + r'\.' + base_var + r'\s*\}\}'
- config_file = re.sub(regexp, f'"{randstr}"', config_file)
- with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
- tmp_config_file.write(config_file)
- return base_var_dict
-
- @staticmethod
- def _substitute_base_vars(cfg, base_var_dict, base_cfg):
-        """Substitute variable strings with their actual values."""
- cfg = copy.deepcopy(cfg)
-
- if isinstance(cfg, dict):
- for k, v in cfg.items():
- if isinstance(v, str) and v in base_var_dict:
- new_v = base_cfg
- for new_k in base_var_dict[v].split('.'):
- new_v = new_v[new_k]
- cfg[k] = new_v
- elif isinstance(v, (list, tuple, dict)):
- cfg[k] = Config._substitute_base_vars(
- v, base_var_dict, base_cfg)
- elif isinstance(cfg, tuple):
- cfg = tuple(
- Config._substitute_base_vars(c, base_var_dict, base_cfg)
- for c in cfg)
- elif isinstance(cfg, list):
- cfg = [
- Config._substitute_base_vars(c, base_var_dict, base_cfg)
- for c in cfg
- ]
- elif isinstance(cfg, str) and cfg in base_var_dict:
- new_v = base_cfg
- for new_k in base_var_dict[cfg].split('.'):
- new_v = new_v[new_k]
- cfg = new_v
-
- return cfg
-
- @staticmethod
- def _file2dict(filename, use_predefined_variables=True):
- filename = osp.abspath(osp.expanduser(filename))
- check_file_exist(filename)
- fileExtname = osp.splitext(filename)[1]
- if fileExtname not in ['.py', '.json', '.yaml', '.yml']:
- raise IOError('Only py/yml/yaml/json type are supported now!')
-
- with tempfile.TemporaryDirectory() as temp_config_dir:
- temp_config_file = tempfile.NamedTemporaryFile(
- dir=temp_config_dir, suffix=fileExtname)
- if platform.system() == 'Windows':
- temp_config_file.close()
- temp_config_name = osp.basename(temp_config_file.name)
- # Substitute predefined variables
- if use_predefined_variables:
- Config._substitute_predefined_vars(filename,
- temp_config_file.name)
- else:
- shutil.copyfile(filename, temp_config_file.name)
- # Substitute base variables from placeholders to strings
- base_var_dict = Config._pre_substitute_base_vars(
- temp_config_file.name, temp_config_file.name)
-
- if filename.endswith('.py'):
- temp_module_name = osp.splitext(temp_config_name)[0]
- sys.path.insert(0, temp_config_dir)
- Config._validate_py_syntax(filename)
- mod = import_module(temp_module_name)
- sys.path.pop(0)
- cfg_dict = {
- name: value
- for name, value in mod.__dict__.items()
- if not name.startswith('__')
- }
- # delete imported module
- del sys.modules[temp_module_name]
- elif filename.endswith(('.yml', '.yaml', '.json')):
- import annotator.uniformer.mmcv as mmcv
- cfg_dict = mmcv.load(temp_config_file.name)
- # close temp file
- temp_config_file.close()
-
- # check deprecation information
- if DEPRECATION_KEY in cfg_dict:
- deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
- warning_msg = f'The config file {filename} will be deprecated ' \
- 'in the future.'
- if 'expected' in deprecation_info:
- warning_msg += f' Please use {deprecation_info["expected"]} ' \
- 'instead.'
- if 'reference' in deprecation_info:
- warning_msg += ' More information can be found at ' \
- f'{deprecation_info["reference"]}'
- warnings.warn(warning_msg)
-
- cfg_text = filename + '\n'
- with open(filename, 'r', encoding='utf-8') as f:
- # Setting encoding explicitly to resolve coding issue on windows
- cfg_text += f.read()
-
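-        # Handle inheritance: load every file listed under `_base_`, merge them (duplicate keys are rejected),
-        # then merge the child config on top so that it can override values from the bases.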
- if BASE_KEY in cfg_dict:
- cfg_dir = osp.dirname(filename)
- base_filename = cfg_dict.pop(BASE_KEY)
- base_filename = base_filename if isinstance(
- base_filename, list) else [base_filename]
-
- cfg_dict_list = list()
- cfg_text_list = list()
- for f in base_filename:
- _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))
- cfg_dict_list.append(_cfg_dict)
- cfg_text_list.append(_cfg_text)
-
- base_cfg_dict = dict()
- for c in cfg_dict_list:
- duplicate_keys = base_cfg_dict.keys() & c.keys()
- if len(duplicate_keys) > 0:
- raise KeyError('Duplicate key is not allowed among bases. '
- f'Duplicate keys: {duplicate_keys}')
- base_cfg_dict.update(c)
-
- # Substitute base variables from strings to their actual values
- cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,
- base_cfg_dict)
-
- base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
- cfg_dict = base_cfg_dict
-
- # merge cfg_text
- cfg_text_list.append(cfg_text)
- cfg_text = '\n'.join(cfg_text_list)
-
- return cfg_dict, cfg_text
-
- @staticmethod
- def _merge_a_into_b(a, b, allow_list_keys=False):
- """merge dict ``a`` into dict ``b`` (non-inplace).
-
- Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
- in-place modifications.
-
- Args:
- a (dict): The source dict to be merged into ``b``.
-            b (dict): The original dict into which the keys from ``a`` are merged.
- allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
- are allowed in source ``a`` and will replace the element of the
- corresponding index in b if b is a list. Default: False.
-
- Returns:
- dict: The modified dict of ``b`` using ``a``.
-
- Examples:
- # Normally merge a into b.
- >>> Config._merge_a_into_b(
- ... dict(obj=dict(a=2)), dict(obj=dict(a=1)))
- {'obj': {'a': 2}}
-
- # Delete b first and merge a into b.
- >>> Config._merge_a_into_b(
- ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))
- {'obj': {'a': 2}}
-
- # b is a list
- >>> Config._merge_a_into_b(
- ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)
- [{'a': 2}, {'b': 2}]
- """
- b = b.copy()
- for k, v in a.items():
- if allow_list_keys and k.isdigit() and isinstance(b, list):
- k = int(k)
- if len(b) <= k:
- raise KeyError(f'Index {k} exceeds the length of list {b}')
- b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
- elif isinstance(v,
- dict) and k in b and not v.pop(DELETE_KEY, False):
- allowed_types = (dict, list) if allow_list_keys else dict
- if not isinstance(b[k], allowed_types):
- raise TypeError(
- f'{k}={v} in child config cannot inherit from base '
- f'because {k} is a dict in the child config but is of '
- f'type {type(b[k])} in base config. You may set '
- f'`{DELETE_KEY}=True` to ignore the base config')
- b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
- else:
- b[k] = v
- return b
-
- @staticmethod
- def fromfile(filename,
- use_predefined_variables=True,
- import_custom_modules=True):
- cfg_dict, cfg_text = Config._file2dict(filename,
- use_predefined_variables)
- if import_custom_modules and cfg_dict.get('custom_imports', None):
- import_modules_from_strings(**cfg_dict['custom_imports'])
- return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
-
- @staticmethod
- def fromstring(cfg_str, file_format):
- """Generate config from config str.
-
- Args:
- cfg_str (str): Config str.
- file_format (str): Config file format corresponding to the
- config str. Only py/yml/yaml/json type are supported now!
-
- Returns:
- obj:`Config`: Config obj.
- """
- if file_format not in ['.py', '.json', '.yaml', '.yml']:
- raise IOError('Only py/yml/yaml/json type are supported now!')
- if file_format != '.py' and 'dict(' in cfg_str:
- # check if users specify a wrong suffix for python
- warnings.warn(
- 'Please check "file_format", the file format may be .py')
- with tempfile.NamedTemporaryFile(
- 'w', encoding='utf-8', suffix=file_format,
- delete=False) as temp_file:
- temp_file.write(cfg_str)
- # on windows, previous implementation cause error
- # see PR 1077 for details
- cfg = Config.fromfile(temp_file.name)
- os.remove(temp_file.name)
- return cfg
-
- @staticmethod
- def auto_argparser(description=None):
- """Generate argparser from config file automatically (experimental)"""
- partial_parser = ArgumentParser(description=description)
- partial_parser.add_argument('config', help='config file path')
- cfg_file = partial_parser.parse_known_args()[0].config
- cfg = Config.fromfile(cfg_file)
- parser = ArgumentParser(description=description)
- parser.add_argument('config', help='config file path')
- add_args(parser, cfg)
- return parser, cfg
-
- def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
- if cfg_dict is None:
- cfg_dict = dict()
- elif not isinstance(cfg_dict, dict):
- raise TypeError('cfg_dict must be a dict, but '
- f'got {type(cfg_dict)}')
- for key in cfg_dict:
- if key in RESERVED_KEYS:
- raise KeyError(f'{key} is reserved for config file')
-
- super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
- super(Config, self).__setattr__('_filename', filename)
- if cfg_text:
- text = cfg_text
- elif filename:
- with open(filename, 'r') as f:
- text = f.read()
- else:
- text = ''
- super(Config, self).__setattr__('_text', text)
-
- @property
- def filename(self):
- return self._filename
-
- @property
- def text(self):
- return self._text
-
- @property
- def pretty_text(self):
-
- indent = 4
-
- def _indent(s_, num_spaces):
- s = s_.split('\n')
- if len(s) == 1:
- return s_
- first = s.pop(0)
- s = [(num_spaces * ' ') + line for line in s]
- s = '\n'.join(s)
- s = first + '\n' + s
- return s
-
- def _format_basic_types(k, v, use_mapping=False):
- if isinstance(v, str):
- v_str = f"'{v}'"
- else:
- v_str = str(v)
-
- if use_mapping:
- k_str = f"'{k}'" if isinstance(k, str) else str(k)
- attr_str = f'{k_str}: {v_str}'
- else:
- attr_str = f'{str(k)}={v_str}'
- attr_str = _indent(attr_str, indent)
-
- return attr_str
-
- def _format_list(k, v, use_mapping=False):
- # check if all items in the list are dict
- if all(isinstance(_, dict) for _ in v):
- v_str = '[\n'
- v_str += '\n'.join(
- f'dict({_indent(_format_dict(v_), indent)}),'
- for v_ in v).rstrip(',')
- if use_mapping:
- k_str = f"'{k}'" if isinstance(k, str) else str(k)
- attr_str = f'{k_str}: {v_str}'
- else:
- attr_str = f'{str(k)}={v_str}'
- attr_str = _indent(attr_str, indent) + ']'
- else:
- attr_str = _format_basic_types(k, v, use_mapping)
- return attr_str
-
- def _contain_invalid_identifier(dict_str):
- contain_invalid_identifier = False
- for key_name in dict_str:
- contain_invalid_identifier |= \
- (not str(key_name).isidentifier())
- return contain_invalid_identifier
-
- def _format_dict(input_dict, outest_level=False):
- r = ''
- s = []
-
- use_mapping = _contain_invalid_identifier(input_dict)
- if use_mapping:
- r += '{'
- for idx, (k, v) in enumerate(input_dict.items()):
- is_last = idx >= len(input_dict) - 1
- end = '' if outest_level or is_last else ','
- if isinstance(v, dict):
- v_str = '\n' + _format_dict(v)
- if use_mapping:
- k_str = f"'{k}'" if isinstance(k, str) else str(k)
- attr_str = f'{k_str}: dict({v_str}'
- else:
- attr_str = f'{str(k)}=dict({v_str}'
- attr_str = _indent(attr_str, indent) + ')' + end
- elif isinstance(v, list):
- attr_str = _format_list(k, v, use_mapping) + end
- else:
- attr_str = _format_basic_types(k, v, use_mapping) + end
-
- s.append(attr_str)
- r += '\n'.join(s)
- if use_mapping:
- r += '}'
- return r
-
- cfg_dict = self._cfg_dict.to_dict()
- text = _format_dict(cfg_dict, outest_level=True)
- # copied from setup.cfg
- yapf_style = dict(
- based_on_style='pep8',
- blank_line_before_nested_class_or_def=True,
- split_before_expression_after_opening_paren=True)
- text, _ = FormatCode(text, style_config=yapf_style, verify=True)
-
- return text
-
- def __repr__(self):
- return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'
-
- def __len__(self):
- return len(self._cfg_dict)
-
- def __getattr__(self, name):
- return getattr(self._cfg_dict, name)
-
- def __getitem__(self, name):
- return self._cfg_dict.__getitem__(name)
-
- def __setattr__(self, name, value):
- if isinstance(value, dict):
- value = ConfigDict(value)
- self._cfg_dict.__setattr__(name, value)
-
- def __setitem__(self, name, value):
- if isinstance(value, dict):
- value = ConfigDict(value)
- self._cfg_dict.__setitem__(name, value)
-
- def __iter__(self):
- return iter(self._cfg_dict)
-
- def __getstate__(self):
- return (self._cfg_dict, self._filename, self._text)
-
- def __setstate__(self, state):
- _cfg_dict, _filename, _text = state
- super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
- super(Config, self).__setattr__('_filename', _filename)
- super(Config, self).__setattr__('_text', _text)
-
- def dump(self, file=None):
- cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
- if self.filename.endswith('.py'):
- if file is None:
- return self.pretty_text
- else:
- with open(file, 'w', encoding='utf-8') as f:
- f.write(self.pretty_text)
- else:
- import annotator.uniformer.mmcv as mmcv
- if file is None:
- file_format = self.filename.split('.')[-1]
- return mmcv.dump(cfg_dict, file_format=file_format)
- else:
- mmcv.dump(cfg_dict, file)
-
- def merge_from_dict(self, options, allow_list_keys=True):
- """Merge list into cfg_dict.
-
- Merge the dict parsed by MultipleKVAction into this cfg.
-
- Examples:
- >>> options = {'model.backbone.depth': 50,
- ... 'model.backbone.with_cp':True}
- >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
- >>> cfg.merge_from_dict(options)
- >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
- >>> assert cfg_dict == dict(
- ... model=dict(backbone=dict(depth=50, with_cp=True)))
-
- # Merge list element
- >>> cfg = Config(dict(pipeline=[
- ... dict(type='LoadImage'), dict(type='LoadAnnotations')]))
- >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
- >>> cfg.merge_from_dict(options, allow_list_keys=True)
- >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
- >>> assert cfg_dict == dict(pipeline=[
- ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])
-
- Args:
- options (dict): dict of configs to merge from.
- allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
- are allowed in ``options`` and will replace the element of the
- corresponding index in the config if the config is a list.
- Default: True.
- """
- option_cfg_dict = {}
- for full_key, v in options.items():
- d = option_cfg_dict
- key_list = full_key.split('.')
- for subkey in key_list[:-1]:
- d.setdefault(subkey, ConfigDict())
- d = d[subkey]
- subkey = key_list[-1]
- d[subkey] = v
-
- cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
- super(Config, self).__setattr__(
- '_cfg_dict',
- Config._merge_a_into_b(
- option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
-
-
-class DictAction(Action):
- """
- argparse action to split an argument into KEY=VALUE form
- on the first = and append to a dictionary. List options can
-    be passed as comma-separated values, i.e. 'KEY=V1,V2,V3', or with explicit
-    brackets, i.e. 'KEY=[V1,V2,V3]'. It also supports nested brackets to build
-    list/tuple values, e.g. 'KEY=[(V1,V2),(V3,V4)]'.
- """
-
- @staticmethod
- def _parse_int_float_bool(val):
- try:
- return int(val)
- except ValueError:
- pass
- try:
- return float(val)
- except ValueError:
- pass
- if val.lower() in ['true', 'false']:
- return True if val.lower() == 'true' else False
- return val
-
- @staticmethod
- def _parse_iterable(val):
- """Parse iterable values in the string.
-
- All elements inside '()' or '[]' are treated as iterable values.
-
- Args:
- val (str): Value string.
-
- Returns:
- list | tuple: The expanded list or tuple from the string.
-
- Examples:
- >>> DictAction._parse_iterable('1,2,3')
- [1, 2, 3]
- >>> DictAction._parse_iterable('[a, b, c]')
- ['a', 'b', 'c']
- >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
- [(1, 2, 3), ['a', 'b'], 'c']
- """
-
- def find_next_comma(string):
- """Find the position of next comma in the string.
-
- If no ',' is found in the string, return the string length. All
- chars inside '()' and '[]' are treated as one element and thus ','
- inside these brackets are ignored.
- """
- assert (string.count('(') == string.count(')')) and (
- string.count('[') == string.count(']')), \
- f'Imbalanced brackets exist in {string}'
- end = len(string)
- for idx, char in enumerate(string):
- pre = string[:idx]
- # The string before this ',' is balanced
- if ((char == ',') and (pre.count('(') == pre.count(')'))
- and (pre.count('[') == pre.count(']'))):
- end = idx
- break
- return end
-
- # Strip ' and " characters and replace whitespace.
- val = val.strip('\'\"').replace(' ', '')
- is_tuple = False
- if val.startswith('(') and val.endswith(')'):
- is_tuple = True
- val = val[1:-1]
- elif val.startswith('[') and val.endswith(']'):
- val = val[1:-1]
- elif ',' not in val:
- # val is a single value
- return DictAction._parse_int_float_bool(val)
-
- values = []
- while len(val) > 0:
- comma_idx = find_next_comma(val)
- element = DictAction._parse_iterable(val[:comma_idx])
- values.append(element)
- val = val[comma_idx + 1:]
- if is_tuple:
- values = tuple(values)
- return values
-
- def __call__(self, parser, namespace, values, option_string=None):
- options = {}
- for kv in values:
- key, val = kv.split('=', maxsplit=1)
- options[key] = self._parse_iterable(val)
- setattr(namespace, self.dest, options)
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py
deleted file mode 100644
index e71369a58a05fa25e6a754300875fdbb87cb26a5..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .builder import build_iou_calculator
-from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
-
-__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py
deleted file mode 100644
index 5153996aeb706d103d1ad14b61734914eddb7693..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py
+++ /dev/null
@@ -1,572 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
-from mmdet.models.builder import HEADS, build_loss
-from mmdet.models.losses import accuracy
-
-
-@HEADS.register_module()
-class SABLHead(nn.Module):
- """Side-Aware Boundary Localization (SABL) for RoI-Head.
-
- Side-Aware features are extracted by conv layers
- with an attention mechanism.
- Boundary Localization with Bucketing and Bucketing Guided Rescoring
- are implemented in BucketingBBoxCoder.
-
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
-
- Args:
- cls_in_channels (int): Input channels of cls RoI feature. \
- Defaults to 256.
- reg_in_channels (int): Input channels of reg RoI feature. \
- Defaults to 256.
- roi_feat_size (int): Size of RoI features. Defaults to 7.
- reg_feat_up_ratio (int): Upsample ratio of reg features. \
- Defaults to 2.
- reg_pre_kernel (int): Kernel of 2D conv layers before \
- attention pooling. Defaults to 3.
- reg_post_kernel (int): Kernel of 1D conv layers after \
- attention pooling. Defaults to 3.
- reg_pre_num (int): Number of pre convs. Defaults to 2.
- reg_post_num (int): Number of post convs. Defaults to 1.
- num_classes (int): Number of classes in dataset. Defaults to 80.
- cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
- reg_offset_out_channels (int): Hidden and output channel \
- of reg offset branch. Defaults to 256.
- reg_cls_out_channels (int): Hidden and output channel \
- of reg cls branch. Defaults to 256.
- num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
-        num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
-        reg_class_agnostic (bool): Class agnostic regression or not. \
-            Defaults to True.
-        norm_cfg (dict): Config of norm layers. Defaults to None.
-        bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
- loss_cls (dict): Config of classification loss.
- loss_bbox_cls (dict): Config of classification loss for bbox branch.
- loss_bbox_reg (dict): Config of regression loss for bbox branch.
- """
-
- def __init__(self,
- num_classes,
- cls_in_channels=256,
- reg_in_channels=256,
- roi_feat_size=7,
- reg_feat_up_ratio=2,
- reg_pre_kernel=3,
- reg_post_kernel=3,
- reg_pre_num=2,
- reg_post_num=1,
- cls_out_channels=1024,
- reg_offset_out_channels=256,
- reg_cls_out_channels=256,
- num_cls_fcs=1,
- num_reg_fcs=0,
- reg_class_agnostic=True,
- norm_cfg=None,
- bbox_coder=dict(
- type='BucketingBBoxCoder',
- num_buckets=14,
- scale_factor=1.7),
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- loss_bbox_reg=dict(
- type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
- super(SABLHead, self).__init__()
- self.cls_in_channels = cls_in_channels
- self.reg_in_channels = reg_in_channels
- self.roi_feat_size = roi_feat_size
- self.reg_feat_up_ratio = int(reg_feat_up_ratio)
- self.num_buckets = bbox_coder['num_buckets']
- assert self.reg_feat_up_ratio // 2 >= 1
- self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
- assert self.up_reg_feat_size == bbox_coder['num_buckets']
- self.reg_pre_kernel = reg_pre_kernel
- self.reg_post_kernel = reg_post_kernel
- self.reg_pre_num = reg_pre_num
- self.reg_post_num = reg_post_num
- self.num_classes = num_classes
- self.cls_out_channels = cls_out_channels
- self.reg_offset_out_channels = reg_offset_out_channels
- self.reg_cls_out_channels = reg_cls_out_channels
- self.num_cls_fcs = num_cls_fcs
- self.num_reg_fcs = num_reg_fcs
- self.reg_class_agnostic = reg_class_agnostic
- assert self.reg_class_agnostic
- self.norm_cfg = norm_cfg
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.loss_cls = build_loss(loss_cls)
- self.loss_bbox_cls = build_loss(loss_bbox_cls)
- self.loss_bbox_reg = build_loss(loss_bbox_reg)
-
- self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
- self.cls_in_channels,
- self.roi_feat_size,
- self.cls_out_channels)
-
- self.side_num = int(np.ceil(self.num_buckets / 2))
-
- if self.reg_feat_up_ratio > 1:
- self.upsample_x = nn.ConvTranspose1d(
- reg_in_channels,
- reg_in_channels,
- self.reg_feat_up_ratio,
- stride=self.reg_feat_up_ratio)
- self.upsample_y = nn.ConvTranspose1d(
- reg_in_channels,
- reg_in_channels,
- self.reg_feat_up_ratio,
- stride=self.reg_feat_up_ratio)
-
- self.reg_pre_convs = nn.ModuleList()
- for i in range(self.reg_pre_num):
- reg_pre_conv = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=reg_pre_kernel,
- padding=reg_pre_kernel // 2,
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_pre_convs.append(reg_pre_conv)
-
- self.reg_post_conv_xs = nn.ModuleList()
- for i in range(self.reg_post_num):
- reg_post_conv_x = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=(1, reg_post_kernel),
- padding=(0, reg_post_kernel // 2),
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_post_conv_xs.append(reg_post_conv_x)
- self.reg_post_conv_ys = nn.ModuleList()
- for i in range(self.reg_post_num):
- reg_post_conv_y = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=(reg_post_kernel, 1),
- padding=(reg_post_kernel // 2, 0),
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_post_conv_ys.append(reg_post_conv_y)
-
- self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
- self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
-
- self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
- self.relu = nn.ReLU(inplace=True)
-
- self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
- self.reg_in_channels, 1,
- self.reg_cls_out_channels)
- self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
- self.reg_in_channels, 1,
- self.reg_offset_out_channels)
- self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
- self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
-
- def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
- fc_out_channels):
- in_channels = in_channels * roi_feat_size * roi_feat_size
- branch_fcs = nn.ModuleList()
- for i in range(num_branch_fcs):
- fc_in_channels = (in_channels if i == 0 else fc_out_channels)
- branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
- return branch_fcs
-
- def init_weights(self):
- for module_list in [
- self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
- ]:
- for m in module_list.modules():
- if isinstance(m, nn.Linear):
- xavier_init(m, distribution='uniform')
- if self.reg_feat_up_ratio > 1:
- kaiming_init(self.upsample_x, distribution='normal')
- kaiming_init(self.upsample_y, distribution='normal')
-
- normal_init(self.reg_conv_att_x, 0, 0.01)
- normal_init(self.reg_conv_att_y, 0, 0.01)
- normal_init(self.fc_reg_offset, 0, 0.001)
- normal_init(self.fc_reg_cls, 0, 0.01)
- normal_init(self.fc_cls, 0, 0.01)
-
- def cls_forward(self, cls_x):
- cls_x = cls_x.view(cls_x.size(0), -1)
- for fc in self.cls_fcs:
- cls_x = self.relu(fc(cls_x))
- cls_score = self.fc_cls(cls_x)
- return cls_score
-
- def attention_pool(self, reg_x):
- """Extract direction-specific features fx and fy with attention
- methanism."""
- reg_fx = reg_x
- reg_fy = reg_x
- reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
- reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
- reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
- reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
- reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
- reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
- return reg_fx, reg_fy
-
- def side_aware_feature_extractor(self, reg_x):
- """Refine and extract side-aware features without split them."""
- for reg_pre_conv in self.reg_pre_convs:
- reg_x = reg_pre_conv(reg_x)
- reg_fx, reg_fy = self.attention_pool(reg_x)
-
- if self.reg_post_num > 0:
- reg_fx = reg_fx.unsqueeze(2)
- reg_fy = reg_fy.unsqueeze(3)
- for i in range(self.reg_post_num):
- reg_fx = self.reg_post_conv_xs[i](reg_fx)
- reg_fy = self.reg_post_conv_ys[i](reg_fy)
- reg_fx = reg_fx.squeeze(2)
- reg_fy = reg_fy.squeeze(3)
- if self.reg_feat_up_ratio > 1:
- reg_fx = self.relu(self.upsample_x(reg_fx))
- reg_fy = self.relu(self.upsample_y(reg_fy))
- reg_fx = torch.transpose(reg_fx, 1, 2)
- reg_fy = torch.transpose(reg_fy, 1, 2)
- return reg_fx.contiguous(), reg_fy.contiguous()
-
- def reg_pred(self, x, offset_fcs, cls_fcs):
- """Predict bucketing estimation (cls_pred) and fine regression (offset
- pred) with side-aware features."""
- x_offset = x.view(-1, self.reg_in_channels)
- x_cls = x.view(-1, self.reg_in_channels)
-
- for fc in offset_fcs:
- x_offset = self.relu(fc(x_offset))
- for fc in cls_fcs:
- x_cls = self.relu(fc(x_cls))
- offset_pred = self.fc_reg_offset(x_offset)
- cls_pred = self.fc_reg_cls(x_cls)
-
- offset_pred = offset_pred.view(x.size(0), -1)
- cls_pred = cls_pred.view(x.size(0), -1)
-
- return offset_pred, cls_pred
-
- def side_aware_split(self, feat):
- """Split side-aware features aligned with orders of bucketing
- targets."""
- l_end = int(np.ceil(self.up_reg_feat_size / 2))
- r_start = int(np.floor(self.up_reg_feat_size / 2))
- feat_fl = feat[:, :l_end]
- feat_fr = feat[:, r_start:].flip(dims=(1, ))
- feat_fl = feat_fl.contiguous()
- feat_fr = feat_fr.contiguous()
- feat = torch.cat([feat_fl, feat_fr], dim=-1)
- return feat
-
- def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
- """Split batch bbox prediction back to each image."""
- bucket_cls_preds, bucket_offset_preds = bbox_pred
- bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
- bucket_offset_preds = bucket_offset_preds.split(
- num_proposals_per_img, 0)
- bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
- return bbox_pred
-
- def reg_forward(self, reg_x):
- outs = self.side_aware_feature_extractor(reg_x)
- edge_offset_preds = []
- edge_cls_preds = []
- reg_fx = outs[0]
- reg_fy = outs[1]
- offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
- self.reg_cls_fcs)
- offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
- self.reg_cls_fcs)
- offset_pred_x = self.side_aware_split(offset_pred_x)
- offset_pred_y = self.side_aware_split(offset_pred_y)
- cls_pred_x = self.side_aware_split(cls_pred_x)
- cls_pred_y = self.side_aware_split(cls_pred_y)
- edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
- edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
-
- return (edge_cls_preds, edge_offset_preds)
-
- def forward(self, x):
-
- bbox_pred = self.reg_forward(x)
- cls_score = self.cls_forward(x)
-
- return cls_score, bbox_pred
-
- def get_targets(self, sampling_results, gt_bboxes, gt_labels,
- rcnn_train_cfg):
- pos_proposals = [res.pos_bboxes for res in sampling_results]
- neg_proposals = [res.neg_bboxes for res in sampling_results]
- pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
- pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
- cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
- pos_gt_bboxes, pos_gt_labels,
- rcnn_train_cfg)
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
- return (labels, label_weights, (bucket_cls_targets,
- bucket_offset_targets),
- (bucket_cls_weights, bucket_offset_weights))
-
- def bucket_target(self,
- pos_proposals_list,
- neg_proposals_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- rcnn_train_cfg,
- concat=True):
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights) = multi_apply(
- self._bucket_target_single,
- pos_proposals_list,
- neg_proposals_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- cfg=rcnn_train_cfg)
-
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
- bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
- bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
- bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights)
-
- def _bucket_target_single(self, pos_proposals, neg_proposals,
- pos_gt_bboxes, pos_gt_labels, cfg):
- """Compute bucketing estimation targets and fine regression targets for
- a single image.
-
- Args:
- pos_proposals (Tensor): positive proposals of a single image,
- Shape (n_pos, 4)
- neg_proposals (Tensor): negative proposals of a single image,
- Shape (n_neg, 4).
- pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
- of a single image, Shape (n_pos, 4).
- pos_gt_labels (Tensor): gt labels assigned to positive proposals
- of a single image, Shape (n_pos, ).
- cfg (dict): Config of calculating targets
-
- Returns:
- tuple:
-
- - labels (Tensor): Labels in a single image. \
- Shape (n,).
- - label_weights (Tensor): Label weights in a single image.\
- Shape (n,)
- - bucket_cls_targets (Tensor): Bucket cls targets in \
- a single image. Shape (n, num_buckets*2).
- - bucket_cls_weights (Tensor): Bucket cls weights in \
- a single image. Shape (n, num_buckets*2).
- - bucket_offset_targets (Tensor): Bucket offset targets \
- in a single image. Shape (n, num_buckets*2).
- - bucket_offset_weights (Tensor): Bucket offset weights \
- in a single image. Shape (n, num_buckets*2).
- """
- num_pos = pos_proposals.size(0)
- num_neg = neg_proposals.size(0)
- num_samples = num_pos + num_neg
- labels = pos_gt_bboxes.new_full((num_samples, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = pos_proposals.new_zeros(num_samples)
- bucket_cls_targets = pos_proposals.new_zeros(num_samples,
- 4 * self.side_num)
- bucket_cls_weights = pos_proposals.new_zeros(num_samples,
- 4 * self.side_num)
- bucket_offset_targets = pos_proposals.new_zeros(
- num_samples, 4 * self.side_num)
- bucket_offset_weights = pos_proposals.new_zeros(
- num_samples, 4 * self.side_num)
- if num_pos > 0:
- labels[:num_pos] = pos_gt_labels
- label_weights[:num_pos] = 1.0
- (pos_bucket_offset_targets, pos_bucket_offset_weights,
- pos_bucket_cls_targets,
- pos_bucket_cls_weights) = self.bbox_coder.encode(
- pos_proposals, pos_gt_bboxes)
- bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
- bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
- bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
- bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
- if num_neg > 0:
- label_weights[-num_neg:] = 1.0
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights)
-
- def loss(self,
- cls_score,
- bbox_pred,
- rois,
- labels,
- label_weights,
- bbox_targets,
- bbox_weights,
- reduction_override=None):
- losses = dict()
- if cls_score is not None:
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
- losses['loss_cls'] = self.loss_cls(
- cls_score,
- labels,
- label_weights,
- avg_factor=avg_factor,
- reduction_override=reduction_override)
- losses['acc'] = accuracy(cls_score, labels)
-
- if bbox_pred is not None:
- bucket_cls_preds, bucket_offset_preds = bbox_pred
- bucket_cls_targets, bucket_offset_targets = bbox_targets
- bucket_cls_weights, bucket_offset_weights = bbox_weights
- # edge cls
- bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
- bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
- bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
- losses['loss_bbox_cls'] = self.loss_bbox_cls(
- bucket_cls_preds,
- bucket_cls_targets,
- bucket_cls_weights,
- avg_factor=bucket_cls_targets.size(0),
- reduction_override=reduction_override)
-
- losses['loss_bbox_reg'] = self.loss_bbox_reg(
- bucket_offset_preds,
- bucket_offset_targets,
- bucket_offset_weights,
- avg_factor=bucket_offset_targets.size(0),
- reduction_override=reduction_override)
-
- return losses
-
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
- def get_bboxes(self,
- rois,
- cls_score,
- bbox_pred,
- img_shape,
- scale_factor,
- rescale=False,
- cfg=None):
- if isinstance(cls_score, list):
- cls_score = sum(cls_score) / float(len(cls_score))
- scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
-
- if bbox_pred is not None:
- bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
- img_shape)
- else:
- bboxes = rois[:, 1:].clone()
- confids = None
- if img_shape is not None:
- bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
- bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
-
- if rescale and bboxes.size(0) > 0:
- if isinstance(scale_factor, float):
- bboxes /= scale_factor
- else:
- bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
-
- if cfg is None:
- return bboxes, scores
- else:
- det_bboxes, det_labels = multiclass_nms(
- bboxes,
- scores,
- cfg.score_thr,
- cfg.nms,
- cfg.max_per_img,
- score_factors=confids)
-
- return det_bboxes, det_labels
-
- @force_fp32(apply_to=('bbox_preds', ))
- def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
- """Refine bboxes during training.
-
- Args:
- rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
- and bs is the sampled RoIs per image.
- labels (Tensor): Shape (n*bs, ).
- bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
- (n*bs, num_buckets*2)].
- pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
- is a gt bbox.
- img_metas (list[dict]): Meta info of each image.
-
- Returns:
- list[Tensor]: Refined bboxes of each image in a mini-batch.
- """
- img_ids = rois[:, 0].long().unique(sorted=True)
- assert img_ids.numel() == len(img_metas)
-
- bboxes_list = []
- for i in range(len(img_metas)):
- inds = torch.nonzero(
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
- num_rois = inds.numel()
-
- bboxes_ = rois[inds, 1:]
- label_ = labels[inds]
- edge_cls_preds, edge_offset_preds = bbox_preds
- edge_cls_preds_ = edge_cls_preds[inds]
- edge_offset_preds_ = edge_offset_preds[inds]
- bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
- img_meta_ = img_metas[i]
- pos_is_gts_ = pos_is_gts[i]
-
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
- img_meta_)
- # filter gt bboxes
- pos_keep = 1 - pos_is_gts_
- keep_inds = pos_is_gts_.new_ones(num_rois)
- keep_inds[:len(pos_is_gts_)] = pos_keep
-
- bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
-
- return bboxes_list
-
- @force_fp32(apply_to=('bbox_pred', ))
- def regress_by_class(self, rois, label, bbox_pred, img_meta):
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
-
- Args:
- rois (Tensor): shape (n, 4) or (n, 5)
- label (Tensor): shape (n, )
- bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
- (n, num_buckets *2)]
- img_meta (dict): Image meta info.
-
- Returns:
- Tensor: Regressed bboxes, the same shape as input rois.
- """
- assert rois.size(1) == 4 or rois.size(1) == 5
-
- if rois.size(1) == 4:
- new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
- img_meta['img_shape'])
- else:
- bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
- img_meta['img_shape'])
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
-
- return new_rois
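
The trickiest step in the SABL head above is `side_aware_split`: the upsampled 1-D feature is cut in half and the right half is flipped, so both halves enumerate buckets from their own edge inwards, matching the order in which `BucketingBBoxCoder` produces its targets. A minimal stand-alone sketch of that reordering, assuming the default `num_buckets=14` (so `up_reg_feat_size=14`):

```python
import torch

# Toy 1-D feature for one RoI direction, positions 0..13 from left to right.
feat = torch.arange(14).float().unsqueeze(0)      # shape (1, 14)

l_end = 7                                          # ceil(14 / 2)
r_start = 7                                        # floor(14 / 2)
feat_fl = feat[:, :l_end]                          # buckets counted from the left edge
feat_fr = feat[:, r_start:].flip(dims=(1,))        # buckets counted from the right edge

print(torch.cat([feat_fl, feat_fr], dim=-1))
# tensor([[ 0.,  1.,  2.,  3.,  4.,  5.,  6., 13., 12., 11., 10.,  9.,  8.,  7.]])
```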
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py
deleted file mode 100644
index 961bc92237663ad5343d3d08eb9c0e4e811ada05..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py
+++ /dev/null
@@ -1,647 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
- build_assigner, build_sampler, distance2bbox,
- images_to_levels, multi_apply, multiclass_nms,
- reduce_mean, unmap)
-from ..builder import HEADS, build_loss
-from .anchor_head import AnchorHead
-
-
-class Integral(nn.Module):
- """A fixed layer for calculating integral result from distribution.
-
- This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
- where P(y_i) denotes the softmax vector that represents the discrete
- distribution and y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}.
-
- Args:
- reg_max (int): The maximal value of the discrete set. Default: 16. You
- may want to reset it according to your new dataset or related
- settings.
- """
-
- def __init__(self, reg_max=16):
- super(Integral, self).__init__()
- self.reg_max = reg_max
- self.register_buffer('project',
- torch.linspace(0, self.reg_max, self.reg_max + 1))
-
- def forward(self, x):
- """Forward feature from the regression head to get integral result of
- bounding box location.
-
- Args:
- x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
- n is self.reg_max.
-
- Returns:
- x (Tensor): Integral result of box locations, i.e., distance
- offsets from the box center in four directions, shape (N, 4).
- """
- x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
- x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
- return x
-
-
-@HEADS.register_module()
-class GFLHead(AnchorHead):
- """Generalized Focal Loss: Learning Qualified and Distributed Bounding
- Boxes for Dense Object Detection.
-
- GFL head structure is similar to ATSS; however, GFL uses
- 1) joint representation for classification and localization quality, and
- 2) flexible General distribution for bounding box locations,
- which are supervised by
- Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively
-
- https://arxiv.org/abs/2006.04388
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- stacked_convs (int): Number of conv layers in cls and reg tower.
- Default: 4.
- conv_cfg (dict): dictionary to construct and config conv layer.
- Default: None.
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: dict(type='GN', num_groups=32, requires_grad=True).
- loss_qfl (dict): Config of Quality Focal Loss (QFL).
- reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
- in QFL setting. Default: 16.
- Example:
- >>> self = GFLHead(11, 7)
- >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
- >>> cls_quality_score, bbox_pred = self.forward(feats)
- >>> assert len(cls_quality_score) == len(self.scales)
- """
-
- def __init__(self,
- num_classes,
- in_channels,
- stacked_convs=4,
- conv_cfg=None,
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
- loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
- reg_max=16,
- **kwargs):
- self.stacked_convs = stacked_convs
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.reg_max = reg_max
- super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)
-
- self.sampling = False
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- # SSD sampling=False so use PseudoSampler
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
-
- self.integral = Integral(self.reg_max)
- self.loss_dfl = build_loss(loss_dfl)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- self.relu = nn.ReLU(inplace=True)
- self.cls_convs = nn.ModuleList()
- self.reg_convs = nn.ModuleList()
- for i in range(self.stacked_convs):
- chn = self.in_channels if i == 0 else self.feat_channels
- self.cls_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- self.reg_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- assert self.num_anchors == 1, 'anchor free version'
- self.gfl_cls = nn.Conv2d(
- self.feat_channels, self.cls_out_channels, 3, padding=1)
- self.gfl_reg = nn.Conv2d(
- self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
- self.scales = nn.ModuleList(
- [Scale(1.0) for _ in self.anchor_generator.strides])
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.cls_convs:
- normal_init(m.conv, std=0.01)
- for m in self.reg_convs:
- normal_init(m.conv, std=0.01)
- bias_cls = bias_init_with_prob(0.01)
- normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
- normal_init(self.gfl_reg, std=0.01)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification and quality (IoU)
- joint scores for all scale levels, each is a 4D-tensor,
- the channel number is num_classes.
- bbox_preds (list[Tensor]): Box distribution logits for all
- scale levels, each is a 4D-tensor, the channel number is
- 4*(n+1), n is max value of integral set.
- """
- return multi_apply(self.forward_single, feats, self.scales)
-
- def forward_single(self, x, scale):
- """Forward feature of a single scale level.
-
- Args:
- x (Tensor): Features of a single scale level.
- scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
- the bbox prediction.
-
- Returns:
- tuple:
- cls_score (Tensor): Cls and quality joint scores for a single
- scale level, the channel number is num_classes.
- bbox_pred (Tensor): Box distribution logits for a single scale
- level, the channel number is 4*(n+1), n is max value of
- integral set.
- """
- cls_feat = x
- reg_feat = x
- for cls_conv in self.cls_convs:
- cls_feat = cls_conv(cls_feat)
- for reg_conv in self.reg_convs:
- reg_feat = reg_conv(reg_feat)
- cls_score = self.gfl_cls(cls_feat)
- bbox_pred = scale(self.gfl_reg(reg_feat)).float()
- return cls_score, bbox_pred
-
- def anchor_center(self, anchors):
- """Get anchor centers from anchors.
-
- Args:
- anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
-
- Returns:
- Tensor: Anchor centers with shape (N, 2), "xy" format.
- """
- anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
- anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
- return torch.stack([anchors_cx, anchors_cy], dim=-1)
-
- def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
- bbox_targets, stride, num_total_samples):
- """Compute loss of a single scale level.
-
- Args:
- anchors (Tensor): Box reference for each scale level with shape
- (N, num_total_anchors, 4).
- cls_score (Tensor): Cls and quality joint scores for each scale
- level has shape (N, num_classes, H, W).
- bbox_pred (Tensor): Box distribution logits for each scale
- level with shape (N, 4*(n+1), H, W), n is max value of integral
- set.
- labels (Tensor): Labels of each anchors with shape
- (N, num_total_anchors).
- label_weights (Tensor): Label weights of each anchor with shape
- (N, num_total_anchors)
- bbox_targets (Tensor): BBox regression targets of each anchor with
- shape (N, num_total_anchors, 4).
- stride (tuple): Stride in this scale level.
- num_total_samples (int): Number of positive samples that is
- reduced over all GPUs.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- assert stride[0] == stride[1], 'h stride is not equal to w stride!'
- anchors = anchors.reshape(-1, 4)
- cls_score = cls_score.permute(0, 2, 3,
- 1).reshape(-1, self.cls_out_channels)
- bbox_pred = bbox_pred.permute(0, 2, 3,
- 1).reshape(-1, 4 * (self.reg_max + 1))
- bbox_targets = bbox_targets.reshape(-1, 4)
- labels = labels.reshape(-1)
- label_weights = label_weights.reshape(-1)
-
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- bg_class_ind = self.num_classes
- pos_inds = ((labels >= 0)
- & (labels < bg_class_ind)).nonzero().squeeze(1)
- score = label_weights.new_zeros(labels.shape)
-
- if len(pos_inds) > 0:
- pos_bbox_targets = bbox_targets[pos_inds]
- pos_bbox_pred = bbox_pred[pos_inds]
- pos_anchors = anchors[pos_inds]
- pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
-
- weight_targets = cls_score.detach().sigmoid()
- weight_targets = weight_targets.max(dim=1)[0][pos_inds]
- pos_bbox_pred_corners = self.integral(pos_bbox_pred)
- pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
- pos_bbox_pred_corners)
- pos_decode_bbox_targets = pos_bbox_targets / stride[0]
- score[pos_inds] = bbox_overlaps(
- pos_decode_bbox_pred.detach(),
- pos_decode_bbox_targets,
- is_aligned=True)
- pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
- target_corners = bbox2distance(pos_anchor_centers,
- pos_decode_bbox_targets,
- self.reg_max).reshape(-1)
-
- # regression loss
- loss_bbox = self.loss_bbox(
- pos_decode_bbox_pred,
- pos_decode_bbox_targets,
- weight=weight_targets,
- avg_factor=1.0)
-
- # dfl loss
- loss_dfl = self.loss_dfl(
- pred_corners,
- target_corners,
- weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
- avg_factor=4.0)
- else:
- loss_bbox = bbox_pred.sum() * 0
- loss_dfl = bbox_pred.sum() * 0
- weight_targets = bbox_pred.new_tensor(0)
-
- # cls (qfl) loss
- loss_cls = self.loss_cls(
- cls_score, (labels, score),
- weight=label_weights,
- avg_factor=num_total_samples)
-
- return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Cls and quality scores for each scale
- level has shape (N, num_classes, H, W).
- bbox_preds (list[Tensor]): Box distribution logits for each scale
- level with shape (N, 4*(n+1), H, W), n is max value of integral
- set.
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (list[Tensor] | None): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
-
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=label_channels)
- if cls_reg_targets is None:
- return None
-
- (anchor_list, labels_list, label_weights_list, bbox_targets_list,
- bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
-
- num_total_samples = reduce_mean(
- torch.tensor(num_total_pos, dtype=torch.float,
- device=device)).item()
- num_total_samples = max(num_total_samples, 1.0)
-
- losses_cls, losses_bbox, losses_dfl,\
- avg_factor = multi_apply(
- self.loss_single,
- anchor_list,
- cls_scores,
- bbox_preds,
- labels_list,
- label_weights_list,
- bbox_targets_list,
- self.anchor_generator.strides,
- num_total_samples=num_total_samples)
-
- avg_factor = sum(avg_factor)
- avg_factor = reduce_mean(avg_factor).item()
- losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
- losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
- return dict(
- loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
-
- def _get_bboxes(self,
- cls_scores,
- bbox_preds,
- mlvl_anchors,
- img_shapes,
- scale_factors,
- cfg,
- rescale=False,
- with_nms=True):
- """Transform outputs for a single batch item into labeled boxes.
-
- Args:
- cls_scores (list[Tensor]): Box scores for a single scale level
- has shape (N, num_classes, H, W).
- bbox_preds (list[Tensor]): Box distribution logits for a single
- scale level with shape (N, 4*(n+1), H, W), n is max value of
- integral set.
- mlvl_anchors (list[Tensor]): Box reference for a single scale level
- with shape (num_total_anchors, 4).
- img_shapes (list[tuple[int]]): Shape of the input image,
- list[(height, width, 3)].
- scale_factors (list[ndarray]): Scale factor of the image arranged as
- (w_scale, h_scale, w_scale, h_scale).
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
- The first item is an (n, 5) tensor, where 5 represent
- (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
- The shape of the second tensor in the tuple is (n,), and
- each element represents the class label of the corresponding
- box.
- """
- cfg = self.test_cfg if cfg is None else cfg
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
- batch_size = cls_scores[0].shape[0]
-
- mlvl_bboxes = []
- mlvl_scores = []
- for cls_score, bbox_pred, stride, anchors in zip(
- cls_scores, bbox_preds, self.anchor_generator.strides,
- mlvl_anchors):
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
- assert stride[0] == stride[1]
- scores = cls_score.permute(0, 2, 3, 1).reshape(
- batch_size, -1, self.cls_out_channels).sigmoid()
- bbox_pred = bbox_pred.permute(0, 2, 3, 1)
-
- bbox_pred = self.integral(bbox_pred) * stride[0]
- bbox_pred = bbox_pred.reshape(batch_size, -1, 4)
-
- nms_pre = cfg.get('nms_pre', -1)
- if nms_pre > 0 and scores.shape[1] > nms_pre:
- max_scores, _ = scores.max(-1)
- _, topk_inds = max_scores.topk(nms_pre)
- batch_inds = torch.arange(batch_size).view(
- -1, 1).expand_as(topk_inds).long()
- anchors = anchors[topk_inds, :]
- bbox_pred = bbox_pred[batch_inds, topk_inds, :]
- scores = scores[batch_inds, topk_inds, :]
- else:
- anchors = anchors.expand_as(bbox_pred)
-
- bboxes = distance2bbox(
- self.anchor_center(anchors), bbox_pred, max_shape=img_shapes)
- mlvl_bboxes.append(bboxes)
- mlvl_scores.append(scores)
-
- batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
- if rescale:
- batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
- scale_factors).unsqueeze(1)
-
- batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
- # Add a dummy background class to the backend when using sigmoid
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
- # BG cat_id: num_class
- padding = batch_mlvl_scores.new_zeros(batch_size,
- batch_mlvl_scores.shape[1], 1)
- batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
-
- if with_nms:
- det_results = []
- for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
- batch_mlvl_scores):
- det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
- cfg.score_thr, cfg.nms,
- cfg.max_per_img)
- det_results.append(tuple([det_bbox, det_label]))
- else:
- det_results = [
- tuple(mlvl_bs)
- for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
- ]
- return det_results
-
- def get_targets(self,
- anchor_list,
- valid_flag_list,
- gt_bboxes_list,
- img_metas,
- gt_bboxes_ignore_list=None,
- gt_labels_list=None,
- label_channels=1,
- unmap_outputs=True):
- """Get targets for GFL head.
-
- This method is almost the same as `AnchorHead.get_targets()`. Besides
- returning the targets as the parent method does, it also returns the
- anchors as the first element of the returned tuple.
- """
- num_imgs = len(img_metas)
- assert len(anchor_list) == len(valid_flag_list) == num_imgs
-
- # anchor number of multi levels
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
- num_level_anchors_list = [num_level_anchors] * num_imgs
-
- # concat all level anchors and flags to a single tensor
- for i in range(num_imgs):
- assert len(anchor_list[i]) == len(valid_flag_list[i])
- anchor_list[i] = torch.cat(anchor_list[i])
- valid_flag_list[i] = torch.cat(valid_flag_list[i])
-
- # compute targets for each image
- if gt_bboxes_ignore_list is None:
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
- if gt_labels_list is None:
- gt_labels_list = [None for _ in range(num_imgs)]
- (all_anchors, all_labels, all_label_weights, all_bbox_targets,
- all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
- self._get_target_single,
- anchor_list,
- valid_flag_list,
- num_level_anchors_list,
- gt_bboxes_list,
- gt_bboxes_ignore_list,
- gt_labels_list,
- img_metas,
- label_channels=label_channels,
- unmap_outputs=unmap_outputs)
- # no valid anchors
- if any([labels is None for labels in all_labels]):
- return None
- # sampled anchors of all images
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
- # split targets to a list w.r.t. multiple levels
- anchors_list = images_to_levels(all_anchors, num_level_anchors)
- labels_list = images_to_levels(all_labels, num_level_anchors)
- label_weights_list = images_to_levels(all_label_weights,
- num_level_anchors)
- bbox_targets_list = images_to_levels(all_bbox_targets,
- num_level_anchors)
- bbox_weights_list = images_to_levels(all_bbox_weights,
- num_level_anchors)
- return (anchors_list, labels_list, label_weights_list,
- bbox_targets_list, bbox_weights_list, num_total_pos,
- num_total_neg)
-
- def _get_target_single(self,
- flat_anchors,
- valid_flags,
- num_level_anchors,
- gt_bboxes,
- gt_bboxes_ignore,
- gt_labels,
- img_meta,
- label_channels=1,
- unmap_outputs=True):
- """Compute regression, classification targets for anchors in a single
- image.
-
- Args:
- flat_anchors (Tensor): Multi-level anchors of the image, which are
- concatenated into a single tensor of shape (num_anchors, 4)
- valid_flags (Tensor): Multi level valid flags of the image,
- which are concatenated into a single tensor of
- shape (num_anchors,).
- num_level_anchors (Tensor): Number of anchors of each scale level.
- gt_bboxes (Tensor): Ground truth bboxes of the image,
- shape (num_gts, 4).
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4).
- gt_labels (Tensor): Ground truth labels of each box,
- shape (num_gts,).
- img_meta (dict): Meta info of the image.
- label_channels (int): Channel of label.
- unmap_outputs (bool): Whether to map outputs back to the original
- set of anchors.
-
- Returns:
- tuple: N is the number of total anchors in the image.
- anchors (Tensor): All anchors in the image with shape (N, 4).
- labels (Tensor): Labels of all anchors in the image with shape
- (N,).
- label_weights (Tensor): Label weights of all anchor in the
- image with shape (N,).
- bbox_targets (Tensor): BBox targets of all anchors in the
- image with shape (N, 4).
- bbox_weights (Tensor): BBox weights of all anchors in the
- image with shape (N, 4).
- pos_inds (Tensor): Indices of positive anchor with shape
- (num_pos,).
- neg_inds (Tensor): Indices of negative anchor with shape
- (num_neg,).
- """
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
- img_meta['img_shape'][:2],
- self.train_cfg.allowed_border)
- if not inside_flags.any():
- return (None, ) * 7
- # assign gt and sample anchors
- anchors = flat_anchors[inside_flags, :]
-
- num_level_anchors_inside = self.get_num_level_anchors_inside(
- num_level_anchors, inside_flags)
- assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
- gt_bboxes, gt_bboxes_ignore,
- gt_labels)
-
- sampling_result = self.sampler.sample(assign_result, anchors,
- gt_bboxes)
-
- num_valid_anchors = anchors.shape[0]
- bbox_targets = torch.zeros_like(anchors)
- bbox_weights = torch.zeros_like(anchors)
- labels = anchors.new_full((num_valid_anchors, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
-
- pos_inds = sampling_result.pos_inds
- neg_inds = sampling_result.neg_inds
- if len(pos_inds) > 0:
- pos_bbox_targets = sampling_result.pos_gt_bboxes
- bbox_targets[pos_inds, :] = pos_bbox_targets
- bbox_weights[pos_inds, :] = 1.0
- if gt_labels is None:
- # Only rpn gives gt_labels as None
- # Foreground is the first class
- labels[pos_inds] = 0
- else:
- labels[pos_inds] = gt_labels[
- sampling_result.pos_assigned_gt_inds]
- if self.train_cfg.pos_weight <= 0:
- label_weights[pos_inds] = 1.0
- else:
- label_weights[pos_inds] = self.train_cfg.pos_weight
- if len(neg_inds) > 0:
- label_weights[neg_inds] = 1.0
-
- # map up to original set of anchors
- if unmap_outputs:
- num_total_anchors = flat_anchors.size(0)
- anchors = unmap(anchors, num_total_anchors, inside_flags)
- labels = unmap(
- labels, num_total_anchors, inside_flags, fill=self.num_classes)
- label_weights = unmap(label_weights, num_total_anchors,
- inside_flags)
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
-
- return (anchors, labels, label_weights, bbox_targets, bbox_weights,
- pos_inds, neg_inds)
-
- def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
- split_inside_flags = torch.split(inside_flags, num_level_anchors)
- num_level_anchors_inside = [
- int(flags.sum()) for flags in split_inside_flags
- ]
- return num_level_anchors_inside
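
The `Integral` module defined at the top of this file is the heart of GFL's General distribution: each box side is predicted as logits over `reg_max + 1` discrete positions, and the decoded distance is simply the expectation of their softmax. A stand-alone sketch of that computation with the default `reg_max=16` (inputs are random placeholders):

```python
import torch
import torch.nn.functional as F

reg_max = 16
project = torch.linspace(0, reg_max, reg_max + 1)   # [0, 1, ..., 16]

logits = torch.randn(2, 4 * (reg_max + 1))          # toy head output for 2 anchors
probs = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
offsets = F.linear(probs, project).reshape(-1, 4)   # E[y] per side, in stride units

print(offsets.shape)   # torch.Size([2, 4])
```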
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/ann_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/ann_head.py
deleted file mode 100644
index 30aaacc2cafc568d3de71d1477b4de0dc0fea9d3..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/ann_head.py
+++ /dev/null
@@ -1,245 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from ..builder import HEADS
-from ..utils import SelfAttentionBlock as _SelfAttentionBlock
-from .decode_head import BaseDecodeHead
-
-
-class PPMConcat(nn.ModuleList):
- """Pyramid Pooling Module that only concat the features of each layer.
-
- Args:
- pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module.
- """
-
- def __init__(self, pool_scales=(1, 3, 6, 8)):
- super(PPMConcat, self).__init__(
- [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales])
-
- def forward(self, feats):
- """Forward function."""
- ppm_outs = []
- for ppm in self:
- ppm_out = ppm(feats)
- ppm_outs.append(ppm_out.view(*feats.shape[:2], -1))
- concat_outs = torch.cat(ppm_outs, dim=2)
- return concat_outs
-
-
-class SelfAttentionBlock(_SelfAttentionBlock):
- """Make a ANN used SelfAttentionBlock.
-
- Args:
- low_in_channels (int): Input channels of lower level feature,
- which is the key feature for self-attention.
- high_in_channels (int): Input channels of higher level feature,
- which is the query feature for self-attention.
- channels (int): Output channels of key/query transform.
- out_channels (int): Output channels.
- share_key_query (bool): Whether share projection weight between key
- and query projection.
- query_scale (int): The scale of query feature map.
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, low_in_channels, high_in_channels, channels,
- out_channels, share_key_query, query_scale, key_pool_scales,
- conv_cfg, norm_cfg, act_cfg):
- key_psp = PPMConcat(key_pool_scales)
- if query_scale > 1:
- query_downsample = nn.MaxPool2d(kernel_size=query_scale)
- else:
- query_downsample = None
- super(SelfAttentionBlock, self).__init__(
- key_in_channels=low_in_channels,
- query_in_channels=high_in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=share_key_query,
- query_downsample=query_downsample,
- key_downsample=key_psp,
- key_query_num_convs=1,
- key_query_norm=True,
- value_out_num_convs=1,
- value_out_norm=False,
- matmul_norm=True,
- with_out=True,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
-
-
-class AFNB(nn.Module):
- """Asymmetric Fusion Non-local Block(AFNB)
-
- Args:
- low_in_channels (int): Input channels of lower level feature,
- which is the key feature for self-attention.
- high_in_channels (int): Input channels of higher level feature,
- which is the query feature for self-attention.
- channels (int): Output channels of key/query transform.
- out_channels (int): Output channels.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, low_in_channels, high_in_channels, channels,
- out_channels, query_scales, key_pool_scales, conv_cfg,
- norm_cfg, act_cfg):
- super(AFNB, self).__init__()
- self.stages = nn.ModuleList()
- for query_scale in query_scales:
- self.stages.append(
- SelfAttentionBlock(
- low_in_channels=low_in_channels,
- high_in_channels=high_in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=False,
- query_scale=query_scale,
- key_pool_scales=key_pool_scales,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
- self.bottleneck = ConvModule(
- out_channels + high_in_channels,
- out_channels,
- 1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=None)
-
- def forward(self, low_feats, high_feats):
- """Forward function."""
- priors = [stage(high_feats, low_feats) for stage in self.stages]
- context = torch.stack(priors, dim=0).sum(dim=0)
- output = self.bottleneck(torch.cat([context, high_feats], 1))
- return output
-
-
-class APNB(nn.Module):
- """Asymmetric Pyramid Non-local Block (APNB)
-
- Args:
- in_channels (int): Input channels of key/query feature,
- which is the key feature for self-attention.
- channels (int): Output channels of key/query transform.
- out_channels (int): Output channels.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, in_channels, channels, out_channels, query_scales,
- key_pool_scales, conv_cfg, norm_cfg, act_cfg):
- super(APNB, self).__init__()
- self.stages = nn.ModuleList()
- for query_scale in query_scales:
- self.stages.append(
- SelfAttentionBlock(
- low_in_channels=in_channels,
- high_in_channels=in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=True,
- query_scale=query_scale,
- key_pool_scales=key_pool_scales,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
- self.bottleneck = ConvModule(
- 2 * in_channels,
- out_channels,
- 1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
-
- def forward(self, feats):
- """Forward function."""
- priors = [stage(feats, feats) for stage in self.stages]
- context = torch.stack(priors, dim=0).sum(dim=0)
- output = self.bottleneck(torch.cat([context, feats], 1))
- return output
-
-
-@HEADS.register_module()
-class ANNHead(BaseDecodeHead):
- """Asymmetric Non-local Neural Networks for Semantic Segmentation.
-
- This head is the implementation of `ANNNet
- <https://arxiv.org/abs/1908.07678>`_.
-
- Args:
- project_channels (int): Projection channels for Nonlocal.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): The pooling scales of key feature map.
- Default: (1, 3, 6, 8).
- """
-
- def __init__(self,
- project_channels,
- query_scales=(1, ),
- key_pool_scales=(1, 3, 6, 8),
- **kwargs):
- super(ANNHead, self).__init__(
- input_transform='multiple_select', **kwargs)
- assert len(self.in_channels) == 2
- low_in_channels, high_in_channels = self.in_channels
- self.project_channels = project_channels
- self.fusion = AFNB(
- low_in_channels=low_in_channels,
- high_in_channels=high_in_channels,
- out_channels=high_in_channels,
- channels=project_channels,
- query_scales=query_scales,
- key_pool_scales=key_pool_scales,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.bottleneck = ConvModule(
- high_in_channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.context = APNB(
- in_channels=self.channels,
- out_channels=self.channels,
- channels=project_channels,
- query_scales=query_scales,
- key_pool_scales=key_pool_scales,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- low_feats, high_feats = self._transform_inputs(inputs)
- output = self.fusion(low_feats, high_feats)
- output = self.dropout(output)
- output = self.bottleneck(output)
- output = self.context(output)
- output = self.cls_seg(output)
-
- return output
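
What makes AFNB and APNB "asymmetric" is `PPMConcat`: keys and values are adaptively pooled down to a small fixed number of positions before attention, so the attention map is only (H·W) × 110 instead of (H·W) × (H·W). A quick sketch of where that 110 comes from with the default `key_pool_scales=(1, 3, 6, 8)`:

```python
import torch
import torch.nn as nn

feats = torch.randn(2, 64, 32, 32)                       # (N, C, H, W) key feature
pools = [nn.AdaptiveAvgPool2d(s) for s in (1, 3, 6, 8)]  # default key_pool_scales

outs = [p(feats).view(*feats.shape[:2], -1) for p in pools]
keys = torch.cat(outs, dim=2)

print(keys.shape)   # torch.Size([2, 64, 110]) -> 1 + 9 + 36 + 64 pooled positions
```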
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/lraspp_m-v3-d8.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/lraspp_m-v3-d8.py
deleted file mode 100644
index 93258242a90695cc94a7c6bd41562d6a75988771..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/models/lraspp_m-v3-d8.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- backbone=dict(
- type='MobileNetV3',
- arch='large',
- out_indices=(1, 3, 16),
- norm_cfg=norm_cfg),
- decode_head=dict(
- type='LRASPPHead',
- in_channels=(16, 24, 960),
- in_index=(0, 1, 2),
- channels=128,
- input_transform='multiple_select',
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'),
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
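
This `_base_` file only defines the model dict; it is meant to be inherited by a full config and instantiated through mmsegmentation's registry. A hedged usage sketch (the relative path and the availability of `mmcv`/`mmseg` in the environment are assumptions, not part of this repository):

```python
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile('configs/_base_/models/lraspp_m-v3-d8.py')  # assumed path
model = build_segmentor(cfg.model)  # EncoderDecoder: MobileNetV3 backbone + LRASPPHead
print(type(model).__name__)
```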
diff --git a/spaces/akhaliq/animeganv2-onnx/README.md b/spaces/akhaliq/animeganv2-onnx/README.md
deleted file mode 100644
index 91f49bdfeaffb8d5dd540d43c7a4923dc07b0869..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/animeganv2-onnx/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Animeganv2 Onnx
-emoji: 📉
-colorFrom: pink
-colorTo: green
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/0.html b/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/0.html
deleted file mode 100644
index d57fc73a56f75bfccf59adf1325cb36d67a7a017..0000000000000000000000000000000000000000
--- a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/0.html
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-
- brax visualizer
-
-
-
-
-
-
-
-
diff --git a/spaces/algomuffin/jojo_fork/e4e/criteria/id_loss.py b/spaces/algomuffin/jojo_fork/e4e/criteria/id_loss.py
deleted file mode 100644
index bab806172eff18c0630536ae96817508c3197b8b..0000000000000000000000000000000000000000
--- a/spaces/algomuffin/jojo_fork/e4e/criteria/id_loss.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import torch
-from torch import nn
-from configs.paths_config import model_paths
-from models.encoders.model_irse import Backbone
-
-
-class IDLoss(nn.Module):
- def __init__(self):
- super(IDLoss, self).__init__()
- print('Loading ResNet ArcFace')
- self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
- self.facenet.load_state_dict(torch.load(model_paths['ir_se50']))
- self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
- self.facenet.eval()
- for module in [self.facenet, self.face_pool]:
- for param in module.parameters():
- param.requires_grad = False
-
- def extract_feats(self, x):
- x = x[:, :, 35:223, 32:220] # Crop interesting region
- x = self.face_pool(x)
- x_feats = self.facenet(x)
- return x_feats
-
- def forward(self, y_hat, y, x):
- n_samples = x.shape[0]
- x_feats = self.extract_feats(x)
- y_feats = self.extract_feats(y) # Otherwise use the feature from there
- y_hat_feats = self.extract_feats(y_hat)
- y_feats = y_feats.detach()
- loss = 0
- sim_improvement = 0
- id_logs = []
- count = 0
- for i in range(n_samples):
- diff_target = y_hat_feats[i].dot(y_feats[i])
- diff_input = y_hat_feats[i].dot(x_feats[i])
- diff_views = y_feats[i].dot(x_feats[i])
- id_logs.append({'diff_target': float(diff_target),
- 'diff_input': float(diff_input),
- 'diff_views': float(diff_views)})
- loss += 1 - diff_target
- id_diff = float(diff_target) - float(diff_views)
- sim_improvement += id_diff
- count += 1
-
- return loss / count, sim_improvement / count, id_logs
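
The per-sample quantities in `IDLoss.forward` are plain dot products of ArcFace embeddings; assuming the `ir_se50` backbone returns L2-normalised features, they behave as cosine similarities, and `1 - diff_target` pulls the generated face towards the target identity. An isolated sketch of that arithmetic with random stand-in embeddings:

```python
import torch
import torch.nn.functional as F

y_hat_feat = F.normalize(torch.randn(512), dim=0)   # embedding of the generated face
y_feat = F.normalize(torch.randn(512), dim=0)       # embedding of the target face
x_feat = F.normalize(torch.randn(512), dim=0)       # embedding of the input face

diff_target = y_hat_feat.dot(y_feat)                 # similarity the loss rewards
loss_term = 1 - diff_target                          # per-sample identity loss
sim_improvement = float(diff_target) - float(y_feat.dot(x_feat))
```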
diff --git a/spaces/ali-ghamdan/deoldify/fastai/general_optimizer.py b/spaces/ali-ghamdan/deoldify/fastai/general_optimizer.py
deleted file mode 100644
index f6a0487d582fe6264627d302d6580364affdf754..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/deoldify/fastai/general_optimizer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from .torch_core import *
-from torch.optim import Optimizer
-import types
-
-__all__ = ['StatScope', 'Statistic', 'ConstStatistic', 'AvgStatistic', 'AvgSquare', 'GeneralOptimizer']
-
-StatScope = Enum('StatScope', 'Global Group Layer Channel Weight')
-
-@dataclass
-class Statistic():
- name:str
- param:float=0.9 # e.g. for exp moving average
- scope:StatScope=StatScope.Weight
- init:float=0. # starting value
-
- @property
- def buf(self): return f'{self.name}_buffer'
-
- def new_step(self):
- "Set state when computing statistics for Global or Group"
- raise NotImplementedError
-
- def accumulate(self, val):
- "Add `val` to statistic"
- raise NotImplementedError
-
- def update(self, state, param, val=None, step=None):
- "Update state with accumlated, or `val` (if `Weight` or `Layer` scope)"
- raise NotImplementedError
-
-class ConstStatistic(Statistic):
- @property
- def buf(self): return None
- def new_step(self): pass
- def accumulate(self): pass
- def update(self, state, param, val=None, step=None): return param
-
-@dataclass
-class CounterStat(Statistic):
- def __post_init__(self): self.init,self._buf,self.name = 0,self.name,None
- @property
- def buf(self): return self._buf
- def new_step(self): pass
- def accumulate(self, val): pass
- def update(self, state, param, val=None, step=None): return state + 1
-
-@dataclass
-class AvgStatistic(Statistic):
- decay:bool=False
- debias:bool=False
- def new_step(self): self.val,self.count = 0.,0
-
- def accumulate(self, val):
- self.count += 1
- self.val += self._get_val1(val)
-
- def _get_val1(self, val): return val.mean()
- def _get_val2(self, state, val, param): return state.add_(1-param, val) if self.decay else state.add_(val)
- def _get_val3(self, state, val, param):
- v = val.view(val.size(0), -1).mean(1)
- return state.add_(1-param, v) if self.decay else state.add_(v)
-
- def update(self, state, param, val=None, step=None):
- if self.scope == StatScope.Weight:
- # `state` is a tensor
- res = self._get_val2(state.mul_(param), val, param)
- elif self.scope == StatScope.Channel:
- # `state` is a tensor of size n_channels
- res = self._get_val3(state.mul_(param), val, param)
- # For everything else, `state` is a scalar
- elif self.scope == StatScope.Layer: res = state*param + self._get_val1(val) * (1-param if self.decay else 1.)
- elif self.count != 0: res = state*param + self.val/self.count * (1-param if self.decay else 1.)
- else: return state
- if self.debias and step is not None: res /= (1 - param ** step)
- return res
-
-class AvgSquare(AvgStatistic):
-
- def __init__(self, name:str, param:float=0.9, scope=StatScope.Weight, init:float=0., decay:bool=True, debias:bool=False):
- super().__init__(name, param=param, scope=scope, init=init, decay=decay, debias=debias)
-
- def _get_val1(self, val): return torch.norm(val).pow(2)/val.numel()
- def _get_val2(self, state, val, param):
- return state.addcmul_(1-param, val, val) if self.decay else state.addcmul_(val, val)
- def _get_val3(self, state, val, param):
- v = val.view(val.size(0), -1).mean(1)
- return state.addcmul_(1-param, v, v) if self.decay else state.addcmul_(v, v)
-
-class GeneralOptimizer(Optimizer):
- def __init__(self, params, stats=None, on_step:Callable=None):
- defaults = {s.name:s.param for s in listify(stats) if s.name is not None}
- super().__init__(params, defaults)
- self.global_stats,self.group_stats,self.layer_stats,self.channel_stats,self.weight_stats = self._split_stats(stats)
- self.init_stats()
- if on_step is not None: self.on_step = types.MethodType(on_step, self)
-
- def step(self, closure=None):
- self.update_stats()
- for i,pg in enumerate(self.param_groups):
- for p in pg['params']:
- if p.grad is not None: self.on_step(p, pg, i)
-
- def on_step(self, p, group, group_idx): p.data.add_(-group['lr'], p.grad.data)
-
- def _split_stats(self, stats):
- splits = [[stat for stat in listify(stats) if stat.scope==scope] for scope in StatScope]
- for split,s in zip([splits[0], splits[1], splits[2]+splits[3]+splits[4]], StatScope):
- if np.any([getattr(s, 'debias', False) for s in split]): split.insert(0, CounterStat('step', scope=s))
- return splits
-
- def _init_stats(self, stats, data=None):
- return {stat.buf: stat.init if data is None
- else torch.zeros_like(data) + stat.init for stat in stats if stat.buf is not None}
-
- def init_stats(self):
- self.state['global'] = self._init_stats(self.global_stats)
- for i,pg in enumerate(self.param_groups):
- self.state[f'group{i}'] = self._init_stats(self.group_stats)
- for p in pg['params']:
- self.state[p] = self._init_stats(self.layer_stats)
- self.state[p].update(self._init_stats(self.channel_stats, p.data.view(p.data.size(0), -1).mean(1)))
- self.state[p].update(self._init_stats(self.weight_stats, p.data))
-
- def _set_bufs(self, p, stats, pg, val=None):
- d = self.state[p]
- for stat in stats:
- if stat.buf is not None: d[stat.buf] = stat.update(d[stat.buf], pg[stat.name], val=val, step=d.get('step', None))
-
- def update_stats(self):
- for stat in self.global_stats: stat.new_step()
- for i,pg in enumerate(self.param_groups):
- for stat in self.group_stats: stat.new_step()
- for p in pg['params']:
- if p.grad is not None:
- for stat in self.global_stats + self.group_stats: stat.accumulate(p.grad.data)
- self._set_bufs(p, self.layer_stats+self.channel_stats+self.weight_stats, pg, p.grad.data)
- self._set_bufs(f'group{i}', self.group_stats, pg)
- self._set_bufs('global', self.global_stats, self.param_groups[0])
-
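
`GeneralOptimizer` never decides how to step on its own; it only maintains the requested statistics and exposes them to `on_step`. A hedged sketch of wiring it up as SGD that steps along an exponential moving average of the gradient (the statistic name, buffer key and learning rate below are illustrative, not from this codebase):

```python
import torch.nn as nn
# Assumes the vendored module above is importable on the path.
from fastai.general_optimizer import GeneralOptimizer, AvgStatistic, StatScope

stats = [AvgStatistic('grad_avg', param=0.9, scope=StatScope.Weight, decay=True)]

def on_step(self, p, group, group_idx):
    # step along the smoothed gradient stored in this weight's buffer
    p.data.add_(-group['lr'], self.state[p]['grad_avg_buffer'])

model = nn.Linear(10, 2)
opt = GeneralOptimizer([{'params': model.parameters(), 'lr': 0.1}],
                       stats=stats, on_step=on_step)
# training loop: loss.backward(); opt.step(); opt.zero_grad()
```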
diff --git a/spaces/aliabid94/AutoGPT/autogpt/js/overlay.js b/spaces/aliabid94/AutoGPT/autogpt/js/overlay.js
deleted file mode 100644
index 1c99c72673330b8ea8cf037ef889233f2d4326be..0000000000000000000000000000000000000000
--- a/spaces/aliabid94/AutoGPT/autogpt/js/overlay.js
+++ /dev/null
@@ -1,29 +0,0 @@
-const overlay = document.createElement('div');
-Object.assign(overlay.style, {
- position: 'fixed',
- zIndex: 999999,
- top: 0,
- left: 0,
- width: '100%',
- height: '100%',
- background: 'rgba(0, 0, 0, 0.7)',
- color: '#fff',
- fontSize: '24px',
- fontWeight: 'bold',
- display: 'flex',
- justifyContent: 'center',
- alignItems: 'center',
-});
-const textContent = document.createElement('div');
-Object.assign(textContent.style, {
- textAlign: 'center',
-});
-textContent.textContent = 'AutoGPT Analyzing Page';
-overlay.appendChild(textContent);
-document.body.append(overlay);
-document.body.style.overflow = 'hidden';
-let dotCount = 0;
-setInterval(() => {
- textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
- dotCount = (dotCount + 1) % 4;
-}, 1000);
diff --git a/spaces/allieannez/NLPContextQASquad2Demo/app.py b/spaces/allieannez/NLPContextQASquad2Demo/app.py
deleted file mode 100644
index ebe8500e5f871271c2b9f7d0b570936b33dbc530..0000000000000000000000000000000000000000
--- a/spaces/allieannez/NLPContextQASquad2Demo/app.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-title = 'NLP Context QA with Transformers and Roberta Base Squad2'
-
-question1T = "What pressures do teens face?"
-question2T = "What do teens deal with?"
-question3T = "What persistent fears might teens face?"
-
-question1A = "What do half of American adults suffer from?"
-question2A = "What cognitive issues do adults face after COVID?"
-question3A = "What anxiety and changes are faced by adults?"
-
-question1E = "What problems do elderly have due to medical issues?"
-question2E = "What helps mental health for elderly?"
-question3E = "How many older adultsexperience mental disorders?"
-
-context1 = "Pressures teens face: Youth mental health expert have raised concerns about the extreme pressures on children and teens throughout the COVID-19 pandemic. Lingering effects of school closures and COVID-related stressors are key factors in teen stress. Many young people are also dealing with overwhelming pressure to achieve good grades in school or gain admission to elite colleges and universities. The need to be superstars in sports, the performing arts or other extracurricular activities. Tough schedules that don't allow enough time for rest, relaxation and unstructured fun. They deal with Bullying whether in person, via social media or both. They face persistent fears about climate change, global conflict and other weighty issues. They may face discrimination based on race, gender, sexual orientation, weight, religion, disability or other factors. Teens also face problems related to a poverty or lack of money for safe, stable housing and enough nutritious food."
-context2 = "Pressures adults face: Nearly half of Americans surveyed reported recent symptoms of an anxiety or depressive disorder, and 10% feel their mental health needs are not being met. Rates of anxiety, depression, and substance use disorder have increased since the beginning of the pandemic. People who have mental illnesses or disorders and then get COVID-19 are more likely to die than those who don’t have mental illnesses or disorders. Adults face a number of symptoms related to brain and mental health including cognitive and attention deficits like brain fog, anxiety and depression, seizures, and suicidal behavior. Stressors caused by the COVID-19 pandemic are not yet fully understood but include changes to daily routines, virtual office and schooling, mask wearing, caregiver absence, loss and grief, and financial instability. People more likely to experience difficulties include people from racial and ethnic minority groups, mothers and pregnant women, people with financial or housing insecurity, children, people with disabilities, people with pre-existing mental illnesses or substance use problems and health care workers."
-context3 = "Pressures facing elderly: Anxiety and depression have increased for older adults since the start of the pandemic. Elders cope with uncertainty better than younger generations; however, depression and anxiety have negative impacts on quality of life, function and general health. Due to medical vulnerability, elders face isolation with sacrifices and pain to endure, including loneliness. At least one in four older adults experience mental disorders such as depression, anxiety and dementia. The number of seniors is expected to double by 2030. Isolation, affective and anxiety disorders, dementia, and psychosis are common, as well as sleep disorders. Behavioral disorders, cognitive deterioration or confusion states as a result of physical disorders and surgical interventions occur for the elderly. Health care providers including those in primary care can play a key role in promoting mental health by working with mental health professionals, local governments, civil society organizations, families and communities to provide comprehensive mental health care and supportive environments. The elderly should be encouraged to participate in communities and society, while policy makers should ensure health concerns are addressed in national health planning and policies."
-
-# Model (autotrain compatible) https://huggingface.co/deepset/roberta-base-squad2/tree/main
-# Model Card: https://huggingface.co/deepset/roberta-base-squad2
-model_name = "deepset/roberta-base-squad2"
-question_answerer = pipeline("question-answering", model=model_name, tokenizer=model_name)
-
-interface = gr.Interface.from_pipeline(question_answerer,
- title = title,
- theme = "peach",
- examples = [
- [context1, question1T],[context1, question2T],[context1, question3T],
- [context2, question1A],[context2, question2A],[context2, question3A],
- [context3, question1E],[context3, question2E],[context3, question3E]
- ]).launch()
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test178/README.md b/spaces/allknowingroger/Image-Models-Test178/README.md
deleted file mode 100644
index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test178/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/text-generation-webui-space-1/extensions/silero_tts/script.py b/spaces/allknowingroger/text-generation-webui-space-1/extensions/silero_tts/script.py
deleted file mode 100644
index f611dc27b7480cd357b77c0c407fcc2bd6df2679..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/text-generation-webui-space-1/extensions/silero_tts/script.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import time
-from pathlib import Path
-
-import gradio as gr
-import torch
-
-import modules.chat as chat
-import modules.shared as shared
-
-torch._C._jit_set_profiling_mode(False)
-
-params = {
- 'activate': True,
- 'speaker': 'en_56',
- 'language': 'en',
- 'model_id': 'v3_en',
- 'sample_rate': 48000,
- 'device': 'cpu',
- 'show_text': False,
- 'autoplay': True,
- 'voice_pitch': 'medium',
- 'voice_speed': 'medium',
-}
-
-current_params = params.copy()
-voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
-voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
-voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
-
-# Used for making text xml compatible, needed for voice pitch and speed control
-table = str.maketrans({
-    "<": "&lt;",
-    ">": "&gt;",
-    "&": "&amp;",
-    "'": "&apos;",
-    '"': "&quot;",
-})
-
-def xmlesc(txt):
- return txt.translate(table)
-
-def load_model():
- model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
- model.to(params['device'])
- return model
-model = load_model()
-
-def remove_surrounded_chars(string):
- new_string = ""
- in_star = False
- for char in string:
- if char == '*':
- in_star = not in_star
- elif not in_star:
- new_string += char
- return new_string
-
-def remove_tts_from_history(name1, name2):
- for i, entry in enumerate(shared.history['internal']):
- shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
- return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
-
-def toggle_text_in_history(name1, name2):
- for i, entry in enumerate(shared.history['visible']):
- visible_reply = entry[1]
-        if visible_reply.startswith('<audio'):
-            if params['show_text']:
-                reply = shared.history['internal'][i][1]
-                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
-            else:
-                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
- return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
-
-def input_modifier(string):
- """
- This function is applied to your text inputs before
- they are fed into the model.
- """
-
- # Remove autoplay from the last reply
- if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
- shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')]
-
- shared.processing_message = "*Is recording a voice message...*"
- return string
-
-def output_modifier(string):
- """
- This function is applied to the model outputs.
- """
-
- global model, current_params
-
- for i in params:
- if params[i] != current_params[i]:
- model = load_model()
- current_params = params.copy()
- break
-
- if params['activate'] == False:
- return string
-
- original_string = string
- string = remove_surrounded_chars(string)
- string = string.replace('"', '')
- string = string.replace('“', '')
- string = string.replace('\n', ' ')
- string = string.strip()
-
- if string == '':
- string = '*Empty reply, try regenerating*'
- else:
- output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
-        prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
-        silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
- model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
-
- autoplay = 'autoplay' if params['autoplay'] else ''
-        string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
- if params['show_text']:
- string += f'\n\n{original_string}'
-
- shared.processing_message = "*Is typing...*"
- return string
-
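# Editorial sketch (not part of the original extension): an example of the SSML string that
# output_modifier() above builds before calling model.save_wav(). The <speak>/<prosody> tag
# names follow the reconstruction of the stripped markup above and should be treated as an
# assumption rather than verified Silero documentation.
example_prosody = '<prosody rate="medium" pitch="medium">'
example_input = f'<speak>{example_prosody}Hello there.</prosody></speak>'
print(example_input)
# -> <speak><prosody rate="medium" pitch="medium">Hello there.</prosody></speak>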
-def bot_prefix_modifier(string):
- """
- This function is only applied in chat mode. It modifies
- the prefix text for the Bot and can be used to bias its
- behavior.
- """
-
- return string
-
-def ui():
- # Gradio elements
- with gr.Accordion("Silero TTS"):
- with gr.Row():
- activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
- autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
- show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
- voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
- with gr.Row():
- v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')
- v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')
- with gr.Row():
- convert = gr.Button('Permanently replace audios with the message texts')
- convert_cancel = gr.Button('Cancel', visible=False)
- convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
-
- # Convert history with confirmation
- convert_arr = [convert_confirm, convert, convert_cancel]
- convert.click(lambda :[gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
- convert_confirm.click(lambda :[gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
- convert_confirm.click(remove_tts_from_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
- convert_confirm.click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
- convert_cancel.click(lambda :[gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
-
- # Toggle message text in history
- show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
- show_text.change(toggle_text_in_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
- show_text.change(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
-
- # Event functions to update the parameters in the backend
- activate.change(lambda x: params.update({"activate": x}), activate, None)
- autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
- voice.change(lambda x: params.update({"speaker": x}), voice, None)
- v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None)
- v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None)
diff --git a/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/faq.py b/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/faq.py
deleted file mode 100644
index 59592c5dd7088da2edc1eedd05a12b04e307735d..0000000000000000000000000000000000000000
--- a/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/faq.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# flake8: noqa
-import streamlit as st
-
-
-def faq():
- st.markdown(
- """
-# FAQ
-## How does ArxivGPT work?
-When you load a pdf, it will be divided into smaller chunks
-and stored in a special type of database called a vector index
-that allows for semantic search and retrieval.
-
-When you ask a question, ArxivGPT will search through the
-pdf chunks and find the most relevant ones using the vector index.
-Then, it will use the powerful language model GPT4 to generate a final answer.
-
-## Why does ArxivGPT take time to index my document?
-With a free OpenAI API key, indexing the loaded pdf files takes time
-because a free key has restricted
-[rate limits](https://platform.openai.com/docs/guides/rate-limits/overview).
-To make the process faster, you can use a paid API key.
-
-
-## How accurate is ArxivGPT?
-In our experience and tests, it seems impressively accurate, but keep in mind
-that GPT-4 is a language model and is prone to mistakes. ArxivGPT is
-based on semantic search and extracts the most relevant chunks from the pdf
-files.
-"""
- )
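# Editorial sketch (not part of the original Space): the FAQ above describes chunking a pdf,
# storing the chunks in a vector index, and retrieving the most relevant ones for a question.
# This is a minimal, self-contained illustration of that retrieval idea; the hash-based
# embedding and the in-memory ranking are toy stand-ins, not the actual index or the
# OpenAI/GPT-4 pipeline used by ArxivGPT.
import hashlib

import numpy as np


def toy_embed(text: str, dim: int = 64) -> np.ndarray:
    """Hash each word into a fixed-size vector (a crude bag-of-words embedding)."""
    v = np.zeros(dim)
    for word in text.lower().split():
        v[int(hashlib.md5(word.encode()).hexdigest(), 16) % dim] += 1.0
    n = np.linalg.norm(v)
    return v / n if n else v


def chunk(text: str, size: int = 40) -> list:
    """Split text into fixed-size word chunks."""
    words = text.split()
    return [" ".join(words[i:i + size]) for i in range(0, len(words), size)]


def top_k_chunks(question: str, chunks: list, k: int = 2) -> list:
    """Rank chunks by cosine similarity to the question and return the best k."""
    q = toy_embed(question)
    return sorted(chunks, key=lambda c: -float(toy_embed(c) @ q))[:k]

# Usage: the retrieved chunks would then be passed to the language model as context.
# best = top_k_chunks("What is the main result?", chunk(open("paper.txt").read()))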
diff --git a/spaces/anaclaudia13ct/insect_detection/models/yolo.py b/spaces/anaclaudia13ct/insect_detection/models/yolo.py
deleted file mode 100644
index ed21c067ee9337bf534bfc908574362a61ad3207..0000000000000000000000000000000000000000
--- a/spaces/anaclaudia13ct/insect_detection/models/yolo.py
+++ /dev/null
@@ -1,391 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-YOLO-specific modules
-
-Usage:
- $ python models/yolo.py --cfg yolov5s.yaml
-"""
-
-import argparse
-import contextlib
-import os
-import platform
-import sys
-from copy import deepcopy
-from pathlib import Path
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-if platform.system() != 'Windows':
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from models.common import *
-from models.experimental import *
-from utils.autoanchor import check_anchor_order
-from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
-from utils.plots import feature_visualization
-from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
- time_sync)
-
-try:
- import thop # for FLOPs computation
-except ImportError:
- thop = None
-
-
-class Detect(nn.Module):
- # YOLOv5 Detect head for detection models
- stride = None # strides computed during build
- dynamic = False # force grid reconstruction
- export = False # export mode
-
- def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
- super().__init__()
- self.nc = nc # number of classes
- self.no = nc + 5 # number of outputs per anchor
- self.nl = len(anchors) # number of detection layers
- self.na = len(anchors[0]) // 2 # number of anchors
- self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
- self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
- self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
- self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
- self.inplace = inplace # use inplace ops (e.g. slice assignment)
-
- def forward(self, x):
- z = [] # inference output
- for i in range(self.nl):
- x[i] = self.m[i](x[i]) # conv
- bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
- x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
- if not self.training: # inference
- if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
- self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
-
- if isinstance(self, Segment): # (boxes + masks)
- xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
- xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
- wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
- y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
- else: # Detect (boxes only)
- xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
- xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
- wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
- y = torch.cat((xy, wh, conf), 4)
- z.append(y.view(bs, self.na * nx * ny, self.no))
-
- return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
-
- def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
- d = self.anchors[i].device
- t = self.anchors[i].dtype
- shape = 1, self.na, ny, nx, 2 # grid shape
- y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
- yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
- grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
- anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
- return grid, anchor_grid
-
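# Editorial sketch (not part of the original file): a tiny numeric illustration of the box
# decoding in Detect.forward() above. The values are made up; the point is the formula
# xy = (sigmoid(t_xy) * 2 + grid) * stride and wh = (sigmoid(t_wh) * 2) ** 2 * anchor,
# where the grid already carries the -0.5 offset added in _make_grid().
import torch

stride = 8.0                                       # stride of this detection layer
grid_cell = torch.tensor([3.0 - 0.5, 2.0 - 0.5])   # grid offset for cell (3, 2)
anchor = torch.tensor([10.0, 13.0])                # anchor width/height in pixels
raw_xy = torch.tensor([0.2, -0.1])                 # raw network outputs for this cell
raw_wh = torch.tensor([0.5, 0.3])

xy = (raw_xy.sigmoid() * 2 + grid_cell) * stride   # box center in input-image pixels
wh = (raw_wh.sigmoid() * 2) ** 2 * anchor          # box size, bounded to 4x the anchor
print(xy, wh)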
-
-class Segment(Detect):
- # YOLOv5 Segment head for segmentation models
- def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
- super().__init__(nc, anchors, ch, inplace)
- self.nm = nm # number of masks
- self.npr = npr # number of protos
- self.no = 5 + nc + self.nm # number of outputs per anchor
- self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
- self.proto = Proto(ch[0], self.npr, self.nm) # protos
- self.detect = Detect.forward
-
- def forward(self, x):
- p = self.proto(x[0])
- x = self.detect(self, x)
- return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
-
-
-class BaseModel(nn.Module):
- # YOLOv5 base model
- def forward(self, x, profile=False, visualize=False):
- return self._forward_once(x, profile, visualize) # single-scale inference, train
-
- def _forward_once(self, x, profile=False, visualize=False):
- y, dt = [], [] # outputs
- for m in self.model:
- if m.f != -1: # if not from previous layer
- x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
- if profile:
- self._profile_one_layer(m, x, dt)
- x = m(x) # run
- y.append(x if m.i in self.save else None) # save output
- if visualize:
- feature_visualization(x, m.type, m.i, save_dir=visualize)
- return x
-
- def _profile_one_layer(self, m, x, dt):
- c = m == self.model[-1] # is final layer, copy input as inplace fix
- o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
- t = time_sync()
- for _ in range(10):
- m(x.copy() if c else x)
- dt.append((time_sync() - t) * 100)
- if m == self.model[0]:
- LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
- LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
- if c:
- LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
-
- def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
- LOGGER.info('Fusing layers... ')
- for m in self.model.modules():
- if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
- m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
- delattr(m, 'bn') # remove batchnorm
- m.forward = m.forward_fuse # update forward
- self.info()
- return self
-
- def info(self, verbose=False, img_size=640): # print model information
- model_info(self, verbose, img_size)
-
- def _apply(self, fn):
- # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
- self = super()._apply(fn)
- m = self.model[-1] # Detect()
- if isinstance(m, (Detect, Segment)):
- m.stride = fn(m.stride)
- m.grid = list(map(fn, m.grid))
- if isinstance(m.anchor_grid, list):
- m.anchor_grid = list(map(fn, m.anchor_grid))
- return self
-
-
-class DetectionModel(BaseModel):
- # YOLOv5 detection model
- def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
- super().__init__()
- if isinstance(cfg, dict):
- self.yaml = cfg # model dict
- else: # is *.yaml
- import yaml # for torch hub
- self.yaml_file = Path(cfg).name
- with open(cfg, encoding='ascii', errors='ignore') as f:
- self.yaml = yaml.safe_load(f) # model dict
-
- # Define model
- ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
- if nc and nc != self.yaml['nc']:
- LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
- self.yaml['nc'] = nc # override yaml value
- if anchors:
- LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
- self.yaml['anchors'] = round(anchors) # override yaml value
- self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
- self.names = [str(i) for i in range(self.yaml['nc'])] # default names
- self.inplace = self.yaml.get('inplace', True)
-
- # Build strides, anchors
- m = self.model[-1] # Detect()
- if isinstance(m, (Detect, Segment)):
- s = 256 # 2x min stride
- m.inplace = self.inplace
- forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
- m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
- check_anchor_order(m)
- m.anchors /= m.stride.view(-1, 1, 1)
- self.stride = m.stride
- self._initialize_biases() # only run once
-
- # Init weights, biases
- initialize_weights(self)
- self.info()
- LOGGER.info('')
-
- def forward(self, x, augment=False, profile=False, visualize=False):
- if augment:
- return self._forward_augment(x) # augmented inference, None
- return self._forward_once(x, profile, visualize) # single-scale inference, train
-
- def _forward_augment(self, x):
- img_size = x.shape[-2:] # height, width
- s = [1, 0.83, 0.67] # scales
- f = [None, 3, None] # flips (2-ud, 3-lr)
- y = [] # outputs
- for si, fi in zip(s, f):
- xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
- yi = self._forward_once(xi)[0] # forward
- # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
- yi = self._descale_pred(yi, fi, si, img_size)
- y.append(yi)
- y = self._clip_augmented(y) # clip augmented tails
- return torch.cat(y, 1), None # augmented inference, train
-
- def _descale_pred(self, p, flips, scale, img_size):
- # de-scale predictions following augmented inference (inverse operation)
- if self.inplace:
- p[..., :4] /= scale # de-scale
- if flips == 2:
- p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
- elif flips == 3:
- p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
- else:
- x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
- if flips == 2:
- y = img_size[0] - y # de-flip ud
- elif flips == 3:
- x = img_size[1] - x # de-flip lr
- p = torch.cat((x, y, wh, p[..., 4:]), -1)
- return p
-
- def _clip_augmented(self, y):
- # Clip YOLOv5 augmented inference tails
- nl = self.model[-1].nl # number of detection layers (P3-P5)
- g = sum(4 ** x for x in range(nl)) # grid points
- e = 1 # exclude layer count
- i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices
- y[0] = y[0][:, :-i] # large
- i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
- y[-1] = y[-1][:, i:] # small
- return y
-
- def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
- # https://arxiv.org/abs/1708.02002 section 3.3
- # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
- m = self.model[-1] # Detect() module
- for mi, s in zip(m.m, m.stride): # from
- b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
- b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
- b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
- mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
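# Editorial sketch (not part of the original file): the objectness prior that
# _initialize_biases() above writes into the detection head. It assumes roughly 8 objects per
# 640x640 image, so the initial objectness probability is about 8 / (grid cells at that stride).
import math

for s in (8, 16, 32):                    # typical P3-P5 strides
    cells = (640 / s) ** 2               # number of grid cells at this stride
    bias = math.log(8 / cells)           # value added to the objectness logit
    prob = 1 / (1 + math.exp(-bias))     # sigmoid(objectness) right after initialization
    print(f'stride {s}: {cells:.0f} cells, bias {bias:.2f}, p(obj) {prob:.5f}')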
-
-Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
-
-
-class SegmentationModel(DetectionModel):
- # YOLOv5 segmentation model
- def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
- super().__init__(cfg, ch, nc, anchors)
-
-
-class ClassificationModel(BaseModel):
- # YOLOv5 classification model
- def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
- super().__init__()
- self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
-
- def _from_detection_model(self, model, nc=1000, cutoff=10):
- # Create a YOLOv5 classification model from a YOLOv5 detection model
- if isinstance(model, DetectMultiBackend):
- model = model.model # unwrap DetectMultiBackend
- model.model = model.model[:cutoff] # backbone
- m = model.model[-1] # last layer
- ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
- c = Classify(ch, nc) # Classify()
- c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
- model.model[-1] = c # replace
- self.model = model.model
- self.stride = model.stride
- self.save = []
- self.nc = nc
-
- def _from_yaml(self, cfg):
- # Create a YOLOv5 classification model from a *.yaml file
- self.model = None
-
-
-def parse_model(d, ch): # model_dict, input_channels(3)
- # Parse a YOLOv5 model.yaml dictionary
- LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
- anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
- if act:
- Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
- LOGGER.info(f"{colorstr('activation:')} {act}") # print
- na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
- no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
-
- layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
- for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
- m = eval(m) if isinstance(m, str) else m # eval strings
- for j, a in enumerate(args):
- with contextlib.suppress(NameError):
- args[j] = eval(a) if isinstance(a, str) else a # eval strings
-
- n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
- if m in {
- Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
- BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
- c1, c2 = ch[f], args[0]
- if c2 != no: # if not output
- c2 = make_divisible(c2 * gw, 8)
-
- args = [c1, c2, *args[1:]]
- if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
- args.insert(2, n) # number of repeats
- n = 1
- elif m is nn.BatchNorm2d:
- args = [ch[f]]
- elif m is Concat:
- c2 = sum(ch[x] for x in f)
- # TODO: channel, gw, gd
- elif m in {Detect, Segment}:
- args.append([ch[x] for x in f])
- if isinstance(args[1], int): # number of anchors
- args[1] = [list(range(args[1] * 2))] * len(f)
- if m is Segment:
- args[3] = make_divisible(args[3] * gw, 8)
- elif m is Contract:
- c2 = ch[f] * args[0] ** 2
- elif m is Expand:
- c2 = ch[f] // args[0] ** 2
- else:
- c2 = ch[f]
-
- m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
- t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum(x.numel() for x in m_.parameters()) # number params
- m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
- save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
- layers.append(m_)
- if i == 0:
- ch = []
- ch.append(c2)
- return nn.Sequential(*layers), sorted(save)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
- parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--profile', action='store_true', help='profile model speed')
- parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
- parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
- opt = parser.parse_args()
- opt.cfg = check_yaml(opt.cfg) # check YAML
- print_args(vars(opt))
- device = select_device(opt.device)
-
- # Create model
- im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
- model = Model(opt.cfg).to(device)
-
- # Options
- if opt.line_profile: # profile layer by layer
- model(im, profile=True)
-
- elif opt.profile: # profile forward-backward
- results = profile(input=im, ops=[model], n=3)
-
- elif opt.test: # test all models
- for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
- try:
- _ = Model(cfg)
- except Exception as e:
- print(f'Error in {cfg}: {e}')
-
- else: # report fused model summary
- model.fuse()
diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/options.css b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/options.css
deleted file mode 100644
index fb015a54e0a7f7ac521517357d812c994621592e..0000000000000000000000000000000000000000
--- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/options.css
+++ /dev/null
@@ -1,10 +0,0 @@
-.options-container {
- display: flex;
- flex-wrap: wrap;
-}
-
-@media screen and (max-width: 990px) {
- .options-container {
- justify-content: space-between;
- }
-}
diff --git a/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/script.py b/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/script.py
deleted file mode 100644
index 949531c9ea3578c67bad7be7fe694836e04ef03c..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/script.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import base64
-import io
-import re
-import time
-from datetime import date
-from pathlib import Path
-
-import gradio as gr
-import requests
-import torch
-from PIL import Image
-
-import modules.shared as shared
-from modules.models import reload_model, unload_model
-
-torch._C._jit_set_profiling_mode(False)
-
-# parameters which can be customized in settings.json of webui
-params = {
- 'address': 'http://127.0.0.1:7860',
- 'mode': 0, # modes of operation: 0 (Manual only), 1 (Immersive/Interactive - looks for words to trigger), 2 (Picturebook Adventure - Always on)
- 'manage_VRAM': False,
- 'save_img': False,
- 'SD_model': 'NeverEndingDream', # not used right now
- 'prompt_prefix': '(Masterpiece:1.1), detailed, intricate, colorful',
- 'negative_prompt': '(worst quality, low quality:1.3)',
- 'width': 512,
- 'height': 512,
- 'denoising_strength': 0.61,
- 'restore_faces': False,
- 'enable_hr': False,
- 'hr_upscaler': 'ESRGAN_4x',
- 'hr_scale': '1.0',
- 'seed': -1,
- 'sampler_name': 'DDIM',
- 'steps': 32,
- 'cfg_scale': 7
-}
-
-
-def give_VRAM_priority(actor):
- global shared, params
-
- if actor == 'SD':
- unload_model()
- print("Requesting Auto1111 to re-load last checkpoint used...")
- response = requests.post(url=f'{params["address"]}/sdapi/v1/reload-checkpoint', json='')
- response.raise_for_status()
-
- elif actor == 'LLM':
- print("Requesting Auto1111 to vacate VRAM...")
- response = requests.post(url=f'{params["address"]}/sdapi/v1/unload-checkpoint', json='')
- response.raise_for_status()
- reload_model()
-
- elif actor == 'set':
-        print("VRAM management activated -- requesting Auto1111 to vacate VRAM...")
- response = requests.post(url=f'{params["address"]}/sdapi/v1/unload-checkpoint', json='')
- response.raise_for_status()
-
- elif actor == 'reset':
-        print("VRAM management deactivated -- requesting Auto1111 to reload checkpoint")
- response = requests.post(url=f'{params["address"]}/sdapi/v1/reload-checkpoint', json='')
- response.raise_for_status()
-
- else:
- raise RuntimeError(f'Managing VRAM: "{actor}" is not a known state!')
-
- response.raise_for_status()
- del response
-
-
-if params['manage_VRAM']:
- give_VRAM_priority('set')
-
-samplers = ['DDIM', 'DPM++ 2M Karras'] # TODO: get the available samplers with http://{address}}/sdapi/v1/samplers
-SD_models = ['NeverEndingDream'] # TODO: get with http://{address}}/sdapi/v1/sd-models and allow user to select
-
-picture_response = False # specifies if the next model response should appear as a picture
-
-
-def remove_surrounded_chars(string):
- # this expression matches to 'as few symbols as possible (0 upwards) between any asterisks' OR
- # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string'
- return re.sub('\*[^\*]*?(\*|$)', '', string)
-
-
-def triggers_are_in(string):
- string = remove_surrounded_chars(string)
-    # regex searches for send|mail|message|me (at the end of the word) followed by
- # a whole word of image|pic|picture|photo|snap|snapshot|selfie|meme(s),
- # (?aims) are regex parser flags
- return bool(re.search('(?aims)(send|mail|message|me)\\b.+?\\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)s?\\b', string))
-
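# Editorial sketch (not part of the original extension): a self-contained check of which
# messages the trigger regex in triggers_are_in() above matches.
import re

pattern = r'(?aims)(send|mail|message|me)\b.+?\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)s?\b'
for msg in ("Can you send me a picture of your house?",
            "Please mail me a photo",
            "Tell me about your house"):
    print(msg, '->', bool(re.search(pattern, msg)))
# Expected output: True, True, False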
-
-def state_modifier(state):
- if picture_response:
- state['stream'] = False
-
- return state
-
-
-def input_modifier(string):
- """
- This function is applied to your text inputs before
- they are fed into the model.
- """
-
- global params
-
- if not params['mode'] == 1: # if not in immersive/interactive mode, do nothing
- return string
-
- if triggers_are_in(string): # if we're in it, check for trigger words
- toggle_generation(True)
- string = string.lower()
- if "of" in string:
- subject = string.split('of', 1)[1] # subdivide the string once by the first 'of' instance and get what's coming after it
- string = "Please provide a detailed and vivid description of " + subject
- else:
- string = "Please provide a detailed description of your appearance, your surroundings and what you are doing right now"
-
- return string
-
-# Get and save the Stable Diffusion-generated picture
-def get_SD_pictures(description):
- global params
-
- if params['manage_VRAM']:
- give_VRAM_priority('SD')
-
- payload = {
- "prompt": params['prompt_prefix'] + description,
- "seed": params['seed'],
- "sampler_name": params['sampler_name'],
- "enable_hr": params['enable_hr'],
- "hr_scale": params['hr_scale'],
- "hr_upscaler": params['hr_upscaler'],
- "denoising_strength": params['denoising_strength'],
- "steps": params['steps'],
- "cfg_scale": params['cfg_scale'],
- "width": params['width'],
- "height": params['height'],
- "restore_faces": params['restore_faces'],
- "override_settings_restore_afterwards": True,
- "negative_prompt": params['negative_prompt']
- }
-
- print(f'Prompting the image generator via the API on {params["address"]}...')
- response = requests.post(url=f'{params["address"]}/sdapi/v1/txt2img', json=payload)
- response.raise_for_status()
- r = response.json()
-
- visible_result = ""
- for img_str in r['images']:
- if params['save_img']:
- img_data = base64.b64decode(img_str)
-
- variadic = f'{date.today().strftime("%Y_%m_%d")}/{shared.character}_{int(time.time())}'
- output_file = Path(f'extensions/sd_api_pictures/outputs/{variadic}.png')
- output_file.parent.mkdir(parents=True, exist_ok=True)
-
- with open(output_file.as_posix(), 'wb') as f:
- f.write(img_data)
-
-            visible_result = visible_result + f'<img src="file/{output_file.as_posix()}" alt="{description}">\n'
- else:
- image = Image.open(io.BytesIO(base64.b64decode(img_str.split(",", 1)[0])))
- # lower the resolution of received images for the chat, otherwise the log size gets out of control quickly with all the base64 values in visible history
- image.thumbnail((300, 300))
- buffered = io.BytesIO()
- image.save(buffered, format="JPEG")
- buffered.seek(0)
- image_bytes = buffered.getvalue()
- img_str = "data:image/jpeg;base64," + base64.b64encode(image_bytes).decode()
-            visible_result = visible_result + f'<img src="{img_str}" alt="{description}">\n'
-
- if params['manage_VRAM']:
- give_VRAM_priority('LLM')
-
- return visible_result
-
-# TODO: how do I make the UI history ignore the resulting pictures (I don't want HTML to appear in history)
-# and replace it with 'text' for the purposes of logging?
-def output_modifier(string):
- """
- This function is applied to the model outputs.
- """
-
- global picture_response, params
-
- if not picture_response:
- return string
-
- string = remove_surrounded_chars(string)
- string = string.replace('"', '')
- string = string.replace('“', '')
- string = string.replace('\n', ' ')
- string = string.strip()
-
- if string == '':
- string = 'no viable description in reply, try regenerating'
- return string
-
- text = ""
- if (params['mode'] < 2):
- toggle_generation(False)
- text = f'*Sends a picture which portrays: “{string}”*'
- else:
- text = string
-
- string = get_SD_pictures(string) + "\n" + text
-
- return string
-
-
-def bot_prefix_modifier(string):
- """
- This function is only applied in chat mode. It modifies
- the prefix text for the Bot and can be used to bias its
- behavior.
- """
-
- return string
-
-
-def toggle_generation(*args):
- global picture_response, shared
-
- if not args:
- picture_response = not picture_response
- else:
- picture_response = args[0]
-
- shared.processing_message = "*Is sending a picture...*" if picture_response else "*Is typing...*"
-
-
-def filter_address(address):
- address = address.strip()
- # address = re.sub('http(s)?:\/\/|\/$','',address) # remove starting http:// OR https:// OR trailing slash
- address = re.sub('\/$', '', address) # remove trailing /s
- if not address.startswith('http'):
- address = 'http://' + address
- return address
-
-
-def SD_api_address_update(address):
-
- global params
-
- msg = "✔️ SD API is found on:"
- address = filter_address(address)
- params.update({"address": address})
- try:
- response = requests.get(url=f'{params["address"]}/sdapi/v1/sd-models')
- response.raise_for_status()
- # r = response.json()
- except:
- msg = "❌ No SD API endpoint on:"
-
- return gr.Textbox.update(label=msg)
-
-
-def custom_css():
- path_to_css = Path(__file__).parent.resolve() / 'style.css'
- return open(path_to_css, 'r').read()
-
-
-def ui():
-
- # Gradio elements
-    # gr.Markdown('### Stable Diffusion API Pictures') # Currently the name of the extension is shown as the title
- with gr.Accordion("Parameters", open=True, elem_classes="SDAP"):
- with gr.Row():
- address = gr.Textbox(placeholder=params['address'], value=params['address'], label='Auto1111\'s WebUI address')
- modes_list = ["Manual", "Immersive/Interactive", "Picturebook/Adventure"]
- mode = gr.Dropdown(modes_list, value=modes_list[params['mode']], label="Mode of operation", type="index")
- with gr.Column(scale=1, min_width=300):
- manage_VRAM = gr.Checkbox(value=params['manage_VRAM'], label='Manage VRAM')
- save_img = gr.Checkbox(value=params['save_img'], label='Keep original images and use them in chat')
-
- force_pic = gr.Button("Force the picture response")
- suppr_pic = gr.Button("Suppress the picture response")
-
- with gr.Accordion("Generation parameters", open=False):
- prompt_prefix = gr.Textbox(placeholder=params['prompt_prefix'], value=params['prompt_prefix'], label='Prompt Prefix (best used to describe the look of the character)')
- negative_prompt = gr.Textbox(placeholder=params['negative_prompt'], value=params['negative_prompt'], label='Negative Prompt')
- with gr.Row():
- with gr.Column():
- width = gr.Slider(256, 768, value=params['width'], step=64, label='Width')
- height = gr.Slider(256, 768, value=params['height'], step=64, label='Height')
- with gr.Column():
- sampler_name = gr.Textbox(placeholder=params['sampler_name'], value=params['sampler_name'], label='Sampling method', elem_id="sampler_box")
- steps = gr.Slider(1, 150, value=params['steps'], step=1, label="Sampling steps")
- with gr.Row():
- seed = gr.Number(label="Seed", value=params['seed'], elem_id="seed_box")
- cfg_scale = gr.Number(label="CFG Scale", value=params['cfg_scale'], elem_id="cfg_box")
- with gr.Column() as hr_options:
- restore_faces = gr.Checkbox(value=params['restore_faces'], label='Restore faces')
- enable_hr = gr.Checkbox(value=params['enable_hr'], label='Hires. fix')
- with gr.Row(visible=params['enable_hr'], elem_classes="hires_opts") as hr_options:
- hr_scale = gr.Slider(1, 4, value=params['hr_scale'], step=0.1, label='Upscale by')
- denoising_strength = gr.Slider(0, 1, value=params['denoising_strength'], step=0.01, label='Denoising strength')
- hr_upscaler = gr.Textbox(placeholder=params['hr_upscaler'], value=params['hr_upscaler'], label='Upscaler')
-
- # Event functions to update the parameters in the backend
- address.change(lambda x: params.update({"address": filter_address(x)}), address, None)
- mode.select(lambda x: params.update({"mode": x}), mode, None)
- mode.select(lambda x: toggle_generation(x > 1), inputs=mode, outputs=None)
- manage_VRAM.change(lambda x: params.update({"manage_VRAM": x}), manage_VRAM, None)
- manage_VRAM.change(lambda x: give_VRAM_priority('set' if x else 'reset'), inputs=manage_VRAM, outputs=None)
- save_img.change(lambda x: params.update({"save_img": x}), save_img, None)
-
- address.submit(fn=SD_api_address_update, inputs=address, outputs=address)
- prompt_prefix.change(lambda x: params.update({"prompt_prefix": x}), prompt_prefix, None)
- negative_prompt.change(lambda x: params.update({"negative_prompt": x}), negative_prompt, None)
- width.change(lambda x: params.update({"width": x}), width, None)
- height.change(lambda x: params.update({"height": x}), height, None)
- hr_scale.change(lambda x: params.update({"hr_scale": x}), hr_scale, None)
- denoising_strength.change(lambda x: params.update({"denoising_strength": x}), denoising_strength, None)
- restore_faces.change(lambda x: params.update({"restore_faces": x}), restore_faces, None)
- hr_upscaler.change(lambda x: params.update({"hr_upscaler": x}), hr_upscaler, None)
- enable_hr.change(lambda x: params.update({"enable_hr": x}), enable_hr, None)
- enable_hr.change(lambda x: hr_options.update(visible=params["enable_hr"]), enable_hr, hr_options)
-
- sampler_name.change(lambda x: params.update({"sampler_name": x}), sampler_name, None)
- steps.change(lambda x: params.update({"steps": x}), steps, None)
- seed.change(lambda x: params.update({"seed": x}), seed, None)
- cfg_scale.change(lambda x: params.update({"cfg_scale": x}), cfg_scale, None)
-
- force_pic.click(lambda x: toggle_generation(True), inputs=force_pic, outputs=None)
- suppr_pic.click(lambda x: toggle_generation(False), inputs=suppr_pic, outputs=None)
diff --git a/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/style.css b/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/style.css
deleted file mode 100644
index a10e6397a1a0f5e0fbd98b98d65a572a2290807b..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/extensions/sd_api_pictures/style.css
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Align the elements for SD_api_picture extension */
-.SDAP #sampler_box {
- padding-top: var(--spacing-sm);
- padding-bottom: var(--spacing-sm);
-}
-
-.SDAP #seed_box,
-.SDAP #cfg_box {
- padding-top: var(--spacing-md);
-}
-
-.SDAP #sampler_box span,
-.SDAP #seed_box span,
-.SDAP #cfg_box span{
- margin-bottom: var(--spacing-sm);
-}
-
-.SDAP svg.dropdown-arrow {
- flex-shrink: 0 !important;
- margin: 0px !important;
-}
-
-.SDAP .hires_opts input[type="number"] {
- width: 6em !important;
-}
diff --git a/spaces/apsys/hetfit/nets/opti/__init__.py b/spaces/apsys/hetfit/nets/opti/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/arnavkartikeya/SCRIPture-final/pretrain.py b/spaces/arnavkartikeya/SCRIPture-final/pretrain.py
deleted file mode 100644
index c9490ec8eb8ff5f074b5772ada55cd27ec673a12..0000000000000000000000000000000000000000
--- a/spaces/arnavkartikeya/SCRIPture-final/pretrain.py
+++ /dev/null
@@ -1,173 +0,0 @@
-'''
- * Copyright (c) 2022, salesforce.com, inc.
- * All rights reserved.
- * SPDX-License-Identifier: BSD-3-Clause
- * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- * By Junnan Li
-'''
-import argparse
-import os
-import ruamel_yaml as yaml
-import numpy as np
-import random
-import time
-import datetime
-import json
-from pathlib import Path
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.backends.cudnn as cudnn
-import torch.distributed as dist
-from torch.utils.data import DataLoader
-
-from models.blip_pretrain import blip_pretrain
-import utils
-from utils import warmup_lr_schedule, step_lr_schedule
-from data import create_dataset, create_sampler, create_loader
-
-def train(model, data_loader, optimizer, epoch, device, config):
- # train
- model.train()
-
- metric_logger = utils.MetricLogger(delimiter=" ")
- metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
- metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
- metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
- metric_logger.add_meter('loss_lm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
-
- header = 'Train Epoch: [{}]'.format(epoch)
- print_freq = 50
-
- if config['laion_path']:
- data_loader.dataset.reload_laion(epoch)
-
- data_loader.sampler.set_epoch(epoch)
-
- for i, (image, caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
-
- if epoch==0:
- warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
-
- optimizer.zero_grad()
-
- image = image.to(device,non_blocking=True)
-
- # ramp up alpha in the first 2 epochs
- alpha = config['alpha']*min(1,(epoch*len(data_loader)+i)/(2*len(data_loader)))
-
- loss_ita, loss_itm, loss_lm = model(image, caption, alpha = alpha)
- loss = loss_ita + loss_itm + loss_lm
-
- loss.backward()
- optimizer.step()
-
- metric_logger.update(loss_ita=loss_ita.item())
- metric_logger.update(loss_itm=loss_itm.item())
- metric_logger.update(loss_lm=loss_lm.item())
- metric_logger.update(lr=optimizer.param_groups[0]["lr"])
-
-
- # gather the stats from all processes
- metric_logger.synchronize_between_processes()
- print("Averaged stats:", metric_logger.global_avg())
- return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
-
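# Editorial sketch (not part of the original script): the alpha used in train() above ramps
# linearly from 0 to config['alpha'] over the first two epochs and then stays constant.
# The numbers below are illustrative only.
alpha_cfg, steps_per_epoch = 0.4, 1000
for epoch, i in [(0, 0), (0, 500), (1, 0), (1, 500), (2, 0)]:
    alpha = alpha_cfg * min(1, (epoch * steps_per_epoch + i) / (2 * steps_per_epoch))
    print(epoch, i, alpha)
# -> 0.0, 0.1, 0.2, 0.3, 0.4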
-
-def main(args, config):
- utils.init_distributed_mode(args)
-
- device = torch.device(args.device)
-
- # fix the seed for reproducibility
- seed = args.seed + utils.get_rank()
- torch.manual_seed(seed)
- np.random.seed(seed)
- random.seed(seed)
- cudnn.benchmark = True
-
- #### Dataset ####
- print("Creating dataset")
- datasets = [create_dataset('pretrain', config, min_scale=0.2)]
- print('number of training samples: %d'%len(datasets[0]))
-
- num_tasks = utils.get_world_size()
- global_rank = utils.get_rank()
- samplers = create_sampler(datasets, [True], num_tasks, global_rank)
-
- data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
-
- #### Model ####
- print("Creating model")
- model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
- vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
-
- model = model.to(device)
-
- optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
-
- start_epoch = 0
- if args.checkpoint:
- checkpoint = torch.load(args.checkpoint, map_location='cpu')
- state_dict = checkpoint['model']
- model.load_state_dict(state_dict)
-
- optimizer.load_state_dict(checkpoint['optimizer'])
- start_epoch = checkpoint['epoch']+1
- print('resume checkpoint from %s'%args.checkpoint)
-
- model_without_ddp = model
- if args.distributed:
- model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
- model_without_ddp = model.module
-
- print("Start training")
- start_time = time.time()
- for epoch in range(start_epoch, config['max_epoch']):
-
- step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
-
- train_stats = train(model, data_loader, optimizer, epoch, device, config)
- if utils.is_main_process():
- log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
- 'epoch': epoch,
- }
- save_obj = {
- 'model': model_without_ddp.state_dict(),
- 'optimizer': optimizer.state_dict(),
- 'config': config,
- 'epoch': epoch,
- }
- torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
-
- with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
- f.write(json.dumps(log_stats) + "\n")
-
- dist.barrier()
-
- total_time = time.time() - start_time
- total_time_str = str(datetime.timedelta(seconds=int(total_time)))
- print('Training time {}'.format(total_time_str))
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--config', default='./configs/pretrain.yaml')
- parser.add_argument('--output_dir', default='output/Pretrain')
- parser.add_argument('--checkpoint', default='')
- parser.add_argument('--evaluate', action='store_true')
- parser.add_argument('--device', default='cuda')
- parser.add_argument('--seed', default=42, type=int)
- parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
- parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
- parser.add_argument('--distributed', default=True, type=bool)
- args = parser.parse_args()
-
- config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
-
- Path(args.output_dir).mkdir(parents=True, exist_ok=True)
-
- yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
-
- main(args, config)
\ No newline at end of file
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/utils/audio/processor.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/utils/audio/processor.py
deleted file mode 100644
index 4ceb7da4b37f65510c0e7977ba43eb0f8bfde4b6..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/utils/audio/processor.py
+++ /dev/null
@@ -1,777 +0,0 @@
-from io import BytesIO
-from typing import Dict, Tuple
-
-import librosa
-import numpy as np
-import scipy.io.wavfile
-import scipy.signal
-import soundfile as sf
-
-from TTS.tts.utils.helpers import StandardScaler
-from TTS.utils.audio.numpy_transforms import compute_f0
-
-# pylint: disable=too-many-public-methods
-
-
-class AudioProcessor(object):
- """Audio Processor for TTS.
-
- Note:
- All the class arguments are set to default values to enable a flexible initialization
- of the class with the model config. They are not meaningful for all the arguments.
-
- Args:
- sample_rate (int, optional):
- target audio sampling rate. Defaults to None.
-
- resample (bool, optional):
- enable/disable resampling of the audio clips when the target sampling rate does not match the original sampling rate. Defaults to False.
-
- num_mels (int, optional):
- number of melspectrogram dimensions. Defaults to None.
-
- log_func (int, optional):
-            log exponent used for converting spectrogram amplitude to DB.
-
- min_level_db (int, optional):
- minimum db threshold for the computed melspectrograms. Defaults to None.
-
- frame_shift_ms (int, optional):
- milliseconds of frames between STFT columns. Defaults to None.
-
- frame_length_ms (int, optional):
- milliseconds of STFT window length. Defaults to None.
-
- hop_length (int, optional):
- number of frames between STFT columns. Used if ```frame_shift_ms``` is None. Defaults to None.
-
- win_length (int, optional):
- STFT window length. Used if ```frame_length_ms``` is None. Defaults to None.
-
- ref_level_db (int, optional):
- reference DB level to avoid background noise. In general <20DB corresponds to the air noise. Defaults to None.
-
- fft_size (int, optional):
- FFT window size for STFT. Defaults to 1024.
-
- power (int, optional):
- Exponent value applied to the spectrogram before GriffinLim. Defaults to None.
-
- preemphasis (float, optional):
- Preemphasis coefficient. Preemphasis is disabled if == 0.0. Defaults to 0.0.
-
- signal_norm (bool, optional):
- enable/disable signal normalization. Defaults to None.
-
- symmetric_norm (bool, optional):
-            enable/disable symmetric normalization. If set True normalization is performed in the range [-k, k] else [0, k]. Defaults to None.
-
- max_norm (float, optional):
- ```k``` defining the normalization range. Defaults to None.
-
- mel_fmin (int, optional):
- minimum filter frequency for computing melspectrograms. Defaults to None.
-
- mel_fmax (int, optional):
- maximum filter frequency for computing melspectrograms. Defaults to None.
-
- pitch_fmin (int, optional):
- minimum filter frequency for computing pitch. Defaults to None.
-
- pitch_fmax (int, optional):
- maximum filter frequency for computing pitch. Defaults to None.
-
- spec_gain (int, optional):
- gain applied when converting amplitude to DB. Defaults to 20.
-
- stft_pad_mode (str, optional):
- Padding mode for STFT. Defaults to 'reflect'.
-
- clip_norm (bool, optional):
- enable/disable clipping the our of range values in the normalized audio signal. Defaults to True.
-
- griffin_lim_iters (int, optional):
- Number of GriffinLim iterations. Defaults to None.
-
- do_trim_silence (bool, optional):
- enable/disable silence trimming when loading the audio signal. Defaults to False.
-
- trim_db (int, optional):
- DB threshold used for silence trimming. Defaults to 60.
-
- do_sound_norm (bool, optional):
- enable/disable signal normalization. Defaults to False.
-
- do_amp_to_db_linear (bool, optional):
- enable/disable amplitude to dB conversion of linear spectrograms. Defaults to True.
-
- do_amp_to_db_mel (bool, optional):
- enable/disable amplitude to dB conversion of mel spectrograms. Defaults to True.
-
- do_rms_norm (bool, optional):
- enable/disable RMS volume normalization when loading an audio file. Defaults to False.
-
- db_level (int, optional):
- dB level used for rms normalization. The range is -99 to 0. Defaults to None.
-
- stats_path (str, optional):
- Path to the computed stats file. Defaults to None.
-
- verbose (bool, optional):
- enable/disable logging. Defaults to True.
-
- """
-
- def __init__(
- self,
- sample_rate=None,
- resample=False,
- num_mels=None,
- log_func="np.log10",
- min_level_db=None,
- frame_shift_ms=None,
- frame_length_ms=None,
- hop_length=None,
- win_length=None,
- ref_level_db=None,
- fft_size=1024,
- power=None,
- preemphasis=0.0,
- signal_norm=None,
- symmetric_norm=None,
- max_norm=None,
- mel_fmin=None,
- mel_fmax=None,
- pitch_fmax=None,
- pitch_fmin=None,
- spec_gain=20,
- stft_pad_mode="reflect",
- clip_norm=True,
- griffin_lim_iters=None,
- do_trim_silence=False,
- trim_db=60,
- do_sound_norm=False,
- do_amp_to_db_linear=True,
- do_amp_to_db_mel=True,
- do_rms_norm=False,
- db_level=None,
- stats_path=None,
- verbose=True,
- **_,
- ):
-        # setup class attributes
- self.sample_rate = sample_rate
- self.resample = resample
- self.num_mels = num_mels
- self.log_func = log_func
- self.min_level_db = min_level_db or 0
- self.frame_shift_ms = frame_shift_ms
- self.frame_length_ms = frame_length_ms
- self.ref_level_db = ref_level_db
- self.fft_size = fft_size
- self.power = power
- self.preemphasis = preemphasis
- self.griffin_lim_iters = griffin_lim_iters
- self.signal_norm = signal_norm
- self.symmetric_norm = symmetric_norm
- self.mel_fmin = mel_fmin or 0
- self.mel_fmax = mel_fmax
- self.pitch_fmin = pitch_fmin
- self.pitch_fmax = pitch_fmax
- self.spec_gain = float(spec_gain)
- self.stft_pad_mode = stft_pad_mode
- self.max_norm = 1.0 if max_norm is None else float(max_norm)
- self.clip_norm = clip_norm
- self.do_trim_silence = do_trim_silence
- self.trim_db = trim_db
- self.do_sound_norm = do_sound_norm
- self.do_amp_to_db_linear = do_amp_to_db_linear
- self.do_amp_to_db_mel = do_amp_to_db_mel
- self.do_rms_norm = do_rms_norm
- self.db_level = db_level
- self.stats_path = stats_path
- # setup exp_func for db to amp conversion
- if log_func == "np.log":
- self.base = np.e
- elif log_func == "np.log10":
- self.base = 10
- else:
- raise ValueError(" [!] unknown `log_func` value.")
- # setup stft parameters
- if hop_length is None:
- # compute stft parameters from given time values
- self.hop_length, self.win_length = self._stft_parameters()
- else:
- # use stft parameters from config file
- self.hop_length = hop_length
- self.win_length = win_length
- assert min_level_db != 0.0, " [!] min_level_db is 0"
- assert (
- self.win_length <= self.fft_size
- ), f" [!] win_length cannot be larger than fft_size - {self.win_length} vs {self.fft_size}"
- members = vars(self)
- if verbose:
- print(" > Setting up Audio Processor...")
- for key, value in members.items():
- print(" | > {}:{}".format(key, value))
- # create spectrogram utils
- self.mel_basis = self._build_mel_basis()
- self.inv_mel_basis = np.linalg.pinv(self._build_mel_basis())
- # setup scaler
- if stats_path and signal_norm:
- mel_mean, mel_std, linear_mean, linear_std, _ = self.load_stats(stats_path)
- self.setup_scaler(mel_mean, mel_std, linear_mean, linear_std)
- self.signal_norm = True
- self.max_norm = None
- self.clip_norm = None
- self.symmetric_norm = None
-
- @staticmethod
- def init_from_config(config: "Coqpit", verbose=True):
- if "audio" in config:
- return AudioProcessor(verbose=verbose, **config.audio)
- return AudioProcessor(verbose=verbose, **config)
-
- ### setting up the parameters ###
- def _build_mel_basis(
- self,
- ) -> np.ndarray:
- """Build melspectrogram basis.
-
- Returns:
- np.ndarray: melspectrogram basis.
- """
- if self.mel_fmax is not None:
- assert self.mel_fmax <= self.sample_rate // 2
- return librosa.filters.mel(
- sr=self.sample_rate, n_fft=self.fft_size, n_mels=self.num_mels, fmin=self.mel_fmin, fmax=self.mel_fmax
- )
-
- def _stft_parameters(
- self,
- ) -> Tuple[int, int]:
- """Compute the real STFT parameters from the time values.
-
- Returns:
- Tuple[int, int]: hop length and window length for STFT.
- """
- factor = self.frame_length_ms / self.frame_shift_ms
- assert (factor).is_integer(), " [!] frame_shift_ms should divide frame_length_ms"
- hop_length = int(self.frame_shift_ms / 1000.0 * self.sample_rate)
- win_length = int(hop_length * factor)
- return hop_length, win_length
-
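As a quick sanity check of the arithmetic above, here is a standalone sketch of the hop/window computation; the sample rate and frame times are hypothetical example values, not ones taken from a real config.

```python
# Standalone sketch of the _stft_parameters() arithmetic above.
# sample_rate, frame_shift_ms and frame_length_ms are hypothetical values.
sample_rate = 22050
frame_shift_ms = 12.5
frame_length_ms = 50.0

factor = frame_length_ms / frame_shift_ms                  # 4.0, must be an integer
assert factor.is_integer()
hop_length = int(frame_shift_ms / 1000.0 * sample_rate)    # 275 samples
win_length = int(hop_length * factor)                      # 1100 samples
print(hop_length, win_length)                              # 275 1100
```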
- ### normalization ###
- def normalize(self, S: np.ndarray) -> np.ndarray:
- """Normalize values into `[0, self.max_norm]` or `[-self.max_norm, self.max_norm]`
-
- Args:
- S (np.ndarray): Spectrogram to normalize.
-
- Raises:
-            RuntimeError: Mean and variance are computed from incompatible parameters.
-
- Returns:
- np.ndarray: Normalized spectrogram.
- """
- # pylint: disable=no-else-return
- S = S.copy()
- if self.signal_norm:
- # mean-var scaling
- if hasattr(self, "mel_scaler"):
- if S.shape[0] == self.num_mels:
- return self.mel_scaler.transform(S.T).T
- elif S.shape[0] == self.fft_size / 2:
- return self.linear_scaler.transform(S.T).T
- else:
- raise RuntimeError(" [!] Mean-Var stats does not match the given feature dimensions.")
- # range normalization
-            S -= self.ref_level_db  # discard a certain dB range, assuming it is ambient noise
- S_norm = (S - self.min_level_db) / (-self.min_level_db)
- if self.symmetric_norm:
- S_norm = ((2 * self.max_norm) * S_norm) - self.max_norm
- if self.clip_norm:
- S_norm = np.clip(
- S_norm, -self.max_norm, self.max_norm # pylint: disable=invalid-unary-operand-type
- )
- return S_norm
- else:
- S_norm = self.max_norm * S_norm
- if self.clip_norm:
- S_norm = np.clip(S_norm, 0, self.max_norm)
- return S_norm
- else:
- return S
-
- def denormalize(self, S: np.ndarray) -> np.ndarray:
- """Denormalize spectrogram values.
-
- Args:
- S (np.ndarray): Spectrogram to denormalize.
-
- Raises:
- RuntimeError: Mean and variance are incompatible.
-
- Returns:
- np.ndarray: Denormalized spectrogram.
- """
- # pylint: disable=no-else-return
- S_denorm = S.copy()
- if self.signal_norm:
- # mean-var scaling
- if hasattr(self, "mel_scaler"):
- if S_denorm.shape[0] == self.num_mels:
- return self.mel_scaler.inverse_transform(S_denorm.T).T
- elif S_denorm.shape[0] == self.fft_size / 2:
- return self.linear_scaler.inverse_transform(S_denorm.T).T
- else:
- raise RuntimeError(" [!] Mean-Var stats does not match the given feature dimensions.")
- if self.symmetric_norm:
- if self.clip_norm:
- S_denorm = np.clip(
- S_denorm, -self.max_norm, self.max_norm # pylint: disable=invalid-unary-operand-type
- )
- S_denorm = ((S_denorm + self.max_norm) * -self.min_level_db / (2 * self.max_norm)) + self.min_level_db
- return S_denorm + self.ref_level_db
- else:
- if self.clip_norm:
- S_denorm = np.clip(S_denorm, 0, self.max_norm)
- S_denorm = (S_denorm * -self.min_level_db / self.max_norm) + self.min_level_db
- return S_denorm + self.ref_level_db
- else:
- return S_denorm
-
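For reference, a small numeric sketch of the range-normalization branch of `normalize()` above (without the mean-var scaler). The constants below are typical Tacotron-style values and are assumptions, not values read from a config.

```python
import numpy as np

# Numeric sketch of the range normalization above, with symmetric_norm enabled.
# ref_level_db, min_level_db and max_norm are hypothetical example values.
ref_level_db, min_level_db, max_norm = 20.0, -100.0, 4.0

S = np.array([-80.0, -30.0, 20.0])              # spectrogram values in dB
S = S - ref_level_db                            # drop the reference level
S_norm = (S - min_level_db) / (-min_level_db)   # map [min_level_db, 0] dB -> [0, 1]
S_norm = (2 * max_norm) * S_norm - max_norm     # symmetric_norm: [-max_norm, max_norm]
S_norm = np.clip(S_norm, -max_norm, max_norm)
print(S_norm)                                   # [-4.  0.  4.]
```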
- ### Mean-STD scaling ###
- def load_stats(self, stats_path: str) -> Tuple[np.array, np.array, np.array, np.array, Dict]:
- """Loading mean and variance statistics from a `npy` file.
-
- Args:
-            stats_path (str): Path to the `npy` file containing the mean and variance statistics.
-
- Returns:
- Tuple[np.array, np.array, np.array, np.array, Dict]: loaded statistics and the config used to
- compute them.
- """
- stats = np.load(stats_path, allow_pickle=True).item() # pylint: disable=unexpected-keyword-arg
- mel_mean = stats["mel_mean"]
- mel_std = stats["mel_std"]
- linear_mean = stats["linear_mean"]
- linear_std = stats["linear_std"]
- stats_config = stats["audio_config"]
- # check all audio parameters used for computing stats
- skip_parameters = ["griffin_lim_iters", "stats_path", "do_trim_silence", "ref_level_db", "power"]
- for key in stats_config.keys():
- if key in skip_parameters:
- continue
- if key not in ["sample_rate", "trim_db"]:
- assert (
- stats_config[key] == self.__dict__[key]
- ), f" [!] Audio param {key} does not match the value used for computing mean-var stats. {stats_config[key]} vs {self.__dict__[key]}"
- return mel_mean, mel_std, linear_mean, linear_std, stats_config
-
- # pylint: disable=attribute-defined-outside-init
- def setup_scaler(
- self, mel_mean: np.ndarray, mel_std: np.ndarray, linear_mean: np.ndarray, linear_std: np.ndarray
- ) -> None:
- """Initialize scaler objects used in mean-std normalization.
-
- Args:
- mel_mean (np.ndarray): Mean for melspectrograms.
- mel_std (np.ndarray): STD for melspectrograms.
- linear_mean (np.ndarray): Mean for full scale spectrograms.
- linear_std (np.ndarray): STD for full scale spectrograms.
- """
- self.mel_scaler = StandardScaler()
- self.mel_scaler.set_stats(mel_mean, mel_std)
- self.linear_scaler = StandardScaler()
- self.linear_scaler.set_stats(linear_mean, linear_std)
-
- ### DB and AMP conversion ###
- # pylint: disable=no-self-use
- def _amp_to_db(self, x: np.ndarray) -> np.ndarray:
- """Convert amplitude values to decibels.
-
- Args:
- x (np.ndarray): Amplitude spectrogram.
-
- Returns:
- np.ndarray: Decibels spectrogram.
- """
- return self.spec_gain * _log(np.maximum(1e-5, x), self.base)
-
- # pylint: disable=no-self-use
- def _db_to_amp(self, x: np.ndarray) -> np.ndarray:
- """Convert decibels spectrogram to amplitude spectrogram.
-
- Args:
- x (np.ndarray): Decibels spectrogram.
-
- Returns:
- np.ndarray: Amplitude spectrogram.
- """
- return _exp(x / self.spec_gain, self.base)
-
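A standalone round trip of the amplitude/dB conversion above, assuming the default `spec_gain=20` and a base-10 log (i.e. `log_func="np.log10"`):

```python
import numpy as np

# Round trip of _amp_to_db / _db_to_amp above with spec_gain=20 and base 10.
spec_gain = 20.0
amp = np.array([1.0, 0.1, 1e-6])
db = spec_gain * np.log10(np.maximum(1e-5, amp))   # [   0.  -20. -100.]
amp_back = 10 ** (db / spec_gain)                  # [1.e+00 1.e-01 1.e-05] (floored at 1e-5)
print(db, amp_back)
```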
- ### Preemphasis ###
- def apply_preemphasis(self, x: np.ndarray) -> np.ndarray:
- """Apply pre-emphasis to the audio signal. Useful to reduce the correlation between neighbouring signal values.
-
- Args:
- x (np.ndarray): Audio signal.
-
- Raises:
- RuntimeError: Preemphasis coeff is set to 0.
-
- Returns:
- np.ndarray: Decorrelated audio signal.
- """
- if self.preemphasis == 0:
-            raise RuntimeError(" [!] Preemphasis is set to 0.0.")
- return scipy.signal.lfilter([1, -self.preemphasis], [1], x)
-
- def apply_inv_preemphasis(self, x: np.ndarray) -> np.ndarray:
- """Reverse pre-emphasis."""
- if self.preemphasis == 0:
-            raise RuntimeError(" [!] Preemphasis is set to 0.0.")
- return scipy.signal.lfilter([1], [1, -self.preemphasis], x)
-
- ### SPECTROGRAMs ###
- def _linear_to_mel(self, spectrogram: np.ndarray) -> np.ndarray:
- """Project a full scale spectrogram to a melspectrogram.
-
- Args:
- spectrogram (np.ndarray): Full scale spectrogram.
-
- Returns:
- np.ndarray: Melspectrogram
- """
- return np.dot(self.mel_basis, spectrogram)
-
- def _mel_to_linear(self, mel_spec: np.ndarray) -> np.ndarray:
- """Convert a melspectrogram to full scale spectrogram."""
- return np.maximum(1e-10, np.dot(self.inv_mel_basis, mel_spec))
-
- def spectrogram(self, y: np.ndarray) -> np.ndarray:
- """Compute a spectrogram from a waveform.
-
- Args:
- y (np.ndarray): Waveform.
-
- Returns:
- np.ndarray: Spectrogram.
- """
- if self.preemphasis != 0:
- D = self._stft(self.apply_preemphasis(y))
- else:
- D = self._stft(y)
- if self.do_amp_to_db_linear:
- S = self._amp_to_db(np.abs(D))
- else:
- S = np.abs(D)
- return self.normalize(S).astype(np.float32)
-
- def melspectrogram(self, y: np.ndarray) -> np.ndarray:
- """Compute a melspectrogram from a waveform."""
- if self.preemphasis != 0:
- D = self._stft(self.apply_preemphasis(y))
- else:
- D = self._stft(y)
- if self.do_amp_to_db_mel:
- S = self._amp_to_db(self._linear_to_mel(np.abs(D)))
- else:
- S = self._linear_to_mel(np.abs(D))
- return self.normalize(S).astype(np.float32)
-
- def inv_spectrogram(self, spectrogram: np.ndarray) -> np.ndarray:
-        """Convert a spectrogram to a waveform using the Griffin-Lim vocoder."""
- S = self.denormalize(spectrogram)
- S = self._db_to_amp(S)
- # Reconstruct phase
- if self.preemphasis != 0:
- return self.apply_inv_preemphasis(self._griffin_lim(S**self.power))
- return self._griffin_lim(S**self.power)
-
- def inv_melspectrogram(self, mel_spectrogram: np.ndarray) -> np.ndarray:
-        """Convert a melspectrogram to a waveform using the Griffin-Lim vocoder."""
- D = self.denormalize(mel_spectrogram)
- S = self._db_to_amp(D)
- S = self._mel_to_linear(S) # Convert back to linear
- if self.preemphasis != 0:
- return self.apply_inv_preemphasis(self._griffin_lim(S**self.power))
- return self._griffin_lim(S**self.power)
-
- def out_linear_to_mel(self, linear_spec: np.ndarray) -> np.ndarray:
- """Convert a full scale linear spectrogram output of a network to a melspectrogram.
-
- Args:
- linear_spec (np.ndarray): Normalized full scale linear spectrogram.
-
- Returns:
- np.ndarray: Normalized melspectrogram.
- """
- S = self.denormalize(linear_spec)
- S = self._db_to_amp(S)
- S = self._linear_to_mel(np.abs(S))
- S = self._amp_to_db(S)
- mel = self.normalize(S)
- return mel
-
- ### STFT and ISTFT ###
- def _stft(self, y: np.ndarray) -> np.ndarray:
- """Librosa STFT wrapper.
-
- Args:
- y (np.ndarray): Audio signal.
-
- Returns:
- np.ndarray: Complex number array.
- """
- return librosa.stft(
- y=y,
- n_fft=self.fft_size,
- hop_length=self.hop_length,
- win_length=self.win_length,
- pad_mode=self.stft_pad_mode,
- window="hann",
- center=True,
- )
-
- def _istft(self, y: np.ndarray) -> np.ndarray:
- """Librosa iSTFT wrapper."""
- return librosa.istft(y, hop_length=self.hop_length, win_length=self.win_length)
-
- def _griffin_lim(self, S):
- angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
- try:
- S_complex = np.abs(S).astype(np.complex)
- except AttributeError: # np.complex is deprecated since numpy 1.20.0
- S_complex = np.abs(S).astype(complex)
- y = self._istft(S_complex * angles)
- if not np.isfinite(y).all():
- print(" [!] Waveform is not finite everywhere. Skipping the GL.")
- return np.array([0.0])
- for _ in range(self.griffin_lim_iters):
- angles = np.exp(1j * np.angle(self._stft(y)))
- y = self._istft(S_complex * angles)
- return y
-
- def compute_stft_paddings(self, x, pad_sides=1):
-        """Compute paddings used by Librosa's STFT: either right padding only (final frame) or padding on
-        both sides (first and final frames)."""
- assert pad_sides in (1, 2)
- pad = (x.shape[0] // self.hop_length + 1) * self.hop_length - x.shape[0]
- if pad_sides == 1:
- return 0, pad
- return pad // 2, pad // 2 + pad % 2
-
- def compute_f0(self, x: np.ndarray) -> np.ndarray:
- """Compute pitch (f0) of a waveform using the same parameters used for computing melspectrogram.
-
- Args:
- x (np.ndarray): Waveform.
-
- Returns:
- np.ndarray: Pitch.
-
- Examples:
- >>> WAV_FILE = filename = librosa.example('vibeace')
- >>> from TTS.config import BaseAudioConfig
- >>> from TTS.utils.audio import AudioProcessor
- >>> conf = BaseAudioConfig(pitch_fmax=640, pitch_fmin=1)
- >>> ap = AudioProcessor(**conf)
- >>> wav = ap.load_wav(WAV_FILE, sr=ap.sample_rate)[:5 * ap.sample_rate]
- >>> pitch = ap.compute_f0(wav)
- """
-        assert self.pitch_fmax is not None, " [!] Set `pitch_fmax` before calling `compute_f0`."
-        assert self.pitch_fmin is not None, " [!] Set `pitch_fmin` before calling `compute_f0`."
- # align F0 length to the spectrogram length
- if len(x) % self.hop_length == 0:
- x = np.pad(x, (0, self.hop_length // 2), mode=self.stft_pad_mode)
-
- f0 = compute_f0(
- x=x,
- pitch_fmax=self.pitch_fmax,
- pitch_fmin=self.pitch_fmin,
- hop_length=self.hop_length,
- win_length=self.win_length,
- sample_rate=self.sample_rate,
- stft_pad_mode=self.stft_pad_mode,
- center=True,
- )
-
- return f0
-
- ### Audio Processing ###
- def find_endpoint(self, wav: np.ndarray, min_silence_sec=0.8) -> int:
-        """Find the last point without silence at the end of an audio signal.
-
- Args:
- wav (np.ndarray): Audio signal.
-            min_silence_sec (float, optional): Ignore silences that are shorter than this, in seconds. Defaults to 0.8.
-                The silence threshold itself is derived from `self.trim_db`.
-
- Returns:
- int: Last point without silence.
- """
- window_length = int(self.sample_rate * min_silence_sec)
- hop_length = int(window_length / 4)
- threshold = self._db_to_amp(-self.trim_db)
- for x in range(hop_length, len(wav) - window_length, hop_length):
- if np.max(wav[x : x + window_length]) < threshold:
- return x + hop_length
- return len(wav)
-
- def trim_silence(self, wav):
- """Trim silent parts with a threshold and 0.01 sec margin"""
- margin = int(self.sample_rate * 0.01)
- wav = wav[margin:-margin]
- return librosa.effects.trim(wav, top_db=self.trim_db, frame_length=self.win_length, hop_length=self.hop_length)[
- 0
- ]
-
- @staticmethod
- def sound_norm(x: np.ndarray) -> np.ndarray:
- """Normalize the volume of an audio signal.
-
- Args:
- x (np.ndarray): Raw waveform.
-
- Returns:
- np.ndarray: Volume normalized waveform.
- """
- return x / abs(x).max() * 0.95
-
- @staticmethod
- def _rms_norm(wav, db_level=-27):
- r = 10 ** (db_level / 20)
- a = np.sqrt((len(wav) * (r**2)) / np.sum(wav**2))
- return wav * a
-
- def rms_volume_norm(self, x: np.ndarray, db_level: float = None) -> np.ndarray:
- """Normalize the volume based on RMS of the signal.
-
- Args:
- x (np.ndarray): Raw waveform.
-
- Returns:
- np.ndarray: RMS normalized waveform.
- """
- if db_level is None:
- db_level = self.db_level
- assert -99 <= db_level <= 0, " [!] db_level should be between -99 and 0"
- wav = self._rms_norm(x, db_level)
- return wav
-
- ### save and load ###
- def load_wav(self, filename: str, sr: int = None) -> np.ndarray:
- """Read a wav file using Librosa and optionally resample, silence trim, volume normalize.
-
-        Resampling slows down loading the file significantly. Therefore it is recommended to resample the file beforehand.
-
- Args:
- filename (str): Path to the wav file.
- sr (int, optional): Sampling rate for resampling. Defaults to None.
-
- Returns:
- np.ndarray: Loaded waveform.
- """
- if self.resample:
- # loading with resampling. It is significantly slower.
- x, sr = librosa.load(filename, sr=self.sample_rate)
- elif sr is None:
- # SF is faster than librosa for loading files
- x, sr = sf.read(filename)
- assert self.sample_rate == sr, "%s vs %s" % (self.sample_rate, sr)
- else:
- x, sr = librosa.load(filename, sr=sr)
- if self.do_trim_silence:
- try:
- x = self.trim_silence(x)
- except ValueError:
- print(f" [!] File cannot be trimmed for silence - {filename}")
- if self.do_sound_norm:
- x = self.sound_norm(x)
- if self.do_rms_norm:
- x = self.rms_volume_norm(x, self.db_level)
- return x
-
- def save_wav(self, wav: np.ndarray, path: str, sr: int = None, pipe_out=None) -> None:
- """Save a waveform to a file using Scipy.
-
- Args:
- wav (np.ndarray): Waveform to save.
- path (str): Path to a output file.
- sr (int, optional): Sampling rate used for saving to the file. Defaults to None.
- pipe_out (BytesIO, optional): Flag to stdout the generated TTS wav file for shell pipe.
- """
- if self.do_rms_norm:
- wav_norm = self.rms_volume_norm(wav, self.db_level) * 32767
- else:
- wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav))))
-
- wav_norm = wav_norm.astype(np.int16)
- if pipe_out:
- wav_buffer = BytesIO()
- scipy.io.wavfile.write(wav_buffer, sr if sr else self.sample_rate, wav_norm)
- wav_buffer.seek(0)
- pipe_out.buffer.write(wav_buffer.read())
- scipy.io.wavfile.write(path, sr if sr else self.sample_rate, wav_norm)
-
- def get_duration(self, filename: str) -> float:
- """Get the duration of a wav file using Librosa.
-
- Args:
- filename (str): Path to the wav file.
- """
- return librosa.get_duration(filename=filename)
-
- @staticmethod
- def mulaw_encode(wav: np.ndarray, qc: int) -> np.ndarray:
- mu = 2**qc - 1
- # wav_abs = np.minimum(np.abs(wav), 1.0)
- signal = np.sign(wav) * np.log(1 + mu * np.abs(wav)) / np.log(1.0 + mu)
- # Quantize signal to the specified number of levels.
- signal = (signal + 1) / 2 * mu + 0.5
- return np.floor(
- signal,
- )
-
- @staticmethod
- def mulaw_decode(wav, qc):
- """Recovers waveform from quantized values."""
- mu = 2**qc - 1
- x = np.sign(wav) / mu * ((1 + mu) ** np.abs(wav) - 1)
- return x
-
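A round-trip sketch of the μ-law companding pair above with `qc=8`. Rescaling the quantized levels back to `[-1, 1]` before applying `mulaw_decode`'s formula is an assumption about how the two methods are meant to be paired.

```python
import numpy as np

# Mu-law round trip with qc=8 (255 levels); the waveform values are illustrative.
qc = 8
mu = 2**qc - 1
wav = np.array([-0.5, 0.0, 0.25, 0.9])

encoded = np.sign(wav) * np.log(1 + mu * np.abs(wav)) / np.log(1.0 + mu)
levels = np.floor((encoded + 1) / 2 * mu + 0.5)     # integer levels in [0, mu]

# Assumed pairing: rescale the levels back to [-1, 1] before decoding.
y = 2 * levels / mu - 1
decoded = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)
print(levels, decoded)                              # decoded ~ [-0.5, 0.0, 0.25, 0.9]
```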
- @staticmethod
- def encode_16bits(x):
- return np.clip(x * 2**15, -(2**15), 2**15 - 1).astype(np.int16)
-
- @staticmethod
- def quantize(x: np.ndarray, bits: int) -> np.ndarray:
- """Quantize a waveform to a given number of bits.
-
- Args:
- x (np.ndarray): Waveform to quantize. Must be normalized into the range `[-1, 1]`.
- bits (int): Number of quantization bits.
-
- Returns:
- np.ndarray: Quantized waveform.
- """
- return (x + 1.0) * (2**bits - 1) / 2
-
- @staticmethod
- def dequantize(x, bits):
- """Dequantize a waveform from the given number of bits."""
- return 2 * x / (2**bits - 1) - 1
-
-
-def _log(x, base):
- if base == 10:
- return np.log10(x)
- return np.log(x)
-
-
-def _exp(x, base):
- if base == 10:
- return np.power(10, x)
- return np.exp(x)
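Putting the pieces of the deleted `AudioProcessor` together, a minimal end-to-end usage sketch. The import path follows the `compute_f0` docstring example above; all parameter values and file names below are hypothetical.

```python
from TTS.utils.audio import AudioProcessor  # import path as used in the compute_f0 example

# Hypothetical parameter values; a real config would come from BaseAudioConfig / Coqpit.
ap = AudioProcessor(
    sample_rate=22050,
    num_mels=80,
    fft_size=1024,
    hop_length=256,
    win_length=1024,
    min_level_db=-100,
    ref_level_db=20,
    power=1.5,
    griffin_lim_iters=60,
    mel_fmin=0,
    mel_fmax=8000,
    signal_norm=True,
    symmetric_norm=True,
    max_norm=4.0,
)

wav = ap.load_wav("example.wav")        # hypothetical path; trimmed/normalized per the flags
mel = ap.melspectrogram(wav)            # (num_mels, n_frames), normalized
wav_hat = ap.inv_melspectrogram(mel)    # Griffin-Lim reconstruction
ap.save_wav(wav_hat, "reconstructed.wav")
```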
diff --git a/spaces/ashuonnet/skillrecommender/skill_recommender.py b/spaces/ashuonnet/skillrecommender/skill_recommender.py
deleted file mode 100644
index 528ba457c4d14a09914255844cac5d25bb0ea30b..0000000000000000000000000000000000000000
--- a/spaces/ashuonnet/skillrecommender/skill_recommender.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Import the necessary libraries
-import pandas as pd
-
-class skills_recommender:
-
- # Constructor of the class
- def __init__(self):
- # Read the data from an excel file and store it in a pandas dataframe
- self.df = pd.read_excel('./job_market_skills.xlsx')
-
- # Create a dictionary to map each vertical with its name
- self.vertical_mapping = {
- "vertical_0":"Embedded Development",
- "vertical_1":"Cloud Computing",
- "vertical_2":"Project Management",
- "vertical_3":"DevOps",
- "vertical_4":"Hardware Asic",
- "vertical_5":"IT Support",
- "vertical_6":"Networking",
- "vertical_7":"Web Development",
- "vertical_8":"Information Security"
- }
-
- # Method to get the matching vertical skills
- def get_matching_vertical_skills(self, skill_list):
- # Create an empty pandas dataframe to store the matching vertical skills
- return_df = pd.DataFrame(columns=['vertical','skill','score'])
- # Initialize a row index variable
- row_index=0
- # Loop through the skill_list
- for skill in skill_list:
- # Remove leading/trailing whitespaces from the skill
-            skill = skill.strip()
-
- # If the skill is not empty, then check if it matches with any of the skills in the dataframe
- if skill != '':
- for index, row in self.df.iterrows():
- # Get the list of skills in the current row and loop through them
- v_skills = row['skill'].split(' ')
- for _skill in v_skills:
- # Check if the skill matches with the current skill in the row
- if _skill.lower().strip() == skill.lower().strip():
- # If the skill matches, then add the corresponding vertical, skill, and score to the return dataframe
- return_df.loc[row_index] = [row['vertical'],row['skill'],row['score']]
- row_index = row_index+1
- #values_list.append([row['vertical'],row['skill'],row['score']])
- # Return the dataframe containing the matching vertical skills
- return return_df
-
- # Method to get the top 10 skills for a given vertical
- def get_top_10_skills(self,vertical,org_skill_list):
- # Create an empty list to store the recommended skills
- skill_list = []
- # Initialize a skill count variable
- skill_count = 0
- # Get the rows from the dataframe corresponding to the given vertical and sort them by score in descending order
- temp_df = self.df[self.df['vertical'] == vertical]
- temp_df = temp_df.sort_values(['score'], ascending=[False])
-
- # Loop through the rows in the dataframe
- for index, row in temp_df.iterrows():
- # Get the current skill from the row
- new_skill = row['skill']
- # Check if the current skill is not already present in the org_skill_list and add it to the skill_list
- if new_skill not in org_skill_list:
- skill_list.append(new_skill)
- skill_count = skill_count+1
- # If the skill_count reaches 10, then break the loop
- if skill_count == 10:
- break
- # Return the recommended skill_list
- return skill_list
-
- # Method to get the name of a vertical given its code
- def get_vertical_name(self, vertical):
- return self.vertical_mapping[vertical]
-
- # Method to suggest skills based on a given list of skills
- def suggest_skills(self, org_skill_list):
- # Clean skill list if needed
- skill_list = []
- for _skill in org_skill_list:
- # If there are newline characters in the skill, replace them with commas
- if "\n" in _skill:
- _skill = _skill.replace("\n", ",")
- # Add each resulting skill to the skill_list
- skill_list = skill_list + _skill.split(',')
- else:
- skill_list.append(_skill)
-
- # Print the cleaned skill list for debugging purposes
- print("checking for skill list = ", skill_list)
-
- # Get a DataFrame with matching skills for each vertical
- df = self.get_matching_vertical_skills(skill_list)
-
- # Get the count of how many times each vertical appears in the DataFrame
- vertical_counts = df['vertical'].value_counts()
-
- # Get the unique list of verticals in the DataFrame
- verticals = df['vertical'].unique()
-
- # Create an empty dictionary to store scores for each vertical
- scores = {}
-
- # Calculate a score for each vertical based on the sum of the scores of the matching skills
- for vertical in verticals:
- # Initialize the vertical score to 0
- vertical_score = 0
- # Get a DataFrame with only the rows corresponding to the current vertical
- temp_df = df[df['vertical'] == vertical]
- # Calculate the sum of the scores for the matching skills
- vertical_score = temp_df['score'].sum()
- # Store the vertical score in the scores dictionary
- scores[vertical] = vertical_score
-
- # Sort the scores dictionary in descending order by value
- _scores = dict(sorted(scores.items(), key=lambda item: item[1], reverse=True))
-
- # Create a string that lists the original skills
- my_string = ','.join(skill_list)
- text = "Your current skills are: " + my_string
- text = text + "\n\n"
-
- # Define a dictionary to map index to recommended domain text
- rec_dict = {
- 1: "Highest Recommended Domain: ",
- 2: "\nSecond Best Recommended Domain: ",
- 3: "\nAlternate Recommended Domain: ",
- 4: "\nLess Preferable Recommended Domain: ",
-            5: "\nMuch Less Preferable Domain, But Still An Option: "
- }
-
- # Print the sorted scores for debugging purposes
- print(_scores)
-
- # Initialize the recommended domain index to 0
- rec_index = 0
-
- # Loop over the recommended domains and print them along with the top ten missing skills for each domain
- for key in _scores:
- # Increment the recommended domain index
- rec_index = rec_index + 1
- # If we have already printed the top five recommended domains, exit the loop
- if rec_index == 6:
- break
- # Add the recommended domain text to the output string
- text = text + rec_dict[rec_index] + self.get_vertical_name(key) + "\n"
- # Get the top ten skills missing from the original skill list for the current domain
- rec_list = self.get_top_10_skills(key, skill_list)
- i = 1
- # Print each missing skill along with its rank
- for _skill in rec_list:
- text = text + " " + str(i) + ". " + _skill + "\n"
- i = i + 1
-
- # Return the output string
- return text
-
-
-    # Method to print the skills DataFrame (useful for debugging)
-    def print_db(self):
-        # Print the DataFrame containing skill information
-        print(self.df)
-
-
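For context, a minimal usage sketch of the class above. The module name, input skills, and the bundled `job_market_skills.xlsx` location are assumptions about how the Space wired things together.

```python
# Hypothetical driver for the skills_recommender class above.
from skill_recommender import skills_recommender

recommender = skills_recommender()              # reads ./job_market_skills.xlsx
my_skills = ["python", "docker", "linux"]
report = recommender.suggest_skills(my_skills)  # ranked domains + top missing skills per domain
print(report)
```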
diff --git a/spaces/atticus/image-text-retrival-huster/pred_retrieval.py b/spaces/atticus/image-text-retrival-huster/pred_retrieval.py
deleted file mode 100644
index 5ce063f1b8d1410f64c0dee32e8ae55f2f8a3eeb..0000000000000000000000000000000000000000
--- a/spaces/atticus/image-text-retrival-huster/pred_retrieval.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ******************
-Copyright (c) 2018 [Thomson Licensing]
-All Rights Reserved
-This program contains proprietary information which is a trade secret/business \
-secret of [Thomson Licensing] and is protected, even if unpublished, under \
-applicable Copyright laws (including French droit d'auteur) and/or may be \
-subject to one or more patent(s).
-Recipient is to retain this program in confidence and is not permitted to use \
-or make copies thereof other than as permitted in a written agreement with \
-[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \
-by [Thomson Licensing] under express agreement.
-Thomson Licensing is a company of the group TECHNICOLOR
-*******************************************************************************
-This scripts permits one to reproduce training and experiments of:
- Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April).
- Finding beans in burgers: Deep semantic-visual embedding with localization.
- In Proceedings of CVPR (pp. 3984-3993)
-
-Author: Martin Engilberge
-"""
-
-import argparse
-import re
-import time
-
-import numpy as np
-from numpy.__config__ import show
-import torch
-
-
-from misc.model import img_embedding, joint_embedding
-from torch.utils.data import DataLoader, dataset
-
-from misc.dataset import TextDataset
-from misc.utils import collate_fn_cap_padded
-from torch.utils.data import DataLoader
-from misc.utils import load_obj
-from misc.evaluation import recallTopK
-
-from misc.utils import show_imgs
-import sys
-from misc.dataset import TextEncoder
-
-device = torch.device("cuda")
-# device = torch.device("cpu") # uncomment to run with cpu
-
-if __name__ == '__main__':
-
- parser = argparse.ArgumentParser(description='Extract embedding representation for images')
- parser.add_argument("-p", '--path', dest="model_path", help='Path to the weights of the model to evaluate')
- parser.add_argument("-d", '--data', dest="data_path", help='path to the file containing the sentence to embed')
- parser.add_argument("-bs", "--batch_size", help="The size of the batches", type=int, default=1)
-
- args = parser.parse_args()
-
- print("Loading model from:", args.model_path)
- checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage)
-
- join_emb = joint_embedding(checkpoint['args_dict'])
- join_emb.load_state_dict(checkpoint["state_dict"])
-
- for param in join_emb.parameters():
- param.requires_grad = False
-
- join_emb.to(device)
- join_emb.eval()
-
- encoder = TextEncoder()
- print("Loading model done")
- # (4) design intersection mode.
- print("Please input your description of the image that you wanna search >>>")
- for line in sys.stdin:
-
- t0 = time.time()
- cap_str = line.strip()
- # with open(args.data_path, 'w') as cap_file:
- # cap_file.writelines(cap_str)
- t1 = time.time()
-        print("embedding text ...")
- dataset = torch.Tensor(encoder.encode(cap_str)).unsqueeze(dim=0)
- t111 = time.time()
- dataset_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=1, pin_memory=True, collate_fn=collate_fn_cap_padded)
- t11 = time.time()
- caps_enc = list()
- for i, (caps, length) in enumerate(dataset_loader, 0):
- input_caps = caps.to(device)
- with torch.no_grad():
- _, output_emb = join_emb(None, input_caps, length)
- caps_enc.append(output_emb.cpu().data.numpy())
-
- t12 = time.time()
- caps_stack = np.vstack(caps_enc)
- # print(t11 - t1, t12 - t11, t111 - t1)
-
- t2 = time.time()
- print("recall from resources ...")
- # (1) load candidate imgs from saved embeding pkl file.
- imgs_emb_file_path = "/home/atticus/proj/matching/DSVE/imgs_embed/v20210915_01_9408/allImg"
- # imgs_emb(40775, 2400)
- imgs_emb, imgs_path = load_obj(imgs_emb_file_path)
- # (2) calculate the sim between cap and imgs.
- # (3) rank imgs and display the searching result.
- recall_imgs = recallTopK(caps_stack, imgs_emb, imgs_path, ks=5)
-
- t3 = time.time()
- show_imgs(imgs_path=recall_imgs)
-
- # print("input stage time: {} \n text embedding stage time: {} \n recall stage time: {}".format(t1 - t0, t2 - t1, t3 - t2))
-
- print("======== current epoch done ========")
- print("Please input your description of the image that you wanna search >>>")
diff --git a/spaces/autoevaluate/model-evaluator/app.py b/spaces/autoevaluate/model-evaluator/app.py
deleted file mode 100644
index 42e73e70ab3f4da543a14eb5fc26e60b4462804e..0000000000000000000000000000000000000000
--- a/spaces/autoevaluate/model-evaluator/app.py
+++ /dev/null
@@ -1,693 +0,0 @@
-import os
-import time
-from pathlib import Path
-
-import pandas as pd
-import streamlit as st
-import yaml
-from datasets import get_dataset_config_names
-from dotenv import load_dotenv
-from huggingface_hub import list_datasets
-
-from evaluation import filter_evaluated_models
-from utils import (
- AUTOTRAIN_TASK_TO_HUB_TASK,
- commit_evaluation_log,
- create_autotrain_project_name,
- format_col_mapping,
- get_compatible_models,
- get_config_metadata,
- get_dataset_card_url,
- get_key,
- get_metadata,
- http_get,
- http_post,
-)
-
-if Path(".env").is_file():
- load_dotenv(".env")
-
-HF_TOKEN = os.getenv("HF_TOKEN")
-AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
-AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
-DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API")
-
-# Put image tasks on top
-TASK_TO_ID = {
- "image_binary_classification": 17,
- "image_multi_class_classification": 18,
- "binary_classification": 1,
- "multi_class_classification": 2,
- "natural_language_inference": 22,
- "entity_extraction": 4,
- "extractive_question_answering": 5,
- "translation": 6,
- "summarization": 8,
- "text_zero_shot_classification": 23,
-}
-
-TASK_TO_DEFAULT_METRICS = {
- "binary_classification": ["f1", "precision", "recall", "auc", "accuracy"],
- "multi_class_classification": [
- "f1",
- "precision",
- "recall",
- "accuracy",
- ],
- "natural_language_inference": ["f1", "precision", "recall", "auc", "accuracy"],
- "entity_extraction": ["precision", "recall", "f1", "accuracy"],
- "extractive_question_answering": ["f1", "exact_match"],
- "translation": ["sacrebleu"],
- "summarization": ["rouge1", "rouge2", "rougeL", "rougeLsum"],
- "image_binary_classification": ["f1", "precision", "recall", "auc", "accuracy"],
- "image_multi_class_classification": [
- "f1",
- "precision",
- "recall",
- "accuracy",
- ],
- "text_zero_shot_classification": ["accuracy", "loss"],
-}
-
-AUTOTRAIN_TASK_TO_LANG = {
- "translation": "en2de",
- "image_binary_classification": "unk",
- "image_multi_class_classification": "unk",
-}
-
-AUTOTRAIN_MACHINE = {"text_zero_shot_classification": "r5.16x"}
-
-
-SUPPORTED_TASKS = list(TASK_TO_ID.keys())
-
-# Extracted from utils.get_supported_metrics
-# Hardcoded for now due to speed / caching constraints
-SUPPORTED_METRICS = [
- "accuracy",
- "bertscore",
- "bleu",
- "cer",
- "chrf",
- "code_eval",
- "comet",
- "competition_math",
- "coval",
- "cuad",
- "exact_match",
- "f1",
- "frugalscore",
- "google_bleu",
- "mae",
- "mahalanobis",
- "matthews_correlation",
- "mean_iou",
- "meteor",
- "mse",
- "pearsonr",
- "perplexity",
- "precision",
- "recall",
- "roc_auc",
- "rouge",
- "sacrebleu",
- "sari",
- "seqeval",
- "spearmanr",
- "squad",
- "squad_v2",
- "ter",
- "trec_eval",
- "wer",
- "wiki_split",
- "xnli",
- "angelina-wang/directional_bias_amplification",
- "jordyvl/ece",
- "lvwerra/ai4code",
- "lvwerra/amex",
-]
-
-
-#######
-# APP #
-#######
-st.title("Evaluation on the Hub")
-st.markdown(
- """
- Welcome to Hugging Face's automatic model evaluator 👋!
-
- This application allows you to evaluate 🤗 Transformers
- [models](https://huggingface.co/models?library=transformers&sort=downloads)
- across a wide variety of [datasets](https://huggingface.co/datasets) on the
- Hub. Please select the dataset and configuration below. The results of your
- evaluation will be displayed on the [public
- leaderboards](https://huggingface.co/spaces/autoevaluate/leaderboards). For
-    more details, check out our [blog
- post](https://huggingface.co/blog/eval-on-the-hub).
- """
-)
-
-all_datasets = [d.id for d in list_datasets()]
-query_params = st.experimental_get_query_params()
-if "first_query_params" not in st.session_state:
- st.session_state.first_query_params = query_params
-first_query_params = st.session_state.first_query_params
-default_dataset = all_datasets[0]
-if "dataset" in first_query_params:
- if len(first_query_params["dataset"]) > 0 and first_query_params["dataset"][0] in all_datasets:
- default_dataset = first_query_params["dataset"][0]
-
-selected_dataset = st.selectbox(
- "Select a dataset",
- all_datasets,
- index=all_datasets.index(default_dataset),
- help="""Datasets with metadata can be evaluated with 1-click. Configure an evaluation job to add \
- new metadata to a dataset card.""",
-)
-st.experimental_set_query_params(**{"dataset": [selected_dataset]})
-
-# Check if selected dataset can be streamed
-is_valid_dataset = http_get(
- path="/is-valid",
- domain=DATASETS_PREVIEW_API,
- params={"dataset": selected_dataset},
-).json()
-if is_valid_dataset["viewer"] is False and is_valid_dataset["preview"] is False:
- st.error(
- """The dataset you selected is not currently supported. Open a \
- [discussion](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions) for support."""
- )
-
-metadata = get_metadata(selected_dataset, token=HF_TOKEN)
-print(f"INFO -- Dataset metadata: {metadata}")
-if metadata is None:
- st.warning("No evaluation metadata found. Please configure the evaluation job below.")
-
-with st.expander("Advanced configuration"):
- # Select task
- selected_task = st.selectbox(
- "Select a task",
- SUPPORTED_TASKS,
- index=SUPPORTED_TASKS.index(metadata[0]["task_id"]) if metadata is not None else 0,
- help="""Don't see your favourite task here? Open a \
- [discussion](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions) to request it!""",
- )
- # Select config
- configs = get_dataset_config_names(selected_dataset)
- selected_config = st.selectbox(
- "Select a config",
- configs,
- help="""Some datasets contain several sub-datasets, known as _configurations_. \
- Select one to evaluate your models on. \
- See the [docs](https://huggingface.co/docs/datasets/master/en/load_hub#configurations) for more details.
- """,
- )
- # Some datasets have multiple metadata (one per config), so we grab the one associated with the selected config
- config_metadata = get_config_metadata(selected_config, metadata)
- print(f"INFO -- Config metadata: {config_metadata}")
-
- # Select splits
- splits_resp = http_get(
- path="/splits",
- domain=DATASETS_PREVIEW_API,
- params={"dataset": selected_dataset},
- )
- if splits_resp.status_code == 200:
- split_names = []
- all_splits = splits_resp.json()
- for split in all_splits["splits"]:
- if split["config"] == selected_config:
- split_names.append(split["split"])
-
- if config_metadata is not None:
- eval_split = config_metadata["splits"].get("eval_split", None)
- else:
- eval_split = None
- selected_split = st.selectbox(
- "Select a split",
- split_names,
- index=split_names.index(eval_split) if eval_split is not None else 0,
- help="Be wary when evaluating models on the `train` split.",
- )
-
- # Select columns
- rows_resp = http_get(
- path="/first-rows",
- domain=DATASETS_PREVIEW_API,
- params={
- "dataset": selected_dataset,
- "config": selected_config,
- "split": selected_split,
- },
- ).json()
- col_names = list(pd.json_normalize(rows_resp["rows"][0]["row"]).columns)
-
- st.markdown("**Map your dataset columns**")
- st.markdown(
- """The model evaluator uses a standardised set of column names for the input examples and labels. \
- Please define the mapping between your dataset columns (right) and the standardised column names (left)."""
- )
- col1, col2 = st.columns(2)
-
- # TODO: find a better way to layout these items
- # TODO: need graceful way of handling dataset <--> task mismatch for datasets with metadata
- col_mapping = {}
- if selected_task in ["binary_classification", "multi_class_classification"]:
- with col1:
- st.markdown("`text` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- text_col = st.selectbox(
- "This column should contain the text to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "text"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the labels associated with the text",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[text_col] = "text"
- col_mapping[target_col] = "target"
-
- elif selected_task == "text_zero_shot_classification":
- with col1:
- st.markdown("`text` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`classes` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- text_col = st.selectbox(
- "This column should contain the text to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "text"))
- if config_metadata is not None
- else 0,
- )
- classes_col = st.selectbox(
- "This column should contain the classes associated with the text",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "classes"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the index of the correct class",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[text_col] = "text"
- col_mapping[classes_col] = "classes"
- col_mapping[target_col] = "target"
-
- if selected_task in ["natural_language_inference"]:
- config_metadata = get_config_metadata(selected_config, metadata)
- with col1:
- st.markdown("`text1` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`text2` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- text1_col = st.selectbox(
- "This column should contain the first text passage to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "text1"))
- if config_metadata is not None
- else 0,
- )
- text2_col = st.selectbox(
- "This column should contain the second text passage to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "text2"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the labels associated with the text",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[text1_col] = "text1"
- col_mapping[text2_col] = "text2"
- col_mapping[target_col] = "target"
-
- elif selected_task == "entity_extraction":
- with col1:
- st.markdown("`tokens` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`tags` column")
- with col2:
- tokens_col = st.selectbox(
- "This column should contain the array of tokens to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "tokens"))
- if config_metadata is not None
- else 0,
- )
- tags_col = st.selectbox(
- "This column should contain the labels associated with each part of the text",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "tags"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[tokens_col] = "tokens"
- col_mapping[tags_col] = "tags"
-
- elif selected_task == "translation":
- with col1:
- st.markdown("`source` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- text_col = st.selectbox(
- "This column should contain the text to be translated",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "source"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the target translation",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[text_col] = "source"
- col_mapping[target_col] = "target"
-
- elif selected_task == "summarization":
- with col1:
- st.markdown("`text` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- text_col = st.selectbox(
- "This column should contain the text to be summarized",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "text"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the target summary",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[text_col] = "text"
- col_mapping[target_col] = "target"
-
- elif selected_task == "extractive_question_answering":
- if config_metadata is not None:
- col_mapping = config_metadata["col_mapping"]
- # Hub YAML parser converts periods to hyphens, so we remap them here
- col_mapping = format_col_mapping(col_mapping)
- with col1:
- st.markdown("`context` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`question` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`answers.text` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`answers.answer_start` column")
- with col2:
- context_col = st.selectbox(
- "This column should contain the question's context",
- col_names,
- index=col_names.index(get_key(col_mapping, "context")) if config_metadata is not None else 0,
- )
- question_col = st.selectbox(
- "This column should contain the question to be answered, given the context",
- col_names,
- index=col_names.index(get_key(col_mapping, "question")) if config_metadata is not None else 0,
- )
- answers_text_col = st.selectbox(
- "This column should contain example answers to the question, extracted from the context",
- col_names,
- index=col_names.index(get_key(col_mapping, "answers.text")) if config_metadata is not None else 0,
- )
- answers_start_col = st.selectbox(
- "This column should contain the indices in the context of the first character of each `answers.text`",
- col_names,
- index=col_names.index(get_key(col_mapping, "answers.answer_start"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[context_col] = "context"
- col_mapping[question_col] = "question"
- col_mapping[answers_text_col] = "answers.text"
- col_mapping[answers_start_col] = "answers.answer_start"
- elif selected_task in ["image_binary_classification", "image_multi_class_classification"]:
- with col1:
- st.markdown("`image` column")
- st.text("")
- st.text("")
- st.text("")
- st.text("")
- st.markdown("`target` column")
- with col2:
- image_col = st.selectbox(
- "This column should contain the images to be classified",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "image"))
- if config_metadata is not None
- else 0,
- )
- target_col = st.selectbox(
- "This column should contain the labels associated with the images",
- col_names,
- index=col_names.index(get_key(config_metadata["col_mapping"], "target"))
- if config_metadata is not None
- else 0,
- )
- col_mapping[image_col] = "image"
- col_mapping[target_col] = "target"
-
- # Select metrics
- st.markdown("**Select metrics**")
- st.markdown("The following metrics will be computed")
- html_string = " ".join(
- [
-            '<span style="display:inline-block;padding:2px 8px;margin:2px;border-radius:5px;background-color:#e6e6e6">'  # badge markup; exact styling is an assumption
-            + metric
-            + "</span>"
- for metric in TASK_TO_DEFAULT_METRICS[selected_task]
- ]
- )
- st.markdown(html_string, unsafe_allow_html=True)
- selected_metrics = st.multiselect(
- "(Optional) Select additional metrics",
- sorted(list(set(SUPPORTED_METRICS) - set(TASK_TO_DEFAULT_METRICS[selected_task]))),
- help="""User-selected metrics will be computed with their default arguments. \
- For example, `f1` will report results for binary labels. \
- Check out the [available metrics](https://huggingface.co/metrics) for more details.""",
- )
-
-with st.form(key="form"):
- compatible_models = get_compatible_models(selected_task, [selected_dataset])
- selected_models = st.multiselect(
- "Select the models you wish to evaluate",
- compatible_models,
- help="""Don't see your favourite model in this list? Add the dataset and task it was trained on to the \
- [model card metadata.](https://huggingface.co/docs/hub/models-cards#model-card-metadata)""",
- )
- print("INFO -- Selected models before filter:", selected_models)
-
- hf_username = st.text_input("Enter your 🤗 Hub username to be notified when the evaluation is finished")
-
- submit_button = st.form_submit_button("Evaluate models 🚀")
-
- if submit_button:
- if len(hf_username) == 0:
- st.warning("No 🤗 Hub username provided! Please enter your username and try again.")
- elif len(selected_models) == 0:
- st.warning("⚠️ No models were selected for evaluation! Please select at least one model and try again.")
- elif len(selected_models) > 10:
- st.warning("Only 10 models can be evaluated at once. Please select fewer models and try again.")
- else:
- # Filter out previously evaluated models
- selected_models = filter_evaluated_models(
- selected_models,
- selected_task,
- selected_dataset,
- selected_config,
- selected_split,
- selected_metrics,
- )
- print("INFO -- Selected models after filter:", selected_models)
- if len(selected_models) > 0:
- project_payload = {
- "username": AUTOTRAIN_USERNAME,
- "proj_name": create_autotrain_project_name(selected_dataset, selected_config),
- "task": TASK_TO_ID[selected_task],
- "config": {
- "language": AUTOTRAIN_TASK_TO_LANG[selected_task]
- if selected_task in AUTOTRAIN_TASK_TO_LANG
- else "en",
- "max_models": 5,
- "instance": {
- "provider": "sagemaker" if selected_task in AUTOTRAIN_MACHINE.keys() else "ovh",
- "instance_type": AUTOTRAIN_MACHINE[selected_task]
- if selected_task in AUTOTRAIN_MACHINE.keys()
- else "p3",
- "max_runtime_seconds": 172800,
- "num_instances": 1,
- "disk_size_gb": 200,
- },
- "evaluation": {
- "metrics": selected_metrics,
- "models": selected_models,
- "hf_username": hf_username,
- },
- },
- }
- print(f"INFO -- Payload: {project_payload}")
- project_json_resp = http_post(
- path="/projects/create",
- payload=project_payload,
- token=HF_TOKEN,
- domain=AUTOTRAIN_BACKEND_API,
- ).json()
- print(f"INFO -- Project creation response: {project_json_resp}")
-
- if project_json_resp["created"]:
- data_payload = {
- "split": 4, # use "auto" split choice in AutoTrain
- "col_mapping": col_mapping,
- "load_config": {"max_size_bytes": 0, "shuffle": False},
- "dataset_id": selected_dataset,
- "dataset_config": selected_config,
- "dataset_split": selected_split,
- }
- data_json_resp = http_post(
- path=f"/projects/{project_json_resp['id']}/data/dataset",
- payload=data_payload,
- token=HF_TOKEN,
- domain=AUTOTRAIN_BACKEND_API,
- ).json()
- print(f"INFO -- Dataset creation response: {data_json_resp}")
- if data_json_resp["download_status"] == 1:
- train_json_resp = http_post(
- path=f"/projects/{project_json_resp['id']}/data/start_processing",
- token=HF_TOKEN,
- domain=AUTOTRAIN_BACKEND_API,
- ).json()
- # For local development we process and approve projects on-the-fly
- if "localhost" in AUTOTRAIN_BACKEND_API:
- with st.spinner("⏳ Waiting for data processing to complete ..."):
- is_data_processing_success = False
- while is_data_processing_success is not True:
- project_status = http_get(
- path=f"/projects/{project_json_resp['id']}",
- token=HF_TOKEN,
- domain=AUTOTRAIN_BACKEND_API,
- ).json()
- if project_status["status"] == 3:
- is_data_processing_success = True
- time.sleep(10)
-
- # Approve training job
- train_job_resp = http_post(
- path=f"/projects/{project_json_resp['id']}/start_training",
- token=HF_TOKEN,
- domain=AUTOTRAIN_BACKEND_API,
- ).json()
- st.success("✅ Data processing and project approval complete - go forth and evaluate!")
- else:
- # Prod/staging submissions are evaluated in a cron job via run_evaluation_jobs.py
- print(f"INFO -- AutoTrain job response: {train_json_resp}")
- if train_json_resp["success"]:
- train_eval_index = {
- "train-eval-index": [
- {
- "config": selected_config,
- "task": AUTOTRAIN_TASK_TO_HUB_TASK[selected_task],
- "task_id": selected_task,
- "splits": {"eval_split": selected_split},
- "col_mapping": col_mapping,
- }
- ]
- }
- selected_metadata = yaml.dump(train_eval_index, sort_keys=False)
- dataset_card_url = get_dataset_card_url(selected_dataset)
- st.success("✅ Successfully submitted evaluation job!")
- st.markdown(
- f"""
- Evaluation can take up to 1 hour to complete, so grab a ☕️ or 🍵 while you wait:
-
- * 🔔 A [Hub pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) with the evaluation results will be opened for each model you selected. Check your email for notifications.
- * 📊 Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) to view the results from your submission once the Hub pull request is merged.
- * 🥱 Tired of configuring evaluations? Add the following metadata to the [dataset card]({dataset_card_url}) to enable 1-click evaluations:
- """ # noqa
- )
- st.markdown(
- f"""
- ```yaml
- {selected_metadata}
- """
- )
- print("INFO -- Pushing evaluation job logs to the Hub")
- evaluation_log = {}
- evaluation_log["project_id"] = project_json_resp["id"]
- evaluation_log["autotrain_env"] = (
- "staging" if "staging" in AUTOTRAIN_BACKEND_API else "prod"
- )
- evaluation_log["payload"] = project_payload
- evaluation_log["project_creation_response"] = project_json_resp
- evaluation_log["dataset_creation_response"] = data_json_resp
- evaluation_log["autotrain_job_response"] = train_json_resp
- commit_evaluation_log(evaluation_log, hf_access_token=HF_TOKEN)
- else:
- st.error("🙈 Oh no, there was an error submitting your evaluation job!")
- else:
- st.warning("⚠️ No models left to evaluate! Please select other models and try again.")
diff --git a/spaces/autonomous019/Story_Generator_v2/spaces_info.py b/spaces/autonomous019/Story_Generator_v2/spaces_info.py
deleted file mode 100644
index 649c3185305b49bc3fe3fad8bc6be26719f599fb..0000000000000000000000000000000000000000
--- a/spaces/autonomous019/Story_Generator_v2/spaces_info.py
+++ /dev/null
@@ -1,76 +0,0 @@
-description = """Gradio Demo for BLOOM. To use it, simply add your text, or click one of the examples to load them.
-Tips:
-- Do NOT talk to BLOOM as an entity, it's not a chatbot but a webpage/blog/article completion model.
-- For the best results: MIMIC a few sentences of a webpage similar to the content you want to generate.
-Start a paragraph as if YOU were writing a blog, webpage, math post, coding article and BLOOM will generate a coherent follow-up. Longer prompts usually give more interesting results.
-- Content: Please see our [content disclaimer](https://hf.co/spaces/bigscience/bloom-book) before using the model, as it may sometimes behave in unexpected ways.
-
-Options:
-- sampling: imaginative completions (may be not super accurate e.g. math/history)
-- greedy: accurate completions (may be more boring or have repetitions)
-"""
-
-wip_description = """JAX / Flax Gradio Demo for BLOOM. The 176B BLOOM model running on a TPU v3-256 pod, with 2D model parallelism and custom mesh axes.
-Note:
-1. For this WIP demo, only **sampling** is supported.
-2. Rendering of the screenshot is currently not optimised. To experience the true speed of JAX / Flax, tick 'just output raw text'.
-"""
-
-examples = [
- [
- 'A "whatpu" is a small, furry animal native to Tanzania. An example of a sentence that uses the word whatpu is: We were traveling in Africa and we saw these very cute whatpus. To do a "farduddle" means to jump up and down really fast. An example of a sentence that uses the word farduddle is:',
- 32,
- "Sample",
- "Sample 1",
- ],
- [
- "A poem about the beauty of science by Alfred Edgar Brittle\nTitle: The Magic Craft\nIn the old times",
- 50,
- "Sample",
- "Sample 1",
- ],
- ["استخراج العدد العاملي في لغة بايثون:", 30, "Greedy", "Sample 1"],
- ["Pour déguster un ortolan, il faut tout d'abord", 32, "Sample", "Sample 1"],
- [
- "Traduce español de España a español de Argentina\nEl coche es rojo - el auto es rojo\nEl ordenador es nuevo - la computadora es nueva\nel boligrafo es negro -",
- 16,
- "Sample",
- "Sample 1",
- ],
- [
- "Estos ejemplos quitan vocales de las palabras\nEjemplos:\nhola - hl\nmanzana - mnzn\npapas - pps\nalacran - lcrn\npapa -",
- 16,
- "Sample",
- "Sample 1",
- ],
- [
- "Question: If I put cheese into the fridge, will it melt?\nAnswer:",
- 32,
- "Sample",
- "Sample 1",
- ],
- ["Math exercise - answers:\n34+10=44\n54+20=", 16, "Greedy", "Sample 1"],
- [
- "Question: Where does the Greek Goddess Persephone spend half of the year when she is not with her mother?\nAnswer:",
- 24,
- "Greedy",
- "Sample 1",
- ],
- [
- "spelling test answers.\nWhat are the letters in « language »?\nAnswer: l-a-n-g-u-a-g-e\nWhat are the letters in « Romanian »?\nAnswer:",
- 24,
- "Greedy",
- "Sample 1",
- ],
-]
-
-initial_prompt_value = """استخراج العدد العاملي في لغة بايثون :
-def factorial(n):
- if n == 0:
- return 1
- else:
- result = 1
- for i in range(1, n + 1) :
- result *= i
- return result
-print(factorial(5))"""
\ No newline at end of file
diff --git a/spaces/avivdm1/AutoGPT/autogpt/commands/execute_code.py b/spaces/avivdm1/AutoGPT/autogpt/commands/execute_code.py
deleted file mode 100644
index 11266f852727f2f8aedbc995b1e504a17acbfb77..0000000000000000000000000000000000000000
--- a/spaces/avivdm1/AutoGPT/autogpt/commands/execute_code.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""Execute code in a Docker container"""
-import os
-import subprocess
-
-import docker
-from docker.errors import ImageNotFound
-
-from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
-
-
-def execute_python_file(file: str) -> str:
- """Execute a Python file in a Docker container and return the output
-
- Args:
- file (str): The name of the file to execute
-
- Returns:
- str: The output of the file
- """
-
- print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'")
-
- if not file.endswith(".py"):
- return "Error: Invalid file type. Only .py files are allowed."
-
- file_path = path_in_workspace(file)
-
- if not os.path.isfile(file_path):
- return f"Error: File '{file}' does not exist."
-
- if we_are_running_in_a_docker_container():
- result = subprocess.run(
- f"python {file_path}", capture_output=True, encoding="utf8", shell=True
- )
- if result.returncode == 0:
- return result.stdout
- else:
- return f"Error: {result.stderr}"
-
- try:
- client = docker.from_env()
-
- # You can replace this with the desired Python image/version
- # You can find available Python images on Docker Hub:
- # https://hub.docker.com/_/python
- image_name = "python:3-alpine"
- try:
- client.images.get(image_name)
- print(f"Image '{image_name}' found locally")
- except ImageNotFound:
- print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
- # Use the low-level API to stream the pull response
- low_level_client = docker.APIClient()
- for line in low_level_client.pull(image_name, stream=True, decode=True):
- # Print the status and progress, if available
- status = line.get("status")
- progress = line.get("progress")
- if status and progress:
- print(f"{status}: {progress}")
- elif status:
- print(status)
-
- container = client.containers.run(
- image_name,
- f"python {file}",
- volumes={
- os.path.abspath(WORKSPACE_PATH): {
- "bind": "/workspace",
- "mode": "ro",
- }
- },
- working_dir="/workspace",
- stderr=True,
- stdout=True,
- detach=True,
- )
-
- container.wait()
- logs = container.logs().decode("utf-8")
- container.remove()
-
- # print(f"Execution complete. Output: {output}")
- # print(f"Logs: {logs}")
-
- return logs
-
- except docker.errors.DockerException as e:
- print(
- "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
- )
- return f"Error: {str(e)}"
-
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-def execute_shell(command_line: str) -> str:
- """Execute a shell command and return the output
-
- Args:
- command_line (str): The command line to execute
-
- Returns:
- str: The output of the command
- """
- current_dir = os.getcwd()
- # Change dir into workspace if necessary
- if str(WORKSPACE_PATH) not in current_dir:
- os.chdir(WORKSPACE_PATH)
-
- print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
-
- result = subprocess.run(command_line, capture_output=True, shell=True)
- output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
-
- # Change back to whatever the prior working dir was
-
- os.chdir(current_dir)
-
- return output
-
-
-def execute_shell_popen(command_line) -> str:
- """Execute a shell command with Popen and returns an english description
- of the event and the process id
-
- Args:
- command_line (str): The command line to execute
-
- Returns:
- str: Description of the fact that the process started and its id
- """
- current_dir = os.getcwd()
- # Change dir into workspace if necessary
- if str(WORKSPACE_PATH) not in current_dir:
- os.chdir(WORKSPACE_PATH)
-
- print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
-
- do_not_show_output = subprocess.DEVNULL
- process = subprocess.Popen(
- command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
- )
-
- # Change back to whatever the prior working dir was
-
- os.chdir(current_dir)
-
- return f"Subprocess started with PID:'{str(process.pid)}'"
-
-
-def we_are_running_in_a_docker_container() -> bool:
- """Check if we are running in a Docker container
-
- Returns:
- bool: True if we are running in a Docker container, False otherwise
- """
- return os.path.exists("/.dockerenv")
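For reference, a minimal sketch of the container-detection-plus-subprocess fallback used above; the script path is a placeholder and the sketch deliberately skips the docker-py branch:

import os
import subprocess

def run_python_script(path: str) -> str:
    # /.dockerenv is created by Docker inside containers, so its presence is a
    # reasonable (if not perfectly reliable) signal that we are already sandboxed.
    if os.path.exists("/.dockerenv"):
        result = subprocess.run(
            ["python", path], capture_output=True, encoding="utf8"
        )
        return result.stdout if result.returncode == 0 else f"Error: {result.stderr}"
    return "Refusing to run outside a container in this sketch."

# Hypothetical usage; 'hello.py' is a placeholder file name.
print(run_python_script("hello.py"))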
diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/img2img_gradio.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/img2img_gradio.py
deleted file mode 100644
index 7fd4678380cd957b7c4b2b0ab752db9006e95015..0000000000000000000000000000000000000000
--- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/img2img_gradio.py
+++ /dev/null
@@ -1,283 +0,0 @@
-import gradio as gr
-import numpy as np
-import torch
-from torchvision.utils import make_grid
-import os, re
-from PIL import Image
-import torch
-import numpy as np
-from random import randint
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange
-from torchvision.utils import make_grid
-import time
-from pytorch_lightning import seed_everything
-from torch import autocast
-from einops import rearrange, repeat
-from contextlib import nullcontext
-from ldmlib.util import instantiate_from_config
-from transformers import logging
-import pandas as pd
-from optimUtils import split_weighted_subprompts, logger
-logging.set_verbosity_error()
-import mimetypes
-mimetypes.init()
-mimetypes.add_type("application/javascript", ".js")
-
-
-def chunk(it, size):
- it = iter(it)
- return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(ckpt, verbose=False):
- print(f"Loading model from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
- return sd
-
-
-def load_img(image, h0, w0):
-
- image = image.convert("RGB")
- w, h = image.size
- print(f"loaded input image of size ({w}, {h})")
- if h0 is not None and w0 is not None:
- h, w = h0, w0
-
- w, h = map(lambda x: x - x % 64, (w, h))  # resize to an integer multiple of 64
-
- print(f"New image size ({w}, {h})")
- image = image.resize((w, h), resample=Image.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return 2.0 * image - 1.0
-
-config = "optimizedSD/v1-inference.yaml"
-ckpt = "models/ldm/stable-diffusion-v1/model.ckpt"
-sd = load_model_from_config(f"{ckpt}")
-li, lo = [], []
-for key, v_ in sd.items():
- sp = key.split(".")
- if (sp[0]) == "model":
- if "input_blocks" in sp:
- li.append(key)
- elif "middle_block" in sp:
- li.append(key)
- elif "time_embed" in sp:
- li.append(key)
- else:
- lo.append(key)
-for key in li:
- sd["model1." + key[6:]] = sd.pop(key)
-for key in lo:
- sd["model2." + key[6:]] = sd.pop(key)
-
-config = OmegaConf.load(f"{config}")
-
-model = instantiate_from_config(config.modelUNet)
-_, _ = model.load_state_dict(sd, strict=False)
-model.eval()
-
-modelCS = instantiate_from_config(config.modelCondStage)
-_, _ = modelCS.load_state_dict(sd, strict=False)
-modelCS.eval()
-
-modelFS = instantiate_from_config(config.modelFirstStage)
-_, _ = modelFS.load_state_dict(sd, strict=False)
-modelFS.eval()
-del sd
-
-def generate(
- image,
- prompt,
- strength,
- ddim_steps,
- n_iter,
- batch_size,
- Height,
- Width,
- scale,
- ddim_eta,
- unet_bs,
- device,
- seed,
- outdir,
- img_format,
- turbo,
- full_precision,
-):
-
- if seed == "":
- seed = randint(0, 1000000)
- seed = int(seed)
- seed_everything(seed)
-
- # Logging
- sampler = "ddim"
- logger(locals(), log_csv = "logs/img2img_gradio_logs.csv")
-
- init_image = load_img(image, Height, Width).to(device)
- model.unet_bs = unet_bs
- model.turbo = turbo
- model.cdevice = device
- modelCS.cond_stage_model.device = device
-
- if device != "cpu" and full_precision == False:
- model.half()
- modelCS.half()
- modelFS.half()
- init_image = init_image.half()
-
- tic = time.time()
- os.makedirs(outdir, exist_ok=True)
- outpath = outdir
- sample_path = os.path.join(outpath, "_".join(re.split(":| ", prompt)))[:150]
- os.makedirs(sample_path, exist_ok=True)
- base_count = len(os.listdir(sample_path))
-
- # n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
- assert prompt is not None
- data = [batch_size * [prompt]]
-
- modelFS.to(device)
-
- init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
- init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelFS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- assert 0.0 <= strength <= 1.0, "can only work with strength in [0.0, 1.0]"
- t_enc = int(strength * ddim_steps)
- print(f"target t_enc is {t_enc} steps")
-
- if full_precision == False and device != "cpu":
- precision_scope = autocast
- else:
- precision_scope = nullcontext
-
- all_samples = []
- seeds = ""
- with torch.no_grad():
- all_samples = list()
- for _ in trange(n_iter, desc="Sampling"):
- for prompts in tqdm(data, desc="data"):
- with precision_scope("cuda"):
- modelCS.to(device)
- uc = None
- if scale != 1.0:
- uc = modelCS.get_learned_conditioning(batch_size * [""])
- if isinstance(prompts, tuple):
- prompts = list(prompts)
-
- subprompts, weights = split_weighted_subprompts(prompts[0])
- if len(subprompts) > 1:
- c = torch.zeros_like(uc)
- totalWeight = sum(weights)
- # normalize each "sub prompt" and add it
- for i in range(len(subprompts)):
- weight = weights[i]
- # if not skip_normalize:
- weight = weight / totalWeight
- c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
- else:
- c = modelCS.get_learned_conditioning(prompts)
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelCS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- # encode (scaled latent)
- z_enc = model.stochastic_encode(
- init_latent, torch.tensor([t_enc] * batch_size).to(device), seed, ddim_eta, ddim_steps
- )
- # decode it
- samples_ddim = model.sample(
- t_enc,
- c,
- z_enc,
- unconditional_guidance_scale=scale,
- unconditional_conditioning=uc,
- sampler = sampler
- )
-
- modelFS.to(device)
- print("saving images")
- for i in range(batch_size):
-
- x_samples_ddim = modelFS.decode_first_stage(samples_ddim[i].unsqueeze(0))
- x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
- all_samples.append(x_sample.to("cpu"))
- x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
- Image.fromarray(x_sample.astype(np.uint8)).save(
- os.path.join(sample_path, "seed_" + str(seed) + "_" + f"{base_count:05}.{img_format}")
- )
- seeds += str(seed) + ","
- seed += 1
- base_count += 1
-
- if device != "cpu":
- mem = torch.cuda.memory_allocated() / 1e6
- modelFS.to("cpu")
- while torch.cuda.memory_allocated() / 1e6 >= mem:
- time.sleep(1)
-
- del samples_ddim
- del x_sample
- del x_samples_ddim
- print("memory_final = ", torch.cuda.memory_allocated() / 1e6)
-
- toc = time.time()
-
- time_taken = (toc - tic) / 60.0
- grid = torch.cat(all_samples, 0)
- grid = make_grid(grid, nrow=n_iter)
- grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
-
- txt = (
- "Samples finished in "
- + str(round(time_taken, 3))
- + " minutes and exported to \n"
- + sample_path
- + "\nSeeds used = "
- + seeds[:-1]
- )
- return Image.fromarray(grid.astype(np.uint8)), txt
-
-
-demo = gr.Interface(
- fn=generate,
- inputs=[
- gr.Image(tool="editor", type="pil"),
- "text",
- gr.Slider(0, 1, value=0.75),
- gr.Slider(1, 1000, value=50),
- gr.Slider(1, 100, step=1),
- gr.Slider(1, 100, step=1),
- gr.Slider(64, 4096, value=512, step=64),
- gr.Slider(64, 4096, value=512, step=64),
- gr.Slider(0, 50, value=7.5, step=0.1),
- gr.Slider(0, 1, step=0.01),
- gr.Slider(1, 2, value=1, step=1),
- gr.Text(value="cuda"),
- "text",
- gr.Text(value="outputs/img2img-samples"),
- gr.Radio(["png", "jpg"], value='png'),
- "checkbox",
- "checkbox",
- ],
- outputs=["image", "text"],
-)
-demo.launch()
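The sub-prompt blending in generate() normalizes each weight by the total before summing conditionings. A toy sketch of that arithmetic, with plain floats standing in for the conditioning tensors (an assumption for illustration):

# Weighted sub-prompt blending: each sub-prompt contributes weight/totalWeight.
subprompts = ["castle", "forest"]
weights = [3.0, 1.0]
fake_conditioning = {"castle": 10.0, "forest": 2.0}  # placeholder "tensors"

total = sum(weights)
blended = 0.0
for text, w in zip(subprompts, weights):
    blended += (w / total) * fake_conditioning[text]

print(blended)  # 0.75 * 10.0 + 0.25 * 2.0 = 8.0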
diff --git a/spaces/awacke1/Gradio-Blocks-Demo-2/README.md b/spaces/awacke1/Gradio-Blocks-Demo-2/README.md
deleted file mode 100644
index 124b205b616fd02da9d1973cd8f3529eb5b087e6..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Gradio-Blocks-Demo-2/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GradioBlocksTextGenerator
-emoji: 🚀
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.0.24
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/GradioAutoCSVLoaderToPlotly/README.md b/spaces/awacke1/GradioAutoCSVLoaderToPlotly/README.md
deleted file mode 100644
index 492dcf5a30e6d7bbbcfc4669a836fa6c8711a2e0..0000000000000000000000000000000000000000
--- a/spaces/awacke1/GradioAutoCSVLoaderToPlotly/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: GradioAutoCSVLoaderToPlotly
-emoji: 🚀
-colorFrom: yellow
-colorTo: purple
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/SKLearnSkopsTabularEditor/app.py b/spaces/awacke1/SKLearnSkopsTabularEditor/app.py
deleted file mode 100644
index 9d6eb9ad30b0d326a97a3d36f2844fad831eb29e..0000000000000000000000000000000000000000
--- a/spaces/awacke1/SKLearnSkopsTabularEditor/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-
-# title and description are optional
-title = "Top Ten Fun Facts"
-description = "This model predicts questions and answers with top ten topics most likely to help with fun facts from consensus. Drag and drop any column of the below example datasets to edit values as you wish and test your inputs to tabular processing AI models.."
-
-gr.Interface.load("huggingface/scikit-learn/tabular-playground", title=title, description=description).launch()
\ No newline at end of file
diff --git a/spaces/awacke1/Three.JS-TheCube-Game/style.css b/spaces/awacke1/Three.JS-TheCube-Game/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Three.JS-TheCube-Game/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/nvSTFT.py b/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/nvSTFT.py
deleted file mode 100644
index 62bd5a008f81929054f036c81955d5d73377f772..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/nvSTFT.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import math
-import os
-os.environ["LRU_CACHE_CAPACITY"] = "3"
-import random
-import torch
-import torch.utils.data
-import numpy as np
-import librosa
-from librosa.util import normalize
-from librosa.filters import mel as librosa_mel_fn
-from scipy.io.wavfile import read
-import soundfile as sf
-import torch.nn.functional as F
-
-def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
- sampling_rate = None
- try:
- data, sampling_rate = sf.read(full_path, always_2d=True)  # read audio with soundfile
- except Exception as ex:
- print(f"'{full_path}' failed to load.\nException:")
- print(ex)
- if return_empty_on_exception:
- return [], sampling_rate or target_sr or 48000
- else:
- raise Exception(ex)
-
- if len(data.shape) > 1:
- data = data[:, 0]
- assert len(data) > 2  # check that the audio file is longer than 2 samples (otherwise the slice above operated on the wrong dimension)
-
- if np.issubdtype(data.dtype, np.integer): # if audio data is type int
- max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
- else: # if audio data is type fp32
- max_mag = max(np.amax(data), -np.amin(data))
- max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
-
- data = torch.FloatTensor(data.astype(np.float32))/max_mag
-
- if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
- return [], sampling_rate or target_sr or 48000
- if target_sr is not None and sampling_rate != target_sr:
- data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
- sampling_rate = target_sr
-
- return data, sampling_rate
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
-
-def dynamic_range_decompression(x, C=1):
- return np.exp(x) / C
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-def dynamic_range_decompression_torch(x, C=1):
- return torch.exp(x) / C
-
-class STFT():
- def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
- self.target_sr = sr
-
- self.n_mels = n_mels
- self.n_fft = n_fft
- self.win_size = win_size
- self.hop_length = hop_length
- self.fmin = fmin
- self.fmax = fmax
- self.clip_val = clip_val
- self.mel_basis = {}
- self.hann_window = {}
-
- def get_mel(self, y, keyshift=0, speed=1, center=False):
- sampling_rate = self.target_sr
- n_mels = self.n_mels
- n_fft = self.n_fft
- win_size = self.win_size
- hop_length = self.hop_length
- fmin = self.fmin
- fmax = self.fmax
- clip_val = self.clip_val
-
- factor = 2 ** (keyshift / 12)
- n_fft_new = int(np.round(n_fft * factor))
- win_size_new = int(np.round(win_size * factor))
- hop_length_new = int(np.round(hop_length * speed))
-
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- mel_basis_key = str(fmax)+'_'+str(y.device)
- if mel_basis_key not in self.mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
- self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
-
- keyshift_key = str(keyshift)+'_'+str(y.device)
- if keyshift_key not in self.hann_window:
- self.hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
-
- pad_left = (win_size_new - hop_length_new) //2
- pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left)
- if pad_right < y.size(-1):
- mode = 'reflect'
- else:
- mode = 'constant'
- y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode)
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=self.hann_window[keyshift_key],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
- # print(111,spec)
- spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
- if keyshift != 0:
- size = n_fft // 2 + 1
- resize = spec.size(1)
- if resize < size:
- spec = F.pad(spec, (0, 0, 0, size-resize))
- spec = spec[:, :size, :] * win_size / win_size_new
-
- # print(222,spec)
- spec = torch.matmul(self.mel_basis[mel_basis_key], spec)
- # print(333,spec)
- spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
- # print(444,spec)
- return spec
-
- def __call__(self, audiopath):
- audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
- spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
- return spect
-
-stft = STFT()
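The keyshift handling in get_mel() rescales the STFT parameters by a semitone factor of 2**(keyshift/12); a quick numeric check with example values:

import numpy as np

# How the mel extractor above rescales its STFT parameters for a key shift.
n_fft, win_size, hop_length = 1024, 1024, 256
keyshift, speed = 2, 1.0            # +2 semitones, normal speed (example values)

factor = 2 ** (keyshift / 12)        # one semitone corresponds to a factor of 2**(1/12)
n_fft_new = int(np.round(n_fft * factor))
win_size_new = int(np.round(win_size * factor))
hop_length_new = int(np.round(hop_length * speed))

print(factor, n_fft_new, win_size_new, hop_length_new)
# ~1.1225, 1149, 1149, 256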
diff --git a/spaces/badayvedat/LLaVA/llava/model/multimodal_encoder/builder.py b/spaces/badayvedat/LLaVA/llava/model/multimodal_encoder/builder.py
deleted file mode 100644
index 2b13589d4e55af529fe0838c4130c2033ac10478..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/llava/model/multimodal_encoder/builder.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import os
-from .clip_encoder import CLIPVisionTower
-
-
-def build_vision_tower(vision_tower_cfg, **kwargs):
- vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
- is_absolute_path_exists = os.path.exists(vision_tower)
- if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
- return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
-
- raise ValueError(f'Unknown vision tower: {vision_tower}')
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/crossfade/gui.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/crossfade/gui.js
deleted file mode 100644
index 40184cf4c9f43d1c469760f42aa7d70f8586aabb..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/crossfade/gui.js
+++ /dev/null
@@ -1,39 +0,0 @@
-var transitionParams = {
- "useTexture": true,
- "transition": 0.5,
- "transitionSpeed": 2.0,
- "texture": 5,
- "loopTexture": true,
- "animateTransition": true,
- "textureThreshold": 0.3
-};
-
-function initGUI() {
-
- var gui = new dat.GUI();
-
- gui.add( transitionParams, "useTexture" ).onChange( function( value ) {
-
- transition.useTexture( value );
-
- } );
-
- gui.add( transitionParams, 'loopTexture' );
-
- gui.add( transitionParams, 'texture', { Perlin: 0, Squares: 1, Cells: 2, Distort: 3, Gradient: 4, Radial: 5 } ).onChange( function( value ) {
-
- transition.setTexture( value );
-
- } ).listen();
-
- gui.add( transitionParams, "textureThreshold", 0, 1, 0.01 ).onChange( function( value ) {
-
- transition.setTextureThreshold( value );
-
- } );
-
- gui.add( transitionParams, "animateTransition" );
- gui.add( transitionParams, "transition", 0, 1, 0.01 ).listen();
- gui.add( transitionParams, "transitionSpeed", 0.5, 5, 0.01 );
-
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderLib/distanceRGBA_frag.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderLib/distanceRGBA_frag.glsl.js
deleted file mode 100644
index 7c78545b38e00f34644e6576e4e99228dbfc614a..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderLib/distanceRGBA_frag.glsl.js
+++ /dev/null
@@ -1,33 +0,0 @@
-export default /* glsl */`
-#define DISTANCE
-
-uniform vec3 referencePosition;
-uniform float nearDistance;
-uniform float farDistance;
-varying vec3 vWorldPosition;
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-void main () {
-
- #include
-
- vec4 diffuseColor = vec4( 1.0 );
-
- #include
- #include
- #include
-
- float dist = length( vWorldPosition - referencePosition );
- dist = ( dist - nearDistance ) / ( farDistance - nearDistance );
- dist = saturate( dist ); // clamp to [ 0, 1 ]
-
- gl_FragColor = packDepthToRGBA( dist );
-
-}
-`;
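The fragment shader above normalizes the distance to referencePosition into [0, 1] before packing it into RGBA; the same mapping in plain Python:

# The shader maps a world-space distance into [0, 1] before packing it.
def normalized_distance(dist, near, far):
    t = (dist - near) / (far - near)
    return min(max(t, 0.0), 1.0)    # saturate(), i.e. clamp to [0, 1]

print(normalized_distance(7.5, 5.0, 10.0))   # 0.5
print(normalized_distance(12.0, 5.0, 10.0))  # 1.0 (clamped)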
diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220620231624.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220620231624.py
deleted file mode 100644
index b694f26981ce076a89361a2c7c94a91a502b9e03..0000000000000000000000000000000000000000
--- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220620231624.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#-*- coding : utf-8-*-
-import os,subprocess,base64
-from subprocess import STDOUT #os process manipuation
-import streamlit as st
-# @st.cache
-# def gh():
-# """install ghostscript on the linux machine"""
-
-# proc = subprocess.Popen('apt-get update', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash")
-# proc = subprocess.Popen('apt-get install sudo', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash")
-# proc = subprocess.Popen('sudo apt update', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash")
-# proc = subprocess.Popen('apt install ghostscript python3-tk', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash")
-# proc = subprocess.Popen('apt-get install -y libgl1-mesa-glx', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash")
-# proc.wait()
-# gh()
-import pandas as pd
-import camelot as cam # extracting tables from PDFs
-
-st.title("PDF Table Extractor")
-
-input_pdf = st.file_uploader(label = "", type = 'pdf')
-
-page_number = st.text_input("Enter the PDF page number that contains the table, e.g. 3", value = 1)
-
-if input_pdf is not None:
- # byte object into a PDF file
- with open("input.pdf", "wb") as f:
- base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8')
- f.write(base64.b64decode(base64_pdf))
- f.close()
-
- # read the pdf and parse it using stream
- tables = cam.read_pdf("input.pdf", pages=page_number)
- result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter')
- tables[0].to_excel(result,index=False)
- # for i in range(0,len(tables)):
- # table = tables[i].df
- # sheetname = str(i)
- # table.to_excel(result, sheetname,index=False)
-
- with open('result.xlsx','rb') as f:
- st.download_button('Extraction complete, click to download!', f, file_name='result.xlsx', mime="application/vnd.ms-excel")
\ No newline at end of file
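A minimal sketch of the camelot call the app wraps, writing every detected table to its own sheet; it assumes a local input.pdf with a table on page 1:

# Assumes "input.pdf" exists in the working directory and page 1 contains a table.
import camelot
import pandas as pd

tables = camelot.read_pdf("input.pdf", pages="1")
print(len(tables), "table(s) found")

with pd.ExcelWriter("result.xlsx", engine="xlsxwriter") as writer:
    for i, table in enumerate(tables):
        # each extracted table is exposed as a pandas DataFrame via .df
        table.df.to_excel(writer, sheet_name=str(i), index=False)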
diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/prompt_parser.py b/spaces/bigjoker/stable-diffusion-webui/modules/prompt_parser.py
deleted file mode 100644
index a7bbfa4ea73cbfcb6da0e1012ac166042b6fae08..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/modules/prompt_parser.py
+++ /dev/null
@@ -1,373 +0,0 @@
-import re
-from collections import namedtuple
-from typing import List
-import lark
-
-# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
-# will be represented with prompt_schedule like this (assuming steps=100):
-# [25, 'fantasy landscape with a mountain and an oak in foreground shoddy']
-# [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy']
-# [60, 'fantasy landscape with a lake and an oak in foreground in background masterful']
-# [75, 'fantasy landscape with a lake and an oak in background masterful']
-# [100, 'fantasy landscape with a lake and a christmas tree in background masterful']
-
-schedule_parser = lark.Lark(r"""
-!start: (prompt | /[][():]/+)*
-prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
-!emphasized: "(" prompt ")"
- | "(" prompt ":" prompt ")"
- | "[" prompt "]"
-scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
-alternate: "[" prompt ("|" prompt)+ "]"
-WHITESPACE: /\s+/
-plain: /([^\\\[\]():|]|\\.)+/
-%import common.SIGNED_NUMBER -> NUMBER
-""")
-
-def get_learned_conditioning_prompt_schedules(prompts, steps):
- """
- >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0]
- >>> g("test")
- [[10, 'test']]
- >>> g("a [b:3]")
- [[3, 'a '], [10, 'a b']]
- >>> g("a [b: 3]")
- [[3, 'a '], [10, 'a b']]
- >>> g("a [[[b]]:2]")
- [[2, 'a '], [10, 'a [[b]]']]
- >>> g("[(a:2):3]")
- [[3, ''], [10, '(a:2)']]
- >>> g("a [b : c : 1] d")
- [[1, 'a b d'], [10, 'a c d']]
- >>> g("a[b:[c:d:2]:1]e")
- [[1, 'abe'], [2, 'ace'], [10, 'ade']]
- >>> g("a [unbalanced")
- [[10, 'a [unbalanced']]
- >>> g("a [b:.5] c")
- [[5, 'a c'], [10, 'a b c']]
- >>> g("a [{b|d{:.5] c") # not handling this right now
- [[5, 'a c'], [10, 'a {b|d{ c']]
- >>> g("((a][:b:c [d:3]")
- [[3, '((a][:b:c '], [10, '((a][:b:c d']]
- >>> g("[a|(b:1.1)]")
- [[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']]
- """
-
- def collect_steps(steps, tree):
- l = [steps]
- class CollectSteps(lark.Visitor):
- def scheduled(self, tree):
- tree.children[-1] = float(tree.children[-1])
- if tree.children[-1] < 1:
- tree.children[-1] *= steps
- tree.children[-1] = min(steps, int(tree.children[-1]))
- l.append(tree.children[-1])
- def alternate(self, tree):
- l.extend(range(1, steps+1))
- CollectSteps().visit(tree)
- return sorted(set(l))
-
- def at_step(step, tree):
- class AtStep(lark.Transformer):
- def scheduled(self, args):
- before, after, _, when = args
- yield before or () if step <= when else after
- def alternate(self, args):
- yield next(args[(step - 1)%len(args)])
- def start(self, args):
- def flatten(x):
- if type(x) == str:
- yield x
- else:
- for gen in x:
- yield from flatten(gen)
- return ''.join(flatten(args))
- def plain(self, args):
- yield args[0].value
- def __default__(self, data, children, meta):
- for child in children:
- yield child
- return AtStep().transform(tree)
-
- def get_schedule(prompt):
- try:
- tree = schedule_parser.parse(prompt)
- except lark.exceptions.LarkError as e:
- if 0:
- import traceback
- traceback.print_exc()
- return [[steps, prompt]]
- return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)]
-
- promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
- return [promptdict[prompt] for prompt in prompts]
-
-
-ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
-
-
-def get_learned_conditioning(model, prompts, steps):
- """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the comdition (cond),
- and the sampling step at which this condition is to be replaced by the next one.
-
- Input:
- (model, ['a red crown', 'a [blue:green:5] jeweled crown'], 20)
-
- Output:
- [
- [
- ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0523, ..., -0.4901, -0.3066, 0.0674], ..., [ 0.3317, -0.5102, -0.4066, ..., 0.4119, -0.7647, -1.0160]], device='cuda:0'))
- ],
- [
- ScheduledPromptConditioning(end_at_step=5, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.0192, 0.3867, -0.4644, ..., 0.1135, -0.3696, -0.4625]], device='cuda:0')),
- ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.7352, -0.4356, -0.7888, ..., 0.6994, -0.4312, -1.2593]], device='cuda:0'))
- ]
- ]
- """
- res = []
-
- prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps)
- cache = {}
-
- for prompt, prompt_schedule in zip(prompts, prompt_schedules):
-
- cached = cache.get(prompt, None)
- if cached is not None:
- res.append(cached)
- continue
-
- texts = [x[1] for x in prompt_schedule]
- conds = model.get_learned_conditioning(texts)
-
- cond_schedule = []
- for i, (end_at_step, text) in enumerate(prompt_schedule):
- cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i]))
-
- cache[prompt] = cond_schedule
- res.append(cond_schedule)
-
- return res
-
-
-re_AND = re.compile(r"\bAND\b")
-re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
-
-def get_multicond_prompt_list(prompts):
- res_indexes = []
-
- prompt_flat_list = []
- prompt_indexes = {}
-
- for prompt in prompts:
- subprompts = re_AND.split(prompt)
-
- indexes = []
- for subprompt in subprompts:
- match = re_weight.search(subprompt)
-
- text, weight = match.groups() if match is not None else (subprompt, 1.0)
-
- weight = float(weight) if weight is not None else 1.0
-
- index = prompt_indexes.get(text, None)
- if index is None:
- index = len(prompt_flat_list)
- prompt_flat_list.append(text)
- prompt_indexes[text] = index
-
- indexes.append((index, weight))
-
- res_indexes.append(indexes)
-
- return res_indexes, prompt_flat_list, prompt_indexes
-
-
-class ComposableScheduledPromptConditioning:
- def __init__(self, schedules, weight=1.0):
- self.schedules: List[ScheduledPromptConditioning] = schedules
- self.weight: float = weight
-
-
-class MulticondLearnedConditioning:
- def __init__(self, shape, batch):
- self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
- self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
-
-def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
- """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
- For each prompt, the list is obtained by splitting the prompt using the AND separator.
-
- https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/
- """
-
- res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)
-
- learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
-
- res = []
- for indexes in res_indexes:
- res.append([ComposableScheduledPromptConditioning(learned_conditioning[i], weight) for i, weight in indexes])
-
- return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
-
-
-def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
- param = c[0][0].cond
- res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
- for i, cond_schedule in enumerate(c):
- target_index = 0
- for current, (end_at, cond) in enumerate(cond_schedule):
- if current_step <= end_at:
- target_index = current
- break
- res[i] = cond_schedule[target_index].cond
-
- return res
-
-
-def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
- param = c.batch[0][0].schedules[0].cond
-
- tensors = []
- conds_list = []
-
- for batch_no, composable_prompts in enumerate(c.batch):
- conds_for_batch = []
-
- for cond_index, composable_prompt in enumerate(composable_prompts):
- target_index = 0
- for current, (end_at, cond) in enumerate(composable_prompt.schedules):
- if current_step <= end_at:
- target_index = current
- break
-
- conds_for_batch.append((len(tensors), composable_prompt.weight))
- tensors.append(composable_prompt.schedules[target_index].cond)
-
- conds_list.append(conds_for_batch)
-
- # if prompts have wildly different lengths above the limit we'll get tensors of different shapes
- # and won't be able to torch.stack them. So this fixes that.
- token_count = max([x.shape[0] for x in tensors])
- for i in range(len(tensors)):
- if tensors[i].shape[0] != token_count:
- last_vector = tensors[i][-1:]
- last_vector_repeated = last_vector.repeat([token_count - tensors[i].shape[0], 1])
- tensors[i] = torch.vstack([tensors[i], last_vector_repeated])
-
- return conds_list, torch.stack(tensors).to(device=param.device, dtype=param.dtype)
-
-
-re_attention = re.compile(r"""
-\\\(|
-\\\)|
-\\\[|
-\\]|
-\\\\|
-\\|
-\(|
-\[|
-:([+-]?[.\d]+)\)|
-\)|
-]|
-[^\\()\[\]:]+|
-:
-""", re.X)
-
-re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
-
-def parse_prompt_attention(text):
- """
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
- Accepted tokens are:
- (abc) - increases attention to abc by a multiplier of 1.1
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
- [abc] - decreases attention to abc by a multiplier of 1.1
- \( - literal character '('
- \[ - literal character '['
- \) - literal character ')'
- \] - literal character ']'
- \\ - literal character '\'
- anything else - just text
-
- >>> parse_prompt_attention('normal text')
- [['normal text', 1.0]]
- >>> parse_prompt_attention('an (important) word')
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
- >>> parse_prompt_attention('(unbalanced')
- [['unbalanced', 1.1]]
- >>> parse_prompt_attention('\(literal\]')
- [['(literal]', 1.0]]
- >>> parse_prompt_attention('(unnecessary)(parens)')
- [['unnecessaryparens', 1.1]]
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
- [['a ', 1.0],
- ['house', 1.5730000000000004],
- [' ', 1.1],
- ['on', 1.0],
- [' a ', 1.1],
- ['hill', 0.55],
- [', sun, ', 1.1],
- ['sky', 1.4641000000000006],
- ['.', 1.1]]
- """
-
- res = []
- round_brackets = []
- square_brackets = []
-
- round_bracket_multiplier = 1.1
- square_bracket_multiplier = 1 / 1.1
-
- def multiply_range(start_position, multiplier):
- for p in range(start_position, len(res)):
- res[p][1] *= multiplier
-
- for m in re_attention.finditer(text):
- text = m.group(0)
- weight = m.group(1)
-
- if text.startswith('\\'):
- res.append([text[1:], 1.0])
- elif text == '(':
- round_brackets.append(len(res))
- elif text == '[':
- square_brackets.append(len(res))
- elif weight is not None and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), float(weight))
- elif text == ')' and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
- elif text == ']' and len(square_brackets) > 0:
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
- else:
- parts = re.split(re_break, text)
- for i, part in enumerate(parts):
- if i > 0:
- res.append(["BREAK", -1])
- res.append([part, 1.0])
-
- for pos in round_brackets:
- multiply_range(pos, round_bracket_multiplier)
-
- for pos in square_brackets:
- multiply_range(pos, square_bracket_multiplier)
-
- if len(res) == 0:
- res = [["", 1.0]]
-
- # merge runs of identical weights
- i = 0
- while i + 1 < len(res):
- if res[i][1] == res[i + 1][1]:
- res[i][0] += res[i + 1][0]
- res.pop(i + 1)
- else:
- i += 1
-
- return res
-
-if __name__ == "__main__":
- import doctest
- doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
-else:
- import torch # doctest faster
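The bracket weighting above compounds multiplicatively: every nested or unmatched '(' applies another factor of 1.1, '[' applies 1/1.1, and an explicit "(text:1.3)" sets the innermost factor. A quick arithmetic check against the docstring examples, in plain Python:

round_mult = 1.1
# three nested round brackets compound to ~1.331
print(round_mult * round_mult * round_mult)   # ~1.331
# an explicit 1.3 wrapped in two more round brackets gives ~1.573,
# matching the '(((house:1.3))' value in the docstring above
print(1.3 * round_mult * round_mult)          # ~1.573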
diff --git a/spaces/bioriAsaeru/text-to-voice/Avira Antivirus Pro 2020 15.0.2002.1755 Crack Activation Key The Best Antivirus Solution.md b/spaces/bioriAsaeru/text-to-voice/Avira Antivirus Pro 2020 15.0.2002.1755 Crack Activation Key The Best Antivirus Solution.md
deleted file mode 100644
index f1b675cf6a161b927205ae306ed44722cd3bdfa5..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Avira Antivirus Pro 2020 15.0.2002.1755 Crack Activation Key The Best Antivirus Solution.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Avira Antivirus Pro 2020 15.0.2002.1755 Crack Activation Key Download
Download ✯ https://urloso.com/2uyRix
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/_base_explorers.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/_base_explorers.py
deleted file mode 100644
index d3f26666aa596f7bd2e8695c4f00e7963e978ceb..0000000000000000000000000000000000000000
--- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/_base_explorers.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from abc import ABC, abstractmethod
-import time
-import typing as tp
-from dora import Explorer
-import treetable as tt
-
-
-def get_sheep_ping(sheep) -> tp.Optional[str]:
- """Return the amount of time since the Sheep made some update
- to its log. Returns a str using the relevant time unit."""
- ping = None
- if sheep.log is not None and sheep.log.exists():
- delta = time.time() - sheep.log.stat().st_mtime
- if delta > 3600 * 24:
- ping = f'{delta / (3600 * 24):.1f}d'
- elif delta > 3600:
- ping = f'{delta / (3600):.1f}h'
- elif delta > 60:
- ping = f'{delta / 60:.1f}m'
- else:
- ping = f'{delta:.1f}s'
- return ping
-
-
-class BaseExplorer(ABC, Explorer):
- """Base explorer for AudioCraft grids.
-
- All task specific solvers are expected to implement the `get_grid_metrics`
- method to specify logic about metrics to display for a given task.
-
- If additional stages are used, the child explorer must define how to handle
- these new stages in the `process_history` and `process_sheep` methods.
- """
- def stages(self):
- return ["train", "valid", "evaluate"]
-
- def get_grid_meta(self):
- """Returns the list of Meta information to display for each XP/job.
- """
- return [
- tt.leaf("index", align=">"),
- tt.leaf("name", wrap=140),
- tt.leaf("state"),
- tt.leaf("sig", align=">"),
- tt.leaf("sid", align="<"),
- ]
-
- @abstractmethod
- def get_grid_metrics(self):
- """Return the metrics that should be displayed in the tracking table.
- """
- ...
-
- def process_sheep(self, sheep, history):
- train = {
- "epoch": len(history),
- }
- parts = {"train": train}
- for metrics in history:
- for key, sub in metrics.items():
- part = parts.get(key, {})
- if 'duration' in sub:
- # Convert to minutes for readability.
- sub['duration'] = sub['duration'] / 60.
- part.update(sub)
- parts[key] = part
- ping = get_sheep_ping(sheep)
- if ping is not None:
- for name in self.stages():
- if name not in parts:
- parts[name] = {}
- # Add the ping to each part for convenience.
- parts[name]['ping'] = ping
- return parts
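The ping formatting in get_sheep_ping picks the largest time unit whose threshold the delta exceeds; a standalone sketch of that logic:

# Standalone sketch of the time-unit formatting used by get_sheep_ping above.
def format_ping(delta_seconds: float) -> str:
    if delta_seconds > 3600 * 24:
        return f"{delta_seconds / (3600 * 24):.1f}d"
    elif delta_seconds > 3600:
        return f"{delta_seconds / 3600:.1f}h"
    elif delta_seconds > 60:
        return f"{delta_seconds / 60:.1f}m"
    return f"{delta_seconds:.1f}s"

print(format_ping(45))       # '45.0s'
print(format_ping(5400))     # '1.5h'
print(format_ping(259200))   # '3.0d'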
diff --git a/spaces/camel-ai/camel-data-explorer/README.md b/spaces/camel-ai/camel-data-explorer/README.md
deleted file mode 100644
index ae030b6bea2ca505c98c584da175ee649e6942bd..0000000000000000000000000000000000000000
--- a/spaces/camel-ai/camel-data-explorer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Camel Data Explorer
-emoji: 🦀
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PyAccess.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PyAccess.py
deleted file mode 100644
index 99b46a4a66c013afc08edf134384e7a1d4dc200a..0000000000000000000000000000000000000000
--- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PyAccess.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#
-# The Python Imaging Library
-# Pillow fork
-#
-# Python implementation of the PixelAccess Object
-#
-# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
-# Copyright (c) 1995-2009 by Fredrik Lundh.
-# Copyright (c) 2013 Eric Soroos
-#
-# See the README file for information on usage and redistribution
-#
-
-# Notes:
-#
-# * Implements the pixel access object following Access.c
-# * Taking only the tuple form, which is used from python.
-# * Fill.c uses the integer form, but it's still going to use the old
-# Access.c implementation.
-#
-
-import logging
-import sys
-
-from ._deprecate import deprecate
-
-try:
- from cffi import FFI
-
- defs = """
- struct Pixel_RGBA {
- unsigned char r,g,b,a;
- };
- struct Pixel_I16 {
- unsigned char l,r;
- };
- """
- ffi = FFI()
- ffi.cdef(defs)
-except ImportError as ex:
- # Allow error import for doc purposes, but error out when accessing
- # anything in core.
- from ._util import DeferredError
-
- FFI = ffi = DeferredError(ex)
-
-logger = logging.getLogger(__name__)
-
-
-class PyAccess:
- def __init__(self, img, readonly=False):
- deprecate("PyAccess", 11)
- vals = dict(img.im.unsafe_ptrs)
- self.readonly = readonly
- self.image8 = ffi.cast("unsigned char **", vals["image8"])
- self.image32 = ffi.cast("int **", vals["image32"])
- self.image = ffi.cast("unsigned char **", vals["image"])
- self.xsize, self.ysize = img.im.size
- self._img = img
-
- # Keep pointer to im object to prevent dereferencing.
- self._im = img.im
- if self._im.mode in ("P", "PA"):
- self._palette = img.palette
-
- # Debugging is polluting test traces, only useful here
- # when hacking on PyAccess
- # logger.debug("%s", vals)
- self._post_init()
-
- def _post_init(self):
- pass
-
- def __setitem__(self, xy, color):
- """
- Modifies the pixel at x,y. The color is given as a single
- numerical value for single band images, and a tuple for
- multi-band images
-
- :param xy: The pixel coordinate, given as (x, y). See
- :ref:`coordinate-system`.
- :param color: The pixel value.
- """
- if self.readonly:
- msg = "Attempt to putpixel a read only image"
- raise ValueError(msg)
- (x, y) = xy
- if x < 0:
- x = self.xsize + x
- if y < 0:
- y = self.ysize + y
- (x, y) = self.check_xy((x, y))
-
- if (
- self._im.mode in ("P", "PA")
- and isinstance(color, (list, tuple))
- and len(color) in [3, 4]
- ):
- # RGB or RGBA value for a P or PA image
- if self._im.mode == "PA":
- alpha = color[3] if len(color) == 4 else 255
- color = color[:3]
- color = self._palette.getcolor(color, self._img)
- if self._im.mode == "PA":
- color = (color, alpha)
-
- return self.set_pixel(x, y, color)
-
- def __getitem__(self, xy):
- """
- Returns the pixel at x,y. The pixel is returned as a single
- value for single band images or a tuple for multiple band
- images
-
- :param xy: The pixel coordinate, given as (x, y). See
- :ref:`coordinate-system`.
- :returns: a pixel value for single band images, a tuple of
- pixel values for multiband images.
- """
- (x, y) = xy
- if x < 0:
- x = self.xsize + x
- if y < 0:
- y = self.ysize + y
- (x, y) = self.check_xy((x, y))
- return self.get_pixel(x, y)
-
- putpixel = __setitem__
- getpixel = __getitem__
-
- def check_xy(self, xy):
- (x, y) = xy
- if not (0 <= x < self.xsize and 0 <= y < self.ysize):
- msg = "pixel location out of range"
- raise ValueError(msg)
- return xy
-
-
-class _PyAccess32_2(PyAccess):
- """PA, LA, stored in first and last bytes of a 32 bit word"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)
-
- def get_pixel(self, x, y):
- pixel = self.pixels[y][x]
- return pixel.r, pixel.a
-
- def set_pixel(self, x, y, color):
- pixel = self.pixels[y][x]
- # tuple
- pixel.r = min(color[0], 255)
- pixel.a = min(color[1], 255)
-
-
-class _PyAccess32_3(PyAccess):
- """RGB and friends, stored in the first three bytes of a 32 bit word"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)
-
- def get_pixel(self, x, y):
- pixel = self.pixels[y][x]
- return pixel.r, pixel.g, pixel.b
-
- def set_pixel(self, x, y, color):
- pixel = self.pixels[y][x]
- # tuple
- pixel.r = min(color[0], 255)
- pixel.g = min(color[1], 255)
- pixel.b = min(color[2], 255)
- pixel.a = 255
-
-
-class _PyAccess32_4(PyAccess):
- """RGBA etc, all 4 bytes of a 32 bit word"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)
-
- def get_pixel(self, x, y):
- pixel = self.pixels[y][x]
- return pixel.r, pixel.g, pixel.b, pixel.a
-
- def set_pixel(self, x, y, color):
- pixel = self.pixels[y][x]
- # tuple
- pixel.r = min(color[0], 255)
- pixel.g = min(color[1], 255)
- pixel.b = min(color[2], 255)
- pixel.a = min(color[3], 255)
-
-
-class _PyAccess8(PyAccess):
- """1, L, P, 8 bit images stored as uint8"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = self.image8
-
- def get_pixel(self, x, y):
- return self.pixels[y][x]
-
- def set_pixel(self, x, y, color):
- try:
- # integer
- self.pixels[y][x] = min(color, 255)
- except TypeError:
- # tuple
- self.pixels[y][x] = min(color[0], 255)
-
-
-class _PyAccessI16_N(PyAccess):
- """I;16 access, native bitendian without conversion"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("unsigned short **", self.image)
-
- def get_pixel(self, x, y):
- return self.pixels[y][x]
-
- def set_pixel(self, x, y, color):
- try:
- # integer
- self.pixels[y][x] = min(color, 65535)
- except TypeError:
- # tuple
- self.pixels[y][x] = min(color[0], 65535)
-
-
-class _PyAccessI16_L(PyAccess):
- """I;16L access, with conversion"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("struct Pixel_I16 **", self.image)
-
- def get_pixel(self, x, y):
- pixel = self.pixels[y][x]
- return pixel.l + pixel.r * 256
-
- def set_pixel(self, x, y, color):
- pixel = self.pixels[y][x]
- try:
- color = min(color, 65535)
- except TypeError:
- color = min(color[0], 65535)
-
- pixel.l = color & 0xFF # noqa: E741
- pixel.r = color >> 8
-
-
-class _PyAccessI16_B(PyAccess):
- """I;16B access, with conversion"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("struct Pixel_I16 **", self.image)
-
- def get_pixel(self, x, y):
- pixel = self.pixels[y][x]
- return pixel.l * 256 + pixel.r
-
- def set_pixel(self, x, y, color):
- pixel = self.pixels[y][x]
- try:
- color = min(color, 65535)
- except Exception:
- color = min(color[0], 65535)
-
- pixel.l = color >> 8 # noqa: E741
- pixel.r = color & 0xFF
-
-
-class _PyAccessI32_N(PyAccess):
- """Signed Int32 access, native endian"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = self.image32
-
- def get_pixel(self, x, y):
- return self.pixels[y][x]
-
- def set_pixel(self, x, y, color):
- self.pixels[y][x] = color
-
-
-class _PyAccessI32_Swap(PyAccess):
- """I;32L/B access, with byteswapping conversion"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = self.image32
-
- def reverse(self, i):
- orig = ffi.new("int *", i)
- chars = ffi.cast("unsigned char *", orig)
- chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0]
- return ffi.cast("int *", chars)[0]
-
- def get_pixel(self, x, y):
- return self.reverse(self.pixels[y][x])
-
- def set_pixel(self, x, y, color):
- self.pixels[y][x] = self.reverse(color)
-
-
-class _PyAccessF(PyAccess):
- """32 bit float access"""
-
- def _post_init(self, *args, **kwargs):
- self.pixels = ffi.cast("float **", self.image32)
-
- def get_pixel(self, x, y):
- return self.pixels[y][x]
-
- def set_pixel(self, x, y, color):
- try:
- # not a tuple
- self.pixels[y][x] = color
- except TypeError:
- # tuple
- self.pixels[y][x] = color[0]
-
-
-mode_map = {
- "1": _PyAccess8,
- "L": _PyAccess8,
- "P": _PyAccess8,
- "I;16N": _PyAccessI16_N,
- "LA": _PyAccess32_2,
- "La": _PyAccess32_2,
- "PA": _PyAccess32_2,
- "RGB": _PyAccess32_3,
- "LAB": _PyAccess32_3,
- "HSV": _PyAccess32_3,
- "YCbCr": _PyAccess32_3,
- "RGBA": _PyAccess32_4,
- "RGBa": _PyAccess32_4,
- "RGBX": _PyAccess32_4,
- "CMYK": _PyAccess32_4,
- "F": _PyAccessF,
- "I": _PyAccessI32_N,
-}
-
-if sys.byteorder == "little":
- mode_map["I;16"] = _PyAccessI16_N
- mode_map["I;16L"] = _PyAccessI16_N
- mode_map["I;16B"] = _PyAccessI16_B
-
- mode_map["I;32L"] = _PyAccessI32_N
- mode_map["I;32B"] = _PyAccessI32_Swap
-else:
- mode_map["I;16"] = _PyAccessI16_L
- mode_map["I;16L"] = _PyAccessI16_L
- mode_map["I;16B"] = _PyAccessI16_N
-
- mode_map["I;32L"] = _PyAccessI32_Swap
- mode_map["I;32B"] = _PyAccessI32_N
-
-
-def new(img, readonly=False):
- access_type = mode_map.get(img.mode, None)
- if not access_type:
- logger.debug("PyAccess Not Implemented: %s", img.mode)
- return None
- return access_type(img, readonly)
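A mode-agnostic sketch of the coordinate handling shared by the accessors above (negative indices wrap around, everything else is bounds-checked); this is plain Python and does not depend on Pillow:

def check_xy(x, y, xsize, ysize):
    # negative coordinates wrap, mirroring PyAccess.__getitem__/__setitem__
    if x < 0:
        x = xsize + x
    if y < 0:
        y = ysize + y
    if not (0 <= x < xsize and 0 <= y < ysize):
        raise ValueError("pixel location out of range")
    return x, y

print(check_xy(-1, -1, 640, 480))  # (639, 479)
print(check_xy(10, 20, 640, 480))  # (10, 20)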
diff --git a/spaces/candlend/vits-hoshimi/vits/transforms.py b/spaces/candlend/vits-hoshimi/vits/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/candlend/vits-hoshimi/vits/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
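The spline above first turns unnormalized widths (and, identically, heights) into monotone bin edges via a softmax, a minimum-width floor and a cumulative sum; a self-contained sketch of that step with example sizes:

import torch
import torch.nn.functional as F

# Sketch of the bin-edge construction used by rational_quadratic_spline above.
min_bin_width, num_bins = 1e-3, 8
left, right = -1.0, 1.0

unnormalized_widths = torch.randn(1, num_bins)
widths = F.softmax(unnormalized_widths, dim=-1)
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
cumwidths = torch.cumsum(widths, dim=-1)
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
cumwidths = (right - left) * cumwidths + left
cumwidths[..., 0], cumwidths[..., -1] = left, right

print(cumwidths)  # strictly increasing edges from -1.0 to 1.0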
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/export/caffe2_modeling.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/export/caffe2_modeling.py
deleted file mode 100644
index e00de4ad28fd81483c9e1161394b7b508fdad91f..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/export/caffe2_modeling.py
+++ /dev/null
@@ -1,419 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import functools
-import io
-import struct
-import types
-import torch
-
-from detectron2.modeling import meta_arch
-from detectron2.modeling.box_regression import Box2BoxTransform
-from detectron2.modeling.roi_heads import keypoint_head
-from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
-
-from .c10 import Caffe2Compatible
-from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
-from .shared import (
- alias,
- check_set_pb_arg,
- get_pb_arg_floats,
- get_pb_arg_valf,
- get_pb_arg_vali,
- get_pb_arg_vals,
- mock_torch_nn_functional_interpolate,
-)
-
-
-def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
- """
- A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
- to detectron2's format (i.e. a list of Instances instances).
- This only works when the model follows the Caffe2 Detectron naming convention.
-
- Args:
- image_sizes (List[List[int, int]]): [H, W] of every image.
- tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
-
- force_mask_on (Bool): if true, it makes sure there will be pred_masks even
- if the mask is not found in tensor_outputs (usually due to a model crash)
- """
-
- results = [Instances(image_size) for image_size in image_sizes]
-
- batch_splits = tensor_outputs.get("batch_splits", None)
- if batch_splits:
- raise NotImplementedError()
- assert len(image_sizes) == 1
- result = results[0]
-
- bbox_nms = tensor_outputs["bbox_nms"]
- score_nms = tensor_outputs["score_nms"]
- class_nms = tensor_outputs["class_nms"]
- # Detection will always succeed because Conv supports 0-batch
- assert bbox_nms is not None
- assert score_nms is not None
- assert class_nms is not None
- if bbox_nms.shape[1] == 5:
- result.pred_boxes = RotatedBoxes(bbox_nms)
- else:
- result.pred_boxes = Boxes(bbox_nms)
- result.scores = score_nms
- result.pred_classes = class_nms.to(torch.int64)
-
- mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
- if mask_fcn_probs is not None:
- # finish the mask pred
- mask_probs_pred = mask_fcn_probs
- num_masks = mask_probs_pred.shape[0]
- class_pred = result.pred_classes
- indices = torch.arange(num_masks, device=class_pred.device)
- mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
- result.pred_masks = mask_probs_pred
- elif force_mask_on:
- # NOTE: there's no way to know the height/width of mask here, it won't be
- # used anyway when batch size is 0, so just set them to 0.
- result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
-
- keypoints_out = tensor_outputs.get("keypoints_out", None)
- kps_score = tensor_outputs.get("kps_score", None)
- if keypoints_out is not None:
-        # keypoints_out: [N, 4, #keypoints], where 4 is in order of (x, y, score, prob)
- keypoints_tensor = keypoints_out
-        # NOTE: it's possible that prob is not calculated if "should_output_softmax"
-        # is set to False in HeatmapMaxKeypoint, so we just use the raw score; it
-        # doesn't seem to affect mAP. TODO: check more carefully.
- keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
- result.pred_keypoints = keypoint_xyp
- elif kps_score is not None:
- # keypoint heatmap to sparse data structure
- pred_keypoint_logits = kps_score
- keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
-
- return results
-
-
-def _cast_to_f32(f64):
- return struct.unpack("f", struct.pack("f", f64))[0]
-
-
-def set_caffe2_compatible_tensor_mode(model, enable=True):
- def _fn(m):
- if isinstance(m, Caffe2Compatible):
- m.tensor_mode = enable
-
- model.apply(_fn)
-
-
-def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
- """
- See get_caffe2_inputs() below.
- """
- assert all(isinstance(x, dict) for x in batched_inputs)
- assert all(x["image"].dim() == 3 for x in batched_inputs)
-
- images = [x["image"] for x in batched_inputs]
- images = ImageList.from_tensors(images, size_divisibility)
-
- im_info = []
- for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
- target_height = input_per_image.get("height", image_size[0])
- target_width = input_per_image.get("width", image_size[1]) # noqa
- # NOTE: The scale inside im_info is kept as convention and for providing
- # post-processing information if further processing is needed. For
- # current Caffe2 model definitions that don't include post-processing inside
- # the model, this number is not used.
- # NOTE: There can be a slight difference between width and height
-        # scales, so using a single number can result in numerical differences
- # compared with D2's post-processing.
- scale = target_height / image_size[0]
- im_info.append([image_size[0], image_size[1], scale])
- im_info = torch.Tensor(im_info)
-
- return images.tensor.to(device), im_info.to(device)
-
-
-class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
- """
- Base class for caffe2-compatible implementation of a meta architecture.
- The forward is traceable and its traced graph can be converted to caffe2
- graph through ONNX.
- """
-
- def __init__(self, cfg, torch_model):
- """
- Args:
- cfg (CfgNode):
- torch_model (nn.Module): the detectron2 model (meta_arch) to be
- converted.
- """
- super().__init__()
- self._wrapped_model = torch_model
- self.eval()
- set_caffe2_compatible_tensor_mode(self, True)
-
- def get_caffe2_inputs(self, batched_inputs):
- """
- Convert pytorch-style structured inputs to caffe2-style inputs that
- are tuples of tensors.
-
- Args:
- batched_inputs (list[dict]): inputs to a detectron2 model
- in its standard format. Each dict has "image" (CHW tensor), and optionally
- "height" and "width".
-
- Returns:
- tuple[Tensor]:
- tuple of tensors that will be the inputs to the
- :meth:`forward` method. For existing models, the first
- is an NCHW tensor (padded and batched); the second is
- a im_info Nx3 tensor, where the rows are
- (height, width, unused legacy parameter)
- """
- return convert_batched_inputs_to_c2_format(
- batched_inputs,
- self._wrapped_model.backbone.size_divisibility,
- self._wrapped_model.device,
- )
-
- def encode_additional_info(self, predict_net, init_net):
- """
- Save extra metadata that will be used by inference in the output protobuf.
- """
- pass
-
- def forward(self, inputs):
- """
- Run the forward in caffe2-style. It has to use caffe2-compatible ops
- and the method will be used for tracing.
-
- Args:
- inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`.
- They will be the inputs of the converted caffe2 graph.
-
- Returns:
- tuple[Tensor]: output tensors. They will be the outputs of the
- converted caffe2 graph.
- """
- raise NotImplementedError
-
- def _caffe2_preprocess_image(self, inputs):
- """
- Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
- It normalizes the input images, and the final caffe2 graph assumes the
- inputs have been batched already.
- """
- data, im_info = inputs
- data = alias(data, "data")
- im_info = alias(im_info, "im_info")
- mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
- normalized_data = (data - mean) / std
- normalized_data = alias(normalized_data, "normalized_data")
-
- # Pack (data, im_info) into ImageList which is recognized by self.inference.
- images = ImageList(tensor=normalized_data, image_sizes=im_info)
- return images
-
- @staticmethod
- def get_outputs_converter(predict_net, init_net):
- """
- Creates a function that converts outputs of the caffe2 model to
- detectron2's standard format.
-        The function uses information in `predict_net` and `init_net` that is
-        available at inference time. Therefore the function logic can be used in inference.
-
- The returned function has the following signature:
-
- def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
-
- Where
-
- * batched_inputs (list[dict]): the original input format of the meta arch
- * c2_inputs (tuple[Tensor]): the caffe2 inputs.
- * c2_results (dict[str, Tensor]): the caffe2 output format,
- corresponding to the outputs of the :meth:`forward` function.
- * detectron2_outputs: the original output format of the meta arch.
-
- This function can be used to compare the outputs of the original meta arch and
- the converted caffe2 graph.
-
- Returns:
- callable: a callable of the above signature.
- """
- raise NotImplementedError
-
-
-class Caffe2GeneralizedRCNN(Caffe2MetaArch):
- def __init__(self, cfg, torch_model):
- assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
- torch_model = patch_generalized_rcnn(torch_model)
- super().__init__(cfg, torch_model)
-
- try:
- use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
- except AttributeError:
- use_heatmap_max_keypoint = False
- self.roi_heads_patcher = ROIHeadsPatcher(
- self._wrapped_model.roi_heads, use_heatmap_max_keypoint
- )
-
- def encode_additional_info(self, predict_net, init_net):
- size_divisibility = self._wrapped_model.backbone.size_divisibility
- check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
- check_set_pb_arg(
- predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
- )
- check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
-
- @mock_torch_nn_functional_interpolate()
- def forward(self, inputs):
- if not self.tensor_mode:
- return self._wrapped_model.inference(inputs)
- images = self._caffe2_preprocess_image(inputs)
- features = self._wrapped_model.backbone(images.tensor)
- proposals, _ = self._wrapped_model.proposal_generator(images, features)
- with self.roi_heads_patcher.mock_roi_heads():
- detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
- return tuple(detector_results[0].flatten())
-
- @staticmethod
- def get_outputs_converter(predict_net, init_net):
- def f(batched_inputs, c2_inputs, c2_results):
- _, im_info = c2_inputs
- image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
- results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
- return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
- return f
-
-
-class Caffe2RetinaNet(Caffe2MetaArch):
- def __init__(self, cfg, torch_model):
- assert isinstance(torch_model, meta_arch.RetinaNet)
- super().__init__(cfg, torch_model)
-
- @mock_torch_nn_functional_interpolate()
- def forward(self, inputs):
- assert self.tensor_mode
- images = self._caffe2_preprocess_image(inputs)
-
- # explicitly return the images sizes to avoid removing "im_info" by ONNX
- # since it's not used in the forward path
- return_tensors = [images.image_sizes]
-
- features = self._wrapped_model.backbone(images.tensor)
- features = [features[f] for f in self._wrapped_model.head_in_features]
- for i, feature_i in enumerate(features):
- features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
- return_tensors.append(features[i])
-
- pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
- for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
- return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
- return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
-
- return tuple(return_tensors)
-
- def encode_additional_info(self, predict_net, init_net):
- size_divisibility = self._wrapped_model.backbone.size_divisibility
- check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
- check_set_pb_arg(
- predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
- )
- check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
-
- # Inference parameters:
- check_set_pb_arg(
- predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
- )
- check_set_pb_arg(
- predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
- )
- check_set_pb_arg(
- predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
- )
- check_set_pb_arg(
- predict_net,
- "max_detections_per_image",
- "i",
- self._wrapped_model.max_detections_per_image,
- )
-
- check_set_pb_arg(
- predict_net,
- "bbox_reg_weights",
- "floats",
- [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
- )
- self._encode_anchor_generator_cfg(predict_net)
-
- def _encode_anchor_generator_cfg(self, predict_net):
- # serialize anchor_generator for future use
- serialized_anchor_generator = io.BytesIO()
- torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
- # Ideally we can put anchor generating inside the model, then we don't
- # need to store this information.
- bytes = serialized_anchor_generator.getvalue()
- check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
-
- @staticmethod
- def get_outputs_converter(predict_net, init_net):
- self = types.SimpleNamespace()
- serialized_anchor_generator = io.BytesIO(
- get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
- )
- self.anchor_generator = torch.load(serialized_anchor_generator)
- bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
- self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
- self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
- self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
- self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
- self.max_detections_per_image = get_pb_arg_vali(
- predict_net, "max_detections_per_image", None
- )
-
- # hack to reuse inference code from RetinaNet
- for meth in [
- "forward_inference",
- "inference_single_image",
- "_transpose_dense_predictions",
- "_decode_multi_level_predictions",
- "_decode_per_level_predictions",
- ]:
- setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))
-
- def f(batched_inputs, c2_inputs, c2_results):
- _, im_info = c2_inputs
- image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
- dummy_images = ImageList(
- torch.randn(
- (
- len(im_info),
- 3,
- )
- + tuple(image_sizes[0])
- ),
- image_sizes,
- )
-
- num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
- pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
- pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
-
- # For each feature level, feature should have the same batch size and
- # spatial dimension as the box_cls and box_delta.
- dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
-            # self.num_classes can be inferred
- self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)
-
- results = self.forward_inference(
- dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
- )
- return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
- return f
-
-
-META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
- "GeneralizedRCNN": Caffe2GeneralizedRCNN,
- "RetinaNet": Caffe2RetinaNet,
-}
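META_ARCH_CAFFE2_EXPORT_TYPE_MAP dispatches from a config's meta-architecture name to the matching Caffe2-compatible wrapper defined in this file. A minimal sketch of how the map could be consumed, assuming the definitions above are in scope and that `cfg`, `torch_model`, and `batched_inputs` come from a standard detectron2 setup (they are not shown in this diff):

def wrap_for_caffe2_export(cfg, torch_model, batched_inputs):
    # Look up the wrapper class by meta-architecture name, e.g. "GeneralizedRCNN".
    wrapper_cls = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
    c2_model = wrapper_cls(cfg, torch_model)                 # eval mode, tensor_mode enabled
    c2_inputs = c2_model.get_caffe2_inputs(batched_inputs)   # (NCHW tensor, Nx3 im_info)
    return c2_model, c2_inputs                               # ready for ONNX-style tracing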
diff --git a/spaces/cffl/Exploring_Intelligent_Writing_Assistance/apps/data_utils.py b/spaces/cffl/Exploring_Intelligent_Writing_Assistance/apps/data_utils.py
deleted file mode 100644
index 6ff20283c938dbfcdbed483af7ca384476c56598..0000000000000000000000000000000000000000
--- a/spaces/cffl/Exploring_Intelligent_Writing_Assistance/apps/data_utils.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# ###########################################################################
-#
-# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
-# (C) Cloudera, Inc. 2022
-# All rights reserved.
-#
-# Applicable Open Source License: Apache 2.0
-#
-# NOTE: Cloudera open source products are modular software products
-# made up of hundreds of individual components, each of which was
-# individually copyrighted. Each Cloudera open source product is a
-# collective work under U.S. Copyright Law. Your license to use the
-# collective work is as provided in your written agreement with
-# Cloudera. Used apart from the collective work, this file is
-# licensed for your use pursuant to the open source license
-# identified above.
-#
-# This code is provided to you pursuant a written agreement with
-# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
-# this code. If you do not have a written agreement with Cloudera nor
-# with an authorized and properly licensed third party, you do not
-# have any rights to access nor to use this code.
-#
-# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
-# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
-# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
-# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
-# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
-# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
-# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
-# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
-# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
-# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
-# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
-# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
-# DATA.
-#
-# ###########################################################################
-
-import os
-from typing import List
-from collections import defaultdict
-from dataclasses import dataclass
-
-import numpy as np
-
-
-@dataclass
-class StyleAttributeData:
- source_attribute: str
- target_attribute: str
- examples: List[str]
- cls_model_path: str
- seq2seq_model_path: str
- sbert_model_path: str = "sentence-transformers/all-MiniLM-L6-v2"
- hf_base_url: str = "https://huggingface.co/"
-
- def __post_init__(self):
- self._make_attribute_selection_string()
- self._make_attribute_AND_string()
- self._make_attribute_THAN_string()
-
- def _make_attribute_selection_string(self):
- self.attribute_selecting_string = (
- f"{self.source_attribute}-{self.target_attribute}"
- )
-
- def _make_attribute_AND_string(self):
- self.attribute_AND_string = (
- f"**{self.source_attribute}** and **{self.target_attribute}**"
- )
-
- def _make_attribute_THAN_string(self):
- self.attribute_THAN_string = (
- f"**{self.source_attribute}** than **{self.target_attribute}**"
- )
-
- def build_model_url(self, model_type: str):
- """
- Build a complete HuggingFace url for the given `model_type`.
-
- Args:
- model_type (str): "cls", "seq2seq", "sbert"
- """
- attr_name = f"{model_type}_model_path"
- return os.path.join(self.hf_base_url, getattr(self, attr_name))
-
-
-# instantiate data classes & collect all data class instances
-DATA_PACKET = {
- "subjective-to-neutral": StyleAttributeData(
- source_attribute="subjective",
- target_attribute="neutral",
- examples=[
- "another strikingly elegant four-door design for the bentley s3 continental came from james.",
- "the band plays an engaging and contagious rhythm known as brega pop and calypso.",
- "chemical abstracts service (cas), a prominent division of the american chemical society, is the world's leading source of chemical information.",
- "the final fight scene is with the martial arts great, master ninja sho kosugi.",
- ],
- cls_model_path="cffl/bert-base-styleclassification-subjective-neutral",
- seq2seq_model_path="cffl/bart-base-styletransfer-subjective-to-neutral",
- ),
- "informal-to-formal": StyleAttributeData(
- source_attribute="informal",
- target_attribute="formal",
- examples=[
- "that was funny LOL",
- "btw - ur avatar looks familiar",
- "i loooooooooooooooooooooooove going to the movies.",
- "haha, thatd be dope",
- ],
- cls_model_path="cointegrated/roberta-base-formality",
- seq2seq_model_path="prithivida/informal_to_formal_styletransfer",
- ),
-}
-
-
-def format_classification_results(id2label: dict, cls_result):
- """
- Formats classification output to be plotted using Altair.
-
- Args:
- id2label (dict): Transformer model's label dictionary
- cls_result (List): Classification pipeline output
- """
-
- labels = [v for k, v in id2label.items()]
-
- format_cls_result = []
-
- for i in range(len(labels)):
- temp = defaultdict()
- temp["type"] = labels[i].capitalize()
- temp["value"] = round(cls_result[0]["distribution"][i], 4)
-
- if i == 0:
- temp["percentage_start"] = 0
- temp["percentage_end"] = temp["value"]
- else:
- temp["percentage_start"] = 1 - temp["value"]
- temp["percentage_end"] = 1
-
- format_cls_result.append(temp)
-
- return format_cls_result
-
-
-def string_to_list_string(text: str):
- return np.expand_dims(np.array(text), axis=0).tolist()
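DATA_PACKET bundles per-style metadata (model paths, display strings, examples) into StyleAttributeData instances. A short illustrative snippet of how the helpers above fit together, assuming the module's definitions are in scope (this usage is not part of the deleted app itself):

packet = DATA_PACKET["informal-to-formal"]
print(packet.attribute_AND_string)        # "**informal** and **formal**"
print(packet.build_model_url("seq2seq"))  # https://huggingface.co/prithivida/informal_to_formal_styletransfer
single_input = string_to_list_string(packet.examples[0])  # wrap one example for a pipeline call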
diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/align/modeling_align.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/align/modeling_align.py
deleted file mode 100644
index 09ee6eca62650efad7818c9cfdc8a0a83b8aa4f6..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/src/transformers/models/align/modeling_align.py
+++ /dev/null
@@ -1,1642 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The Google Research Team Authors and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" PyTorch ALIGN model."""
-
-import math
-from dataclasses import dataclass
-from typing import Any, Optional, Tuple, Union
-
-import torch
-import torch.utils.checkpoint
-from torch import nn
-
-from ...activations import ACT2FN
-from ...modeling_outputs import (
- BaseModelOutputWithNoAttention,
- BaseModelOutputWithPastAndCrossAttentions,
- BaseModelOutputWithPoolingAndCrossAttentions,
- BaseModelOutputWithPoolingAndNoAttention,
-)
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import (
- ModelOutput,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- logging,
- replace_return_docstrings,
-)
-from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CHECKPOINT_FOR_DOC = "kakaobrain/align-base"
-_CONFIG_FOR_DOC = "AlignConfig"
-
-
-ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "kakaobrain/align-base",
- # See all ALIGN models at https://huggingface.co/models?filter=align
-]
-
-
-ALIGN_START_DOCSTRING = r"""
- This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
- library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
- etc.)
-
- This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
- and behavior.
-
- Parameters:
- config ([`AlignConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-ALIGN_TEXT_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
- it.
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
- config.max_position_embeddings - 1]`.
-
- [What are position IDs?](../glossary#position-ids)
- token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
-
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
-
- [What are token type IDs?](../glossary#token-type-ids)
- head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
- Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
-
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
-
- inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-ALIGN_VISION_INPUTS_DOCSTRING = r"""
- Args:
- pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
- Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
- [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-ALIGN_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
- it.
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
- config.max_position_embeddings - 1]`.
-
- [What are position IDs?](../glossary#position-ids)
- token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
-
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
-
- [What are token type IDs?](../glossary#token-type-ids)
- head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
- Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
-
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
-
- inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
- Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
- [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
- return_loss (`bool`, *optional*):
- Whether or not to return the contrastive loss.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@dataclass
-class AlignVisionModelOutput(ModelOutput):
- """
- Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
-
- Args:
- image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
- The image embeddings obtained by applying the projection layer to the pooler_output.
- last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
- Sequence of hidden-states at the output of the last layer of the model.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
- one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
- """
-
- image_embeds: Optional[torch.FloatTensor] = None
- last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@dataclass
-class AlignTextModelOutput(ModelOutput):
- """
- Base class for text model's outputs that also contains a pooling of the last hidden states.
-
- Args:
- text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
- The text embeddings obtained by applying the projection layer to the pooler_output.
- last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
- Sequence of hidden-states at the output of the last layer of the model.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
- one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- text_embeds: Optional[torch.FloatTensor] = None
- last_hidden_state: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@dataclass
-class AlignOutput(ModelOutput):
- """
- Args:
- loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
- Contrastive loss for image-text similarity.
- logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
- The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
- similarity scores.
- logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
- The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
- similarity scores.
-        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
- The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
-        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
- The output of [`AlignVisionModel`].
- text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`):
- The output of the [`AlignTextModel`].
- vision_model_output(`BaseModelOutputWithPoolingAndNoAttention`):
- The output of the [`AlignVisionModel`].
- """
-
- loss: Optional[torch.FloatTensor] = None
- logits_per_image: torch.FloatTensor = None
- logits_per_text: torch.FloatTensor = None
- text_embeds: torch.FloatTensor = None
- image_embeds: torch.FloatTensor = None
- text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
- vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None
-
- def to_tuple(self) -> Tuple[Any]:
- return tuple(
- self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
- for k in self.keys()
- )
-
-
-# contrastive loss function, adapted from
-# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
-def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
- return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1)
-
-
-def align_loss(similarity: torch.Tensor) -> torch.Tensor:
- caption_loss = contrastive_loss(similarity)
- image_loss = contrastive_loss(similarity.t())
- return (caption_loss + image_loss) / 2.0
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet -> AlignVision
-def round_filters(config: AlignVisionConfig, num_channels: int):
- r"""
- Round number of filters based on depth multiplier.
- """
- divisor = config.depth_divisor
- num_channels *= config.width_coefficient
- new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
-
- # Make sure that round down does not go down by more than 10%.
- if new_dim < 0.9 * num_channels:
- new_dim += divisor
-
- return int(new_dim)
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad
-def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
- r"""
- Utility function to get the tuple padding value for the depthwise convolution.
-
- Args:
- kernel_size (`int` or `tuple`):
- Kernel size of the convolution layers.
- adjust (`bool`, *optional*, defaults to `True`):
- Adjusts padding value to apply to right and bottom sides of the input.
- """
- if isinstance(kernel_size, int):
- kernel_size = (kernel_size, kernel_size)
-
- correct = (kernel_size[0] // 2, kernel_size[1] // 2)
- if adjust:
- return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
- else:
- return (correct[1], correct[1], correct[0], correct[0])
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision
-class AlignVisionEmbeddings(nn.Module):
- r"""
- A module that corresponds to the stem module of the original work.
- """
-
- def __init__(self, config: AlignVisionConfig):
- super().__init__()
-
- self.out_dim = round_filters(config, 32)
- self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
- self.convolution = nn.Conv2d(
- config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
- )
- self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
- self.activation = ACT2FN[config.hidden_act]
-
- def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
- features = self.padding(pixel_values)
- features = self.convolution(features)
- features = self.batchnorm(features)
- features = self.activation(features)
-
- return features
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision
-class AlignVisionDepthwiseConv2d(nn.Conv2d):
- def __init__(
- self,
- in_channels,
- depth_multiplier=1,
- kernel_size=3,
- stride=1,
- padding=0,
- dilation=1,
- bias=True,
- padding_mode="zeros",
- ):
- out_channels = in_channels * depth_multiplier
- super().__init__(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=in_channels,
- bias=bias,
- padding_mode=padding_mode,
- )
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision
-class AlignVisionExpansionLayer(nn.Module):
- r"""
- This corresponds to the expansion phase of each block in the original implementation.
- """
-
- def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
- super().__init__()
- self.expand_conv = nn.Conv2d(
- in_channels=in_dim,
- out_channels=out_dim,
- kernel_size=1,
- padding="same",
- bias=False,
- )
- self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
- self.expand_act = ACT2FN[config.hidden_act]
-
- def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
- # Expand phase
- hidden_states = self.expand_conv(hidden_states)
- hidden_states = self.expand_bn(hidden_states)
- hidden_states = self.expand_act(hidden_states)
-
- return hidden_states
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with with EfficientNet->AlignVision
-class AlignVisionDepthwiseLayer(nn.Module):
- r"""
- This corresponds to the depthwise convolution phase of each block in the original implementation.
- """
-
- def __init__(
- self,
- config: AlignVisionConfig,
- in_dim: int,
- stride: int,
- kernel_size: int,
- adjust_padding: bool,
- ):
- super().__init__()
- self.stride = stride
- conv_pad = "valid" if self.stride == 2 else "same"
- padding = correct_pad(kernel_size, adjust=adjust_padding)
-
- self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
- self.depthwise_conv = AlignVisionDepthwiseConv2d(
- in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
- )
- self.depthwise_norm = nn.BatchNorm2d(
- num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
- )
- self.depthwise_act = ACT2FN[config.hidden_act]
-
- def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
- # Depthwise convolution
- if self.stride == 2:
- hidden_states = self.depthwise_conv_pad(hidden_states)
-
- hidden_states = self.depthwise_conv(hidden_states)
- hidden_states = self.depthwise_norm(hidden_states)
- hidden_states = self.depthwise_act(hidden_states)
-
- return hidden_states
-
-
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with with EfficientNet->AlignVision
-class AlignVisionSqueezeExciteLayer(nn.Module):
- r"""
- This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
- """
-
- def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False):
- super().__init__()
- self.dim = expand_dim if expand else in_dim
- self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
-
- self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
- self.reduce = nn.Conv2d(
- in_channels=self.dim,
- out_channels=self.dim_se,
- kernel_size=1,
- padding="same",
- )
- self.expand = nn.Conv2d(
- in_channels=self.dim_se,
- out_channels=self.dim,
- kernel_size=1,
- padding="same",
- )
- self.act_reduce = ACT2FN[config.hidden_act]
- self.act_expand = nn.Sigmoid()
-
- def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
- inputs = hidden_states
- hidden_states = self.squeeze(hidden_states)
- hidden_states = self.reduce(hidden_states)
- hidden_states = self.act_reduce(hidden_states)
-
- hidden_states = self.expand(hidden_states)
- hidden_states = self.act_expand(hidden_states)
- hidden_states = torch.mul(inputs, hidden_states)
-
- return hidden_states
-
-
-class AlignVisionFinalBlockLayer(nn.Module):
- r"""
- This corresponds to the final phase of each block in the original implementation.
- """
-
- def __init__(
- self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
- ):
- super().__init__()
- self.apply_dropout = stride == 1 and not id_skip
- self.project_conv = nn.Conv2d(
- in_channels=in_dim,
- out_channels=out_dim,
- kernel_size=1,
- padding="same",
- bias=False,
- )
- self.project_bn = nn.BatchNorm2d(
- num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
- )
- self.dropout = nn.Dropout(p=drop_rate)
-
- def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
- hidden_states = self.project_conv(hidden_states)
- hidden_states = self.project_bn(hidden_states)
-
- if self.apply_dropout:
- hidden_states = self.dropout(hidden_states)
- hidden_states = hidden_states + embeddings
-
- return hidden_states
-
-
-class AlignVisionBlock(nn.Module):
- r"""
-    This corresponds to the block module of the original EfficientNet vision encoder implementation.
-
- Args:
- config ([`AlignVisionConfig`]):
- Model configuration class.
- in_dim (`int`):
- Number of input channels.
- out_dim (`int`):
- Number of output channels.
- stride (`int`):
- Stride size to be used in convolution layers.
- expand_ratio (`int`):
- Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
- kernel_size (`int`):
- Kernel size for the depthwise convolution layer.
- drop_rate (`float`):
- Dropout rate to be used in the final phase of each block.
- id_skip (`bool`):
- Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
- of each block. Set to `True` for the first block of each stage.
- adjust_padding (`bool`):
- Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
- operation, set to `True` for inputs with odd input sizes.
- """
-
- def __init__(
- self,
- config: AlignVisionConfig,
- in_dim: int,
- out_dim: int,
- stride: int,
- expand_ratio: int,
- kernel_size: int,
- drop_rate: float,
- id_skip: bool,
- adjust_padding: bool,
- ):
- super().__init__()
- self.expand_ratio = expand_ratio
- self.expand = True if self.expand_ratio != 1 else False
- expand_in_dim = in_dim * expand_ratio
-
- if self.expand:
- self.expansion = AlignVisionExpansionLayer(
- config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
- )
-
- self.depthwise_conv = AlignVisionDepthwiseLayer(
- config=config,
- in_dim=expand_in_dim if self.expand else in_dim,
- stride=stride,
- kernel_size=kernel_size,
- adjust_padding=adjust_padding,
- )
- self.squeeze_excite = AlignVisionSqueezeExciteLayer(
- config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
- )
- self.projection = AlignVisionFinalBlockLayer(
- config=config,
- in_dim=expand_in_dim if self.expand else in_dim,
- out_dim=out_dim,
- stride=stride,
- drop_rate=drop_rate,
- id_skip=id_skip,
- )
-
- def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
- embeddings = hidden_states
- # Expansion and depthwise convolution phase
- if self.expand_ratio != 1:
- hidden_states = self.expansion(hidden_states)
- hidden_states = self.depthwise_conv(hidden_states)
-
- # Squeeze and excite phase
- hidden_states = self.squeeze_excite(hidden_states)
- hidden_states = self.projection(embeddings, hidden_states)
- return hidden_states
-
-
-class AlignVisionEncoder(nn.Module):
- r"""
-    Forward propagates the embeddings through each vision encoder (EfficientNet) block.
-
- Args:
- config ([`AlignVisionConfig`]):
- Model configuration class.
- """
-
- def __init__(self, config: AlignVisionConfig):
- super().__init__()
- self.depth_coefficient = config.depth_coefficient
-
- def round_repeats(repeats):
- # Round number of block repeats based on depth multiplier.
- return int(math.ceil(self.depth_coefficient * repeats))
-
- num_base_blocks = len(config.in_channels)
- num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)
-
- curr_block_num = 0
- blocks = []
- for i in range(num_base_blocks):
- in_dim = round_filters(config, config.in_channels[i])
- out_dim = round_filters(config, config.out_channels[i])
- stride = config.strides[i]
- kernel_size = config.kernel_sizes[i]
- expand_ratio = config.expand_ratios[i]
-
- for j in range(round_repeats(config.num_block_repeats[i])):
- id_skip = True if j == 0 else False
- stride = 1 if j > 0 else stride
- in_dim = out_dim if j > 0 else in_dim
- adjust_padding = False if curr_block_num in config.depthwise_padding else True
- drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
-
- block = AlignVisionBlock(
- config=config,
- in_dim=in_dim,
- out_dim=out_dim,
- stride=stride,
- kernel_size=kernel_size,
- expand_ratio=expand_ratio,
- drop_rate=drop_rate,
- id_skip=id_skip,
- adjust_padding=adjust_padding,
- )
- blocks.append(block)
- curr_block_num += 1
-
- self.blocks = nn.ModuleList(blocks)
-
- def forward(
- self,
- hidden_states: torch.FloatTensor,
- output_hidden_states: Optional[bool] = False,
- return_dict: Optional[bool] = True,
- ) -> BaseModelOutputWithPoolingAndNoAttention:
- all_hidden_states = (hidden_states,) if output_hidden_states else None
-
- for block in self.blocks:
- hidden_states = block(hidden_states)
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
-
- if not return_dict:
- return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
-
- return BaseModelOutputWithNoAttention(
- last_hidden_state=hidden_states,
- hidden_states=all_hidden_states,
- )
-
-
-# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText
-class AlignTextEmbeddings(nn.Module):
- """Construct the embeddings from word, position and token_type embeddings."""
-
- def __init__(self, config):
- super().__init__()
- self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
- self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
- self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
-
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- # position_ids (1, len position emb) is contiguous in memory and exported when serialized
- self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
- self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
- self.register_buffer(
- "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
- )
-
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- past_key_values_length: int = 0,
- ) -> torch.Tensor:
- if input_ids is not None:
- input_shape = input_ids.size()
- else:
- input_shape = inputs_embeds.size()[:-1]
-
- seq_length = input_shape[1]
-
- if position_ids is None:
- position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
-
-        # token_type_ids defaults to the all-zeros buffer registered in the constructor, which usually happens
-        # when it is auto-generated; the registered buffer helps users trace the model without passing
-        # token_type_ids and solves issue #5664
- if token_type_ids is None:
- if hasattr(self, "token_type_ids"):
- buffered_token_type_ids = self.token_type_ids[:, :seq_length]
- buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
- token_type_ids = buffered_token_type_ids_expanded
- else:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
-
- if inputs_embeds is None:
- inputs_embeds = self.word_embeddings(input_ids)
- token_type_embeddings = self.token_type_embeddings(token_type_ids)
-
- embeddings = inputs_embeds + token_type_embeddings
- if self.position_embedding_type == "absolute":
- position_embeddings = self.position_embeddings(position_ids)
- embeddings += position_embeddings
- embeddings = self.LayerNorm(embeddings)
- embeddings = self.dropout(embeddings)
- return embeddings
-
-
-# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText
-class AlignTextSelfAttention(nn.Module):
- def __init__(self, config, position_embedding_type=None):
- super().__init__()
- if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
- raise ValueError(
- f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
- f"heads ({config.num_attention_heads})"
- )
-
- self.num_attention_heads = config.num_attention_heads
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(config.hidden_size, self.all_head_size)
- self.key = nn.Linear(config.hidden_size, self.all_head_size)
- self.value = nn.Linear(config.hidden_size, self.all_head_size)
-
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
- self.position_embedding_type = position_embedding_type or getattr(
- config, "position_embedding_type", "absolute"
- )
- if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
- self.max_position_embeddings = config.max_position_embeddings
- self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
-
- self.is_decoder = config.is_decoder
-
- def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.FloatTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- output_attentions: Optional[bool] = False,
- ) -> Tuple[torch.Tensor]:
- mixed_query_layer = self.query(hidden_states)
-
- # If this is instantiated as a cross-attention module, the keys
- # and values come from an encoder; the attention mask needs to be
- # such that the encoder's padding tokens are not attended to.
- is_cross_attention = encoder_hidden_states is not None
-
- if is_cross_attention and past_key_value is not None:
- # reuse k,v, cross_attentions
- key_layer = past_key_value[0]
- value_layer = past_key_value[1]
- attention_mask = encoder_attention_mask
- elif is_cross_attention:
- key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
- value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
- attention_mask = encoder_attention_mask
- elif past_key_value is not None:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
- key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
- value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
- else:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
-
- query_layer = self.transpose_for_scores(mixed_query_layer)
-
- use_cache = past_key_value is not None
- if self.is_decoder:
- # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
- # Further calls to cross_attention layer can then reuse all cross-attention
- # key/value_states (first "if" case)
- # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
- # all previous decoder key/value_states. Further calls to uni-directional self-attention
- # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
- # if encoder bi-directional self-attention `past_key_value` is always `None`
- past_key_value = (key_layer, value_layer)
-
- # Take the dot product between "query" and "key" to get the raw attention scores.
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
-
- if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
- query_length, key_length = query_layer.shape[2], key_layer.shape[2]
- if use_cache:
- position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
- -1, 1
- )
- else:
- position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
- position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
- distance = position_ids_l - position_ids_r
-
- positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
- positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
-
- if self.position_embedding_type == "relative_key":
- relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
- attention_scores = attention_scores + relative_position_scores
- elif self.position_embedding_type == "relative_key_query":
- relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
- relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
- attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
-
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- if attention_mask is not None:
- # Apply the attention mask is (precomputed for all layers in AlignTextModel forward() function)
- attention_scores = attention_scores + attention_mask
-
- # Normalize the attention scores to probabilities.
- attention_probs = nn.functional.softmax(attention_scores, dim=-1)
-
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = self.dropout(attention_probs)
-
- # Mask heads if we want to
- if head_mask is not None:
- attention_probs = attention_probs * head_mask
-
- context_layer = torch.matmul(attention_probs, value_layer)
-
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
- context_layer = context_layer.view(new_context_layer_shape)
-
- outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
-
- if self.is_decoder:
- outputs = outputs + (past_key_value,)
- return outputs
-
-
-# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
-class AlignTextSelfOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText
-class AlignTextAttention(nn.Module):
- def __init__(self, config, position_embedding_type=None):
- super().__init__()
- self.self = AlignTextSelfAttention(config, position_embedding_type=position_embedding_type)
- self.output = AlignTextSelfOutput(config)
- self.pruned_heads = set()
-
- def prune_heads(self, heads):
- if len(heads) == 0:
- return
- heads, index = find_pruneable_heads_and_indices(
- heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
- )
-
- # Prune linear layers
- self.self.query = prune_linear_layer(self.self.query, index)
- self.self.key = prune_linear_layer(self.self.key, index)
- self.self.value = prune_linear_layer(self.self.value, index)
- self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
-
- # Update hyper params and store pruned heads
- self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
- self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
- self.pruned_heads = self.pruned_heads.union(heads)
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.FloatTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- output_attentions: Optional[bool] = False,
- ) -> Tuple[torch.Tensor]:
- self_outputs = self.self(
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- past_key_value,
- output_attentions,
- )
- attention_output = self.output(self_outputs[0], hidden_states)
- outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
- return outputs
-
-
-# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
-class AlignTextIntermediate(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
- if isinstance(config.hidden_act, str):
- self.intermediate_act_fn = ACT2FN[config.hidden_act]
- else:
- self.intermediate_act_fn = config.hidden_act
-
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.intermediate_act_fn(hidden_states)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText
-class AlignTextOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText
-class AlignTextLayer(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.chunk_size_feed_forward = config.chunk_size_feed_forward
- self.seq_len_dim = 1
- self.attention = AlignTextAttention(config)
- self.is_decoder = config.is_decoder
- self.add_cross_attention = config.add_cross_attention
- if self.add_cross_attention:
- if not self.is_decoder:
- raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
- self.crossattention = AlignTextAttention(config, position_embedding_type="absolute")
- self.intermediate = AlignTextIntermediate(config)
- self.output = AlignTextOutput(config)
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.FloatTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- output_attentions: Optional[bool] = False,
- ) -> Tuple[torch.Tensor]:
- # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
- self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
- self_attention_outputs = self.attention(
- hidden_states,
- attention_mask,
- head_mask,
- output_attentions=output_attentions,
- past_key_value=self_attn_past_key_value,
- )
- attention_output = self_attention_outputs[0]
-
- # if decoder, the last output is tuple of self-attn cache
- if self.is_decoder:
- outputs = self_attention_outputs[1:-1]
- present_key_value = self_attention_outputs[-1]
- else:
- outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
-
- cross_attn_present_key_value = None
- if self.is_decoder and encoder_hidden_states is not None:
- if not hasattr(self, "crossattention"):
- raise ValueError(
- f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
- " by setting `config.add_cross_attention=True`"
- )
-
- # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
- cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
- cross_attention_outputs = self.crossattention(
- attention_output,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- cross_attn_past_key_value,
- output_attentions,
- )
- attention_output = cross_attention_outputs[0]
- outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
-
- # add cross-attn cache to positions 3,4 of present_key_value tuple
- cross_attn_present_key_value = cross_attention_outputs[-1]
- present_key_value = present_key_value + cross_attn_present_key_value
-
- layer_output = apply_chunking_to_forward(
- self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
- )
- outputs = (layer_output,) + outputs
-
- # if decoder, return the attn key/values as the last output
- if self.is_decoder:
- outputs = outputs + (present_key_value,)
-
- return outputs
-
- def feed_forward_chunk(self, attention_output):
- intermediate_output = self.intermediate(attention_output)
- layer_output = self.output(intermediate_output, attention_output)
- return layer_output
-
-
-# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText
-class AlignTextEncoder(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.config = config
- self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)])
- self.gradient_checkpointing = False
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.FloatTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = False,
- output_hidden_states: Optional[bool] = False,
- return_dict: Optional[bool] = True,
- ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
- all_hidden_states = () if output_hidden_states else None
- all_self_attentions = () if output_attentions else None
- all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
-
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning_once(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- next_decoder_cache = () if use_cache else None
- for i, layer_module in enumerate(self.layer):
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- layer_head_mask = head_mask[i] if head_mask is not None else None
- past_key_value = past_key_values[i] if past_key_values is not None else None
-
- if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return module(*inputs, past_key_value, output_attentions)
-
- return custom_forward
-
- layer_outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(layer_module),
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- )
- else:
- layer_outputs = layer_module(
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- past_key_value,
- output_attentions,
- )
-
- hidden_states = layer_outputs[0]
- if use_cache:
- next_decoder_cache += (layer_outputs[-1],)
- if output_attentions:
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
- if self.config.add_cross_attention:
- all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
-
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(
- v
- for v in [
- hidden_states,
- next_decoder_cache,
- all_hidden_states,
- all_self_attentions,
- all_cross_attentions,
- ]
- if v is not None
- )
- return BaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- past_key_values=next_decoder_cache,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- cross_attentions=all_cross_attentions,
- )
-
-
-# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText
-class AlignTextPooler(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.activation = nn.Tanh()
-
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
- # We "pool" the model by simply taking the hidden state corresponding
- # to the first token.
- first_token_tensor = hidden_states[:, 0]
- pooled_output = self.dense(first_token_tensor)
- pooled_output = self.activation(pooled_output)
- return pooled_output
-
-
-class AlignPreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = AlignConfig
- base_model_prefix = "align"
- supports_gradient_checkpointing = True
- _keys_to_ignore_on_load_missing = [r"position_ids"]
-
- def _init_weights(self, module):
- """Initialize the weights"""
- if isinstance(module, (nn.Linear, nn.Conv2d)):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, AlignModel):
- nn.init.xavier_uniform_(module.text_projection.weight)
- module.text_projection.bias.data.zero_()
- module.text_projection._is_hf_initialized = True
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- if isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, (AlignTextModel, AlignVisionModel)):
- module.gradient_checkpointing = value
-
-
-@add_start_docstrings(
- """The text model from ALIGN without any head or projection on top.""",
- ALIGN_START_DOCSTRING,
-)
-class AlignTextModel(AlignPreTrainedModel):
- config_class = AlignTextConfig
-
- def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True):
- super().__init__(config)
- self.config = config
-
- self.embeddings = AlignTextEmbeddings(config)
- self.encoder = AlignTextEncoder(config)
-
- self.pooler = AlignTextPooler(config) if add_pooling_layer else None
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.embeddings.word_embeddings
-
- def set_input_embeddings(self, value):
- self.embeddings.word_embeddings = value
-
- @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig)
- def forward(
- self,
- input_ids: Optional[torch.Tensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- token_type_ids: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- inputs_embeds: Optional[torch.Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
- r"""
- Returns:
-
- Examples:
-
- ```python
- >>> from transformers import AutoTokenizer, AlignTextModel
-
- >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
- >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
-
- >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
-
- >>> outputs = model(**inputs)
- >>> last_hidden_state = outputs.last_hidden_state
-        >>> pooled_output = outputs.pooler_output  # pooled (first token) states
- ```"""
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- batch_size, seq_length = input_shape
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if attention_mask is None:
- attention_mask = torch.ones(((batch_size, seq_length)), device=device)
-
- if token_type_ids is None:
- if hasattr(self.embeddings, "token_type_ids"):
- buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
- buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
- token_type_ids = buffered_token_type_ids_expanded
- else:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- )
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
-
- if not return_dict:
- return (sequence_output, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
-
-@add_start_docstrings(
- """The vision model from ALIGN without any head or projection on top.""",
- ALIGN_START_DOCSTRING,
-)
-class AlignVisionModel(AlignPreTrainedModel):
- config_class = AlignVisionConfig
- main_input_name = "pixel_values"
-
- def __init__(self, config: AlignVisionConfig):
- super().__init__(config)
- self.config = config
- self.embeddings = AlignVisionEmbeddings(config)
- self.encoder = AlignVisionEncoder(config)
-
- # Final pooling layer
- if config.pooling_type == "mean":
- self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
- elif config.pooling_type == "max":
- self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
- else:
-            raise ValueError(f"config.pooling_type must be one of ['mean', 'max'] got {config.pooling_type}")
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self) -> nn.Module:
-        return self.embeddings.convolution
-
- @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig)
- def forward(
- self,
- pixel_values: Optional[torch.FloatTensor] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
- r"""
- Returns:
-
- Examples:
-
- ```python
- >>> from PIL import Image
- >>> import requests
- >>> from transformers import AutoProcessor, AlignVisionModel
-
- >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
- >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
-
- >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
- >>> image = Image.open(requests.get(url, stream=True).raw)
-
- >>> inputs = processor(images=image, return_tensors="pt")
-
- >>> outputs = model(**inputs)
- >>> last_hidden_state = outputs.last_hidden_state
- >>> pooled_output = outputs.pooler_output # pooled CLS states
- ```"""
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if pixel_values is None:
- raise ValueError("You have to specify pixel_values")
-
- embedding_output = self.embeddings(pixel_values)
- encoder_outputs = self.encoder(
- embedding_output,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- # Apply pooling
- last_hidden_state = encoder_outputs[0]
- pooled_output = self.pooler(last_hidden_state)
- # Reshape (batch_size, projection_dim, 1 , 1) -> (batch_size, projection_dim)
- pooled_output = pooled_output.reshape(pooled_output.shape[:2])
-
- if not return_dict:
- return (last_hidden_state, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndNoAttention(
- last_hidden_state=last_hidden_state,
- pooler_output=pooled_output,
- hidden_states=encoder_outputs.hidden_states,
- )
-
-
-@add_start_docstrings(ALIGN_START_DOCSTRING)
-class AlignModel(AlignPreTrainedModel):
- config_class = AlignConfig
-
- def __init__(self, config: AlignConfig):
- super().__init__(config)
-
- if not isinstance(config.text_config, AlignTextConfig):
- raise ValueError(
- "config.text_config is expected to be of type AlignTextConfig but is of type"
- f" {type(config.text_config)}."
- )
-
- if not isinstance(config.vision_config, AlignVisionConfig):
- raise ValueError(
- "config.vision_config is expected to be of type AlignVisionConfig but is of type"
- f" {type(config.vision_config)}."
- )
-
- text_config = config.text_config
- vision_config = config.vision_config
-
- self.projection_dim = config.projection_dim
- self.text_embed_dim = text_config.hidden_size
-
- self.text_model = AlignTextModel(text_config)
- self.vision_model = AlignVisionModel(vision_config)
-
- self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
- self.temperature = nn.Parameter(torch.ones([]) * self.config.temperature_init_value)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
- def get_text_features(
- self,
- input_ids: Optional[torch.Tensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- token_type_ids: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- inputs_embeds: Optional[torch.Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> torch.FloatTensor:
- r"""
- Returns:
- text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
- applying the projection layer to the pooled output of [`AlignTextModel`].
-
- Examples:
-
- ```python
- >>> from transformers import AutoTokenizer, AlignModel
-
- >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
- >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
-
- >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
- >>> text_features = model.get_text_features(**inputs)
- ```"""
- # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- text_outputs = self.text_model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- last_hidden_state = text_outputs[0][:, 0, :]
- text_features = self.text_projection(last_hidden_state)
-
- return text_features
-
- @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
- def get_image_features(
- self,
- pixel_values: Optional[torch.FloatTensor] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> torch.FloatTensor:
- r"""
- Returns:
- image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
- applying the projection layer to the pooled output of [`AlignVisionModel`].
-
- Examples:
-
- ```python
- >>> from PIL import Image
- >>> import requests
- >>> from transformers import AutoProcessor, AlignModel
-
- >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
- >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
-
- >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
- >>> image = Image.open(requests.get(url, stream=True).raw)
-
- >>> inputs = processor(images=image, return_tensors="pt")
-
- >>> image_features = model.get_image_features(**inputs)
- ```"""
- # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- vision_outputs = self.vision_model(
- pixel_values=pixel_values,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- image_features = vision_outputs[1] # pooled_output
-
- return image_features
-
- @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig)
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- pixel_values: Optional[torch.FloatTensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- token_type_ids: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- inputs_embeds: Optional[torch.Tensor] = None,
- return_loss: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, AlignOutput]:
- r"""
- Returns:
-
- Examples:
-
- ```python
- >>> from PIL import Image
- >>> import requests
- >>> from transformers import AutoProcessor, AlignModel
-
- >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
- >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
-
- >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
- >>> image = Image.open(requests.get(url, stream=True).raw)
-
- >>> inputs = processor(
- ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
- ... )
-
- >>> outputs = model(**inputs)
- >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
- >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
- ```"""
- # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- vision_outputs = self.vision_model(
- pixel_values=pixel_values,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- text_outputs = self.text_model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- image_embeds = vision_outputs[1]
- text_embeds = text_outputs[0][:, 0, :]
- text_embeds = self.text_projection(text_embeds)
-
- # normalized features
- image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
- text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
-
- # cosine similarity as logits
- logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
- logits_per_image = logits_per_text.t()
-
- loss = None
- if return_loss:
- loss = align_loss(logits_per_text)
-
- if not return_dict:
- output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
- return ((loss,) + output) if loss is not None else output
-
- return AlignOutput(
- loss=loss,
- logits_per_image=logits_per_image,
- logits_per_text=logits_per_text,
- text_embeds=text_embeds,
- image_embeds=image_embeds,
- text_model_output=text_outputs,
- vision_model_output=vision_outputs,
- )
diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/chinese_bert.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/chinese_bert.py
deleted file mode 100644
index 8159425df4bf7e577008b22f44e84f3147fdce14..0000000000000000000000000000000000000000
--- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/chinese_bert.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import torch
-import sys
-from transformers import AutoTokenizer, AutoModelForMaskedLM
-
-tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large")
-
-models = dict()
-
-
-def get_bert_feature(text, word2ph, device=None):
- if (
- sys.platform == "darwin"
- and torch.backends.mps.is_available()
- and device == "cpu"
- ):
- device = "mps"
- if not device:
- device = "cuda"
- if device not in models.keys():
- models[device] = AutoModelForMaskedLM.from_pretrained(
- "./bert/chinese-roberta-wwm-ext-large"
- ).to(device)
- with torch.no_grad():
- inputs = tokenizer(text, return_tensors="pt")
- for i in inputs:
- inputs[i] = inputs[i].to(device)
- res = models[device](**inputs, output_hidden_states=True)
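-        # Take the third-to-last hidden state as the word-level feature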
- res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
-
- assert len(word2ph) == len(text) + 2
- word2phone = word2ph
- phone_level_feature = []
- for i in range(len(word2phone)):
- repeat_feature = res[i].repeat(word2phone[i], 1)
- phone_level_feature.append(repeat_feature)
-
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
-
- return phone_level_feature.T
-
-
-if __name__ == "__main__":
- import torch
-
-    word_level_feature = torch.rand(38, 1024)  # 38 words, each with a 1024-dim feature
- word2phone = [
- 1,
- 2,
- 1,
- 2,
- 2,
- 1,
- 2,
- 2,
- 1,
- 2,
- 2,
- 1,
- 2,
- 2,
- 2,
- 2,
- 2,
- 1,
- 1,
- 2,
- 2,
- 1,
- 2,
- 2,
- 2,
- 2,
- 1,
- 2,
- 2,
- 2,
- 2,
- 2,
- 1,
- 2,
- 2,
- 2,
- 2,
- 1,
- ]
-
-    # Compute the total number of frames
- total_frames = sum(word2phone)
- print(word_level_feature.shape)
- print(word2phone)
- phone_level_feature = []
- for i in range(len(word2phone)):
- print(word_level_feature[i].shape)
-
-        # Repeat each word's feature word2phone[i] times
- repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
- phone_level_feature.append(repeat_feature)
-
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
- print(phone_level_feature.shape) # torch.Size([36, 1024])
diff --git a/spaces/choimirai/whisper-large-v3/app.py b/spaces/choimirai/whisper-large-v3/app.py
deleted file mode 100644
index 3251cf43b2a57bf298db41591ae4c5e4286232e3..0000000000000000000000000000000000000000
--- a/spaces/choimirai/whisper-large-v3/app.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import torch
-
-import gradio as gr
-import yt_dlp as youtube_dl
-from transformers import pipeline
-from transformers.pipelines.audio_utils import ffmpeg_read
-
-import tempfile
-import os
-import time
-
-MODEL_NAME = "openai/whisper-large-v3"
-BATCH_SIZE = 8
-FILE_LIMIT_MB = 1000
-YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
-
-device = 0 if torch.cuda.is_available() else "cpu"
-
-pipe = pipeline(
- task="automatic-speech-recognition",
- model=MODEL_NAME,
- chunk_length_s=30,
- device=device,
-)
-
-def chunks_to_srt(chunks):
- srt_format = ""
- for i, chunk in enumerate(chunks, 1):
- start_time, end_time = chunk['timestamp']
- start_time_hms = "{:02}:{:02}:{:02},{:03}".format(int(start_time // 3600), int((start_time % 3600) // 60), int(start_time % 60), int((start_time % 1) * 1000))
- end_time_hms = "{:02}:{:02}:{:02},{:03}".format(int(end_time // 3600), int((end_time % 3600) // 60), int(end_time % 60), int((end_time % 1) * 1000))
- srt_format += f"{i}\n{start_time_hms} --> {end_time_hms}\n{chunk['text']}\n\n"
- return srt_format
-
-def transcribe(inputs, task, language, return_timestamps):
- if inputs is None:
- raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
-
- # Map the language names to their corresponding codes
- language_codes = {"English": "en", "Korean": "ko", "Japanese": "ja"}
- language_code = language_codes.get(language, "en") # Default to "en" if the language is not found
- result = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task, "language": f"<|{language_code}|>"}, return_timestamps=return_timestamps)
-
- if return_timestamps:
- return chunks_to_srt(result['chunks'])
- else:
- return result['text']
-
-
-def _return_yt_html_embed(yt_url):
- video_id = yt_url.split("?v=")[-1]
- HTML_str = (
-        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
-        " </center>"
- )
- return HTML_str
-
-def download_yt_audio(yt_url, filename):
- info_loader = youtube_dl.YoutubeDL()
-
- try:
- info = info_loader.extract_info(yt_url, download=False)
- except youtube_dl.utils.DownloadError as err:
- raise gr.Error(str(err))
-
- file_length = info["duration_string"]
- file_h_m_s = file_length.split(":")
- file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
-
- if len(file_h_m_s) == 1:
- file_h_m_s.insert(0, 0)
- if len(file_h_m_s) == 2:
- file_h_m_s.insert(0, 0)
- file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
-
- if file_length_s > YT_LENGTH_LIMIT_S:
- yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
- file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
- raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
-
- ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
-
- with youtube_dl.YoutubeDL(ydl_opts) as ydl:
- try:
- ydl.download([yt_url])
- except youtube_dl.utils.ExtractorError as err:
- raise gr.Error(str(err))
-
-
-def yt_transcribe(yt_url, task, return_timestamps, language, max_filesize=75.0):
- html_embed_str = _return_yt_html_embed(yt_url)
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- filepath = os.path.join(tmpdirname, "video.mp4")
- download_yt_audio(yt_url, filepath)
- with open(filepath, "rb") as f:
- inputs = f.read()
-
- inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
- inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
-
- # Map the language names to their corresponding codes
- language_codes = {"English": "en", "Korean": "ko", "Japanese": "ja"}
- language_code = language_codes.get(language, "en") # Default to "en" if the language is not found
-
- result = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task, "language": f"<|{language_code}|>"}, return_timestamps=return_timestamps)
-
- if return_timestamps:
- return html_embed_str, chunks_to_srt(result['chunks'])
- else:
- return html_embed_str, result['text']
-
-
-demo = gr.Blocks()
-
-mf_transcribe = gr.Interface(
- fn=transcribe,
- inputs=[
- gr.inputs.Audio(source="microphone", type="filepath", optional=True),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(label="Return timestamps"),
- gr.inputs.Dropdown(choices=["English", "Korean", "Japanese"], label="Language"),
- ],
- outputs="text",
- layout="horizontal",
- theme="huggingface",
- title="Whisper Large V3: Transcribe Audio",
- description=(
- "\n\n"
- "⭐️Brought to you by Chiomirai School⭐️ "
- ),
- allow_flagging="never",
-)
-
-file_transcribe = gr.Interface(
- fn=transcribe,
- inputs=[
- gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Audio file"),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(label="Return timestamps"),
- gr.inputs.Dropdown(choices=["English", "Korean", "Japanese"], label="Language"),
- ],
- outputs="text",
- layout="horizontal",
- theme="huggingface",
- title="Whisper Large V3: Transcribe Audio File",
- description=(
- "\n\n"
- "⭐️Brought to you by Chiomirai School⭐️ "
- ),
- allow_flagging="never",
-)
-
-yt_transcribe = gr.Interface(
- fn=yt_transcribe,
- inputs=[
- gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(label="Return timestamps"),
- gr.inputs.Dropdown(choices=["English", "Korean", "Japanese"], label="Language"),
- ],
- outputs=["html", "text"],
- layout="horizontal",
- theme="huggingface",
- title="Whisper Large V3: Transcribe YouTube",
- description=(
- "\n\n"
- "⭐️Brought to you by Chiomirai School⭐️ "
- ),
- allow_flagging="never",
-)
-
-with demo:
- gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
-
-demo.launch(enable_queue=True)
-
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/middleware/gzip.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/middleware/gzip.py
deleted file mode 100644
index bbeb2cc7861a735d6cd5c0e29aeb6dbf8457023a..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/middleware/gzip.py
+++ /dev/null
@@ -1 +0,0 @@
-from starlette.middleware.gzip import GZipMiddleware as GZipMiddleware # noqa
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download and Play Cabelas Hunting Expeditions Skidrow Crack 16 for PC.md b/spaces/cihyFjudo/fairness-paper-search/Download and Play Cabelas Hunting Expeditions Skidrow Crack 16 for PC.md
deleted file mode 100644
index 7912ef3d6bbd94b132ee566efc0b715160ba8c17..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download and Play Cabelas Hunting Expeditions Skidrow Crack 16 for PC.md
+++ /dev/null
@@ -1,6 +0,0 @@
-cabela's hunting expeditions skidrow crack 16
Download Zip ✑ https://tinurli.com/2uwhEw
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cleanmaster/so-vits-svc-akagi/preprocess_hubert_f0.py b/spaces/cleanmaster/so-vits-svc-akagi/preprocess_hubert_f0.py
deleted file mode 100644
index 4fe7f21541acb01537797f430d53b3c0e63279e1..0000000000000000000000000000000000000000
--- a/spaces/cleanmaster/so-vits-svc-akagi/preprocess_hubert_f0.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import os
-import argparse
-
-import torch
-import json
-from glob import glob
-
-from pyworld import pyworld
-from tqdm import tqdm
-from scipy.io import wavfile
-
-import utils
-from mel_processing import mel_spectrogram_torch
-#import h5py
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-
-import parselmouth
-import librosa
-import numpy as np
-
-
-def get_f0(path,p_len=None, f0_up_key=0):
- x, _ = librosa.load(path, 32000)
- if p_len is None:
- p_len = x.shape[0]//320
- else:
- assert abs(p_len-x.shape[0]//320) < 3, (path, p_len, x.shape)
- time_step = 320 / 32000 * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
- f0 = parselmouth.Sound(x, 32000).to_pitch_ac(
- time_step=time_step / 1000, voicing_threshold=0.6,
- pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
-
- pad_size=(p_len - len(f0) + 1) // 2
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
-
- f0bak = f0.copy()
- f0 *= pow(2, f0_up_key / 12)
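-    # Convert F0 to the mel scale and quantize it into 255 coarse bins (values clipped to 1..255)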
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
-    f0_coarse = np.rint(f0_mel).astype(int)
- return f0_coarse, f0bak
-
-def resize2d(x, target_len):
- source = np.array(x)
- source[source<0.001] = np.nan
- target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source)
- res = np.nan_to_num(target)
- return res
-
-def compute_f0(path, c_len):
- x, sr = librosa.load(path, sr=32000)
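-    # Estimate F0 with WORLD's DIO algorithm, then refine it with StoneMask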
- f0, t = pyworld.dio(
- x.astype(np.double),
- fs=sr,
- f0_ceil=800,
- frame_period=1000 * 320 / sr,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, 32000)
- for index, pitch in enumerate(f0):
- f0[index] = round(pitch, 1)
- assert abs(c_len - x.shape[0]//320) < 3, (c_len, f0.shape)
-
- return None, resize2d(f0, c_len)
-
-
-def process(filename):
- print(filename)
- save_name = filename+".soft.pt"
- if not os.path.exists(save_name):
- devive = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- wav, _ = librosa.load(filename, sr=16000)
- wav = torch.from_numpy(wav).unsqueeze(0).to(devive)
- c = utils.get_hubert_content(hmodel, wav)
- torch.save(c.cpu(), save_name)
- else:
- c = torch.load(save_name)
- f0path = filename+".f0.npy"
- if not os.path.exists(f0path):
- cf0, f0 = compute_f0(filename, c.shape[-1] * 2)
- np.save(f0path, f0)
-
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in_dir", type=str, default="dataset/32k", help="path to input dir")
- args = parser.parse_args()
-
- print("Loading hubert for content...")
- hmodel = utils.get_hubert_model(0 if torch.cuda.is_available() else None)
- print("Loaded hubert.")
-
- filenames = glob(f'{args.in_dir}/*/*.wav', recursive=True)#[:10]
-
- for filename in tqdm(filenames):
- process(filename)
-
\ No newline at end of file
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ExifTags.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ExifTags.py
deleted file mode 100644
index 2347c6d4c2768b6c946a386bba9f1325ed91193f..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ExifTags.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# EXIF tags
-#
-# Copyright (c) 2003 by Secret Labs AB
-#
-# See the README file for information on usage and redistribution.
-#
-
-"""
-This module provides constants and clear-text names for various
-well-known EXIF tags.
-"""
-
-from enum import IntEnum
-
-
-class Base(IntEnum):
- # possibly incomplete
- InteropIndex = 0x0001
- ProcessingSoftware = 0x000B
- NewSubfileType = 0x00FE
- SubfileType = 0x00FF
- ImageWidth = 0x0100
- ImageLength = 0x0101
- BitsPerSample = 0x0102
- Compression = 0x0103
- PhotometricInterpretation = 0x0106
- Thresholding = 0x0107
- CellWidth = 0x0108
- CellLength = 0x0109
- FillOrder = 0x010A
- DocumentName = 0x010D
- ImageDescription = 0x010E
- Make = 0x010F
- Model = 0x0110
- StripOffsets = 0x0111
- Orientation = 0x0112
- SamplesPerPixel = 0x0115
- RowsPerStrip = 0x0116
- StripByteCounts = 0x0117
- MinSampleValue = 0x0118
- MaxSampleValue = 0x0119
- XResolution = 0x011A
- YResolution = 0x011B
- PlanarConfiguration = 0x011C
- PageName = 0x011D
- FreeOffsets = 0x0120
- FreeByteCounts = 0x0121
- GrayResponseUnit = 0x0122
- GrayResponseCurve = 0x0123
- T4Options = 0x0124
- T6Options = 0x0125
- ResolutionUnit = 0x0128
- PageNumber = 0x0129
- TransferFunction = 0x012D
- Software = 0x0131
- DateTime = 0x0132
- Artist = 0x013B
- HostComputer = 0x013C
- Predictor = 0x013D
- WhitePoint = 0x013E
- PrimaryChromaticities = 0x013F
- ColorMap = 0x0140
- HalftoneHints = 0x0141
- TileWidth = 0x0142
- TileLength = 0x0143
- TileOffsets = 0x0144
- TileByteCounts = 0x0145
- SubIFDs = 0x014A
- InkSet = 0x014C
- InkNames = 0x014D
- NumberOfInks = 0x014E
- DotRange = 0x0150
- TargetPrinter = 0x0151
- ExtraSamples = 0x0152
- SampleFormat = 0x0153
- SMinSampleValue = 0x0154
- SMaxSampleValue = 0x0155
- TransferRange = 0x0156
- ClipPath = 0x0157
- XClipPathUnits = 0x0158
- YClipPathUnits = 0x0159
- Indexed = 0x015A
- JPEGTables = 0x015B
- OPIProxy = 0x015F
- JPEGProc = 0x0200
- JpegIFOffset = 0x0201
- JpegIFByteCount = 0x0202
- JpegRestartInterval = 0x0203
- JpegLosslessPredictors = 0x0205
- JpegPointTransforms = 0x0206
- JpegQTables = 0x0207
- JpegDCTables = 0x0208
- JpegACTables = 0x0209
- YCbCrCoefficients = 0x0211
- YCbCrSubSampling = 0x0212
- YCbCrPositioning = 0x0213
- ReferenceBlackWhite = 0x0214
- XMLPacket = 0x02BC
- RelatedImageFileFormat = 0x1000
- RelatedImageWidth = 0x1001
- RelatedImageLength = 0x1002
- Rating = 0x4746
- RatingPercent = 0x4749
- ImageID = 0x800D
- CFARepeatPatternDim = 0x828D
- BatteryLevel = 0x828F
- Copyright = 0x8298
- ExposureTime = 0x829A
- FNumber = 0x829D
- IPTCNAA = 0x83BB
- ImageResources = 0x8649
- ExifOffset = 0x8769
- InterColorProfile = 0x8773
- ExposureProgram = 0x8822
- SpectralSensitivity = 0x8824
- GPSInfo = 0x8825
- ISOSpeedRatings = 0x8827
- OECF = 0x8828
- Interlace = 0x8829
- TimeZoneOffset = 0x882A
- SelfTimerMode = 0x882B
- SensitivityType = 0x8830
- StandardOutputSensitivity = 0x8831
- RecommendedExposureIndex = 0x8832
- ISOSpeed = 0x8833
- ISOSpeedLatitudeyyy = 0x8834
- ISOSpeedLatitudezzz = 0x8835
- ExifVersion = 0x9000
- DateTimeOriginal = 0x9003
- DateTimeDigitized = 0x9004
- OffsetTime = 0x9010
- OffsetTimeOriginal = 0x9011
- OffsetTimeDigitized = 0x9012
- ComponentsConfiguration = 0x9101
- CompressedBitsPerPixel = 0x9102
- ShutterSpeedValue = 0x9201
- ApertureValue = 0x9202
- BrightnessValue = 0x9203
- ExposureBiasValue = 0x9204
- MaxApertureValue = 0x9205
- SubjectDistance = 0x9206
- MeteringMode = 0x9207
- LightSource = 0x9208
- Flash = 0x9209
- FocalLength = 0x920A
- Noise = 0x920D
- ImageNumber = 0x9211
- SecurityClassification = 0x9212
- ImageHistory = 0x9213
- TIFFEPStandardID = 0x9216
- MakerNote = 0x927C
- UserComment = 0x9286
- SubsecTime = 0x9290
- SubsecTimeOriginal = 0x9291
- SubsecTimeDigitized = 0x9292
- AmbientTemperature = 0x9400
- Humidity = 0x9401
- Pressure = 0x9402
- WaterDepth = 0x9403
- Acceleration = 0x9404
- CameraElevationAngle = 0x9405
- XPTitle = 0x9C9B
- XPComment = 0x9C9C
- XPAuthor = 0x9C9D
- XPKeywords = 0x9C9E
- XPSubject = 0x9C9F
- FlashPixVersion = 0xA000
- ColorSpace = 0xA001
- ExifImageWidth = 0xA002
- ExifImageHeight = 0xA003
- RelatedSoundFile = 0xA004
- ExifInteroperabilityOffset = 0xA005
- FlashEnergy = 0xA20B
- SpatialFrequencyResponse = 0xA20C
- FocalPlaneXResolution = 0xA20E
- FocalPlaneYResolution = 0xA20F
- FocalPlaneResolutionUnit = 0xA210
- SubjectLocation = 0xA214
- ExposureIndex = 0xA215
- SensingMethod = 0xA217
- FileSource = 0xA300
- SceneType = 0xA301
- CFAPattern = 0xA302
- CustomRendered = 0xA401
- ExposureMode = 0xA402
- WhiteBalance = 0xA403
- DigitalZoomRatio = 0xA404
- FocalLengthIn35mmFilm = 0xA405
- SceneCaptureType = 0xA406
- GainControl = 0xA407
- Contrast = 0xA408
- Saturation = 0xA409
- Sharpness = 0xA40A
- DeviceSettingDescription = 0xA40B
- SubjectDistanceRange = 0xA40C
- ImageUniqueID = 0xA420
- CameraOwnerName = 0xA430
- BodySerialNumber = 0xA431
- LensSpecification = 0xA432
- LensMake = 0xA433
- LensModel = 0xA434
- LensSerialNumber = 0xA435
- CompositeImage = 0xA460
- CompositeImageCount = 0xA461
- CompositeImageExposureTimes = 0xA462
- Gamma = 0xA500
- PrintImageMatching = 0xC4A5
- DNGVersion = 0xC612
- DNGBackwardVersion = 0xC613
- UniqueCameraModel = 0xC614
- LocalizedCameraModel = 0xC615
- CFAPlaneColor = 0xC616
- CFALayout = 0xC617
- LinearizationTable = 0xC618
- BlackLevelRepeatDim = 0xC619
- BlackLevel = 0xC61A
- BlackLevelDeltaH = 0xC61B
- BlackLevelDeltaV = 0xC61C
- WhiteLevel = 0xC61D
- DefaultScale = 0xC61E
- DefaultCropOrigin = 0xC61F
- DefaultCropSize = 0xC620
- ColorMatrix1 = 0xC621
- ColorMatrix2 = 0xC622
- CameraCalibration1 = 0xC623
- CameraCalibration2 = 0xC624
- ReductionMatrix1 = 0xC625
- ReductionMatrix2 = 0xC626
- AnalogBalance = 0xC627
- AsShotNeutral = 0xC628
- AsShotWhiteXY = 0xC629
- BaselineExposure = 0xC62A
- BaselineNoise = 0xC62B
- BaselineSharpness = 0xC62C
- BayerGreenSplit = 0xC62D
- LinearResponseLimit = 0xC62E
- CameraSerialNumber = 0xC62F
- LensInfo = 0xC630
- ChromaBlurRadius = 0xC631
- AntiAliasStrength = 0xC632
- ShadowScale = 0xC633
- DNGPrivateData = 0xC634
- MakerNoteSafety = 0xC635
- CalibrationIlluminant1 = 0xC65A
- CalibrationIlluminant2 = 0xC65B
- BestQualityScale = 0xC65C
- RawDataUniqueID = 0xC65D
- OriginalRawFileName = 0xC68B
- OriginalRawFileData = 0xC68C
- ActiveArea = 0xC68D
- MaskedAreas = 0xC68E
- AsShotICCProfile = 0xC68F
- AsShotPreProfileMatrix = 0xC690
- CurrentICCProfile = 0xC691
- CurrentPreProfileMatrix = 0xC692
- ColorimetricReference = 0xC6BF
- CameraCalibrationSignature = 0xC6F3
- ProfileCalibrationSignature = 0xC6F4
- AsShotProfileName = 0xC6F6
- NoiseReductionApplied = 0xC6F7
- ProfileName = 0xC6F8
- ProfileHueSatMapDims = 0xC6F9
- ProfileHueSatMapData1 = 0xC6FA
- ProfileHueSatMapData2 = 0xC6FB
- ProfileToneCurve = 0xC6FC
- ProfileEmbedPolicy = 0xC6FD
- ProfileCopyright = 0xC6FE
- ForwardMatrix1 = 0xC714
- ForwardMatrix2 = 0xC715
- PreviewApplicationName = 0xC716
- PreviewApplicationVersion = 0xC717
- PreviewSettingsName = 0xC718
- PreviewSettingsDigest = 0xC719
- PreviewColorSpace = 0xC71A
- PreviewDateTime = 0xC71B
- RawImageDigest = 0xC71C
- OriginalRawFileDigest = 0xC71D
- SubTileBlockSize = 0xC71E
- RowInterleaveFactor = 0xC71F
- ProfileLookTableDims = 0xC725
- ProfileLookTableData = 0xC726
- OpcodeList1 = 0xC740
- OpcodeList2 = 0xC741
- OpcodeList3 = 0xC74E
- NoiseProfile = 0xC761
-
-
-"""Maps EXIF tags to tag names."""
-TAGS = {
- **{i.value: i.name for i in Base},
- 0x920C: "SpatialFrequencyResponse",
- 0x9214: "SubjectLocation",
- 0x9215: "ExposureIndex",
- 0x828E: "CFAPattern",
- 0x920B: "FlashEnergy",
- 0x9216: "TIFF/EPStandardID",
-}
-
-
-class GPS(IntEnum):
- GPSVersionID = 0
- GPSLatitudeRef = 1
- GPSLatitude = 2
- GPSLongitudeRef = 3
- GPSLongitude = 4
- GPSAltitudeRef = 5
- GPSAltitude = 6
- GPSTimeStamp = 7
- GPSSatellites = 8
- GPSStatus = 9
- GPSMeasureMode = 10
- GPSDOP = 11
- GPSSpeedRef = 12
- GPSSpeed = 13
- GPSTrackRef = 14
- GPSTrack = 15
- GPSImgDirectionRef = 16
- GPSImgDirection = 17
- GPSMapDatum = 18
- GPSDestLatitudeRef = 19
- GPSDestLatitude = 20
- GPSDestLongitudeRef = 21
- GPSDestLongitude = 22
- GPSDestBearingRef = 23
- GPSDestBearing = 24
- GPSDestDistanceRef = 25
- GPSDestDistance = 26
- GPSProcessingMethod = 27
- GPSAreaInformation = 28
- GPSDateStamp = 29
- GPSDifferential = 30
- GPSHPositioningError = 31
-
-
-"""Maps EXIF GPS tags to tag names."""
-GPSTAGS = {i.value: i.name for i in GPS}
-
-
-class Interop(IntEnum):
- InteropIndex = 1
- InteropVersion = 2
- RelatedImageFileFormat = 4096
- RelatedImageWidth = 4097
- RleatedImageHeight = 4098
-
-
-class IFD(IntEnum):
- Exif = 34665
- GPSInfo = 34853
- Makernote = 37500
- Interop = 40965
- IFD1 = -1
-
-
-class LightSource(IntEnum):
- Unknown = 0
- Daylight = 1
- Fluorescent = 2
- Tungsten = 3
- Flash = 4
- Fine = 9
- Cloudy = 10
- Shade = 11
- DaylightFluorescent = 12
- DayWhiteFluorescent = 13
- CoolWhiteFluorescent = 14
- WhiteFluorescent = 15
- StandardLightA = 17
- StandardLightB = 18
- StandardLightC = 19
- D55 = 20
- D65 = 21
- D75 = 22
- D50 = 23
- ISO = 24
- Other = 255
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/TiffTags.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/TiffTags.py
deleted file mode 100644
index 30b05e4e1d41fa21a7b7bf12c04ee05af6aa5284..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/TiffTags.py
+++ /dev/null
@@ -1,560 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# TIFF tags
-#
-# This module provides clear-text names for various well-known
-# TIFF tags. the TIFF codec works just fine without it.
-#
-# Copyright (c) Secret Labs AB 1999.
-#
-# See the README file for information on usage and redistribution.
-#
-
-##
-# This module provides constants and clear-text names for various
-# well-known TIFF tags.
-##
-
-from collections import namedtuple
-
-
-class TagInfo(namedtuple("_TagInfo", "value name type length enum")):
- __slots__ = []
-
- def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None):
- return super().__new__(cls, value, name, type, length, enum or {})
-
- def cvt_enum(self, value):
- # Using get will call hash(value), which can be expensive
- # for some types (e.g. Fraction). Since self.enum is rarely
- # used, it's usually better to test it first.
- return self.enum.get(value, value) if self.enum else value
-
-
-def lookup(tag, group=None):
- """
- :param tag: Integer tag number
- :param group: Which :py:data:`~PIL.TiffTags.TAGS_V2_GROUPS` to look in
-
- .. versionadded:: 8.3.0
-
- :returns: Taginfo namedtuple, From the ``TAGS_V2`` info if possible,
- otherwise just populating the value and name from ``TAGS``.
- If the tag is not recognized, "unknown" is returned for the name
-
- """
-
- if group is not None:
- info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None
- else:
- info = TAGS_V2.get(tag)
- return info or TagInfo(tag, TAGS.get(tag, "unknown"))
-
-
-##
-# Map tag numbers to tag info.
-#
-# id: (Name, Type, Length, enum_values)
-#
-# The length here differs from the length in the tiff spec. For
-# numbers, the tiff spec is for the number of fields returned. We
-# agree here. For string-like types, the tiff spec uses the length of
-# field in bytes. In Pillow, we are using the number of expected
-# fields, in general 1 for string-like types.
-
-
-BYTE = 1
-ASCII = 2
-SHORT = 3
-LONG = 4
-RATIONAL = 5
-SIGNED_BYTE = 6
-UNDEFINED = 7
-SIGNED_SHORT = 8
-SIGNED_LONG = 9
-SIGNED_RATIONAL = 10
-FLOAT = 11
-DOUBLE = 12
-IFD = 13
-LONG8 = 16
-
-TAGS_V2 = {
- 254: ("NewSubfileType", LONG, 1),
- 255: ("SubfileType", SHORT, 1),
- 256: ("ImageWidth", LONG, 1),
- 257: ("ImageLength", LONG, 1),
- 258: ("BitsPerSample", SHORT, 0),
- 259: (
- "Compression",
- SHORT,
- 1,
- {
- "Uncompressed": 1,
- "CCITT 1d": 2,
- "Group 3 Fax": 3,
- "Group 4 Fax": 4,
- "LZW": 5,
- "JPEG": 6,
- "PackBits": 32773,
- },
- ),
- 262: (
- "PhotometricInterpretation",
- SHORT,
- 1,
- {
- "WhiteIsZero": 0,
- "BlackIsZero": 1,
- "RGB": 2,
- "RGB Palette": 3,
- "Transparency Mask": 4,
- "CMYK": 5,
- "YCbCr": 6,
- "CieLAB": 8,
- "CFA": 32803, # TIFF/EP, Adobe DNG
- "LinearRaw": 32892, # Adobe DNG
- },
- ),
- 263: ("Threshholding", SHORT, 1),
- 264: ("CellWidth", SHORT, 1),
- 265: ("CellLength", SHORT, 1),
- 266: ("FillOrder", SHORT, 1),
- 269: ("DocumentName", ASCII, 1),
- 270: ("ImageDescription", ASCII, 1),
- 271: ("Make", ASCII, 1),
- 272: ("Model", ASCII, 1),
- 273: ("StripOffsets", LONG, 0),
- 274: ("Orientation", SHORT, 1),
- 277: ("SamplesPerPixel", SHORT, 1),
- 278: ("RowsPerStrip", LONG, 1),
- 279: ("StripByteCounts", LONG, 0),
- 280: ("MinSampleValue", SHORT, 0),
- 281: ("MaxSampleValue", SHORT, 0),
- 282: ("XResolution", RATIONAL, 1),
- 283: ("YResolution", RATIONAL, 1),
- 284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}),
- 285: ("PageName", ASCII, 1),
- 286: ("XPosition", RATIONAL, 1),
- 287: ("YPosition", RATIONAL, 1),
- 288: ("FreeOffsets", LONG, 1),
- 289: ("FreeByteCounts", LONG, 1),
- 290: ("GrayResponseUnit", SHORT, 1),
- 291: ("GrayResponseCurve", SHORT, 0),
- 292: ("T4Options", LONG, 1),
- 293: ("T6Options", LONG, 1),
- 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}),
- 297: ("PageNumber", SHORT, 2),
- 301: ("TransferFunction", SHORT, 0),
- 305: ("Software", ASCII, 1),
- 306: ("DateTime", ASCII, 1),
- 315: ("Artist", ASCII, 1),
- 316: ("HostComputer", ASCII, 1),
- 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}),
- 318: ("WhitePoint", RATIONAL, 2),
- 319: ("PrimaryChromaticities", RATIONAL, 6),
- 320: ("ColorMap", SHORT, 0),
- 321: ("HalftoneHints", SHORT, 2),
- 322: ("TileWidth", LONG, 1),
- 323: ("TileLength", LONG, 1),
- 324: ("TileOffsets", LONG, 0),
- 325: ("TileByteCounts", LONG, 0),
- 330: ("SubIFDs", LONG, 0),
- 332: ("InkSet", SHORT, 1),
- 333: ("InkNames", ASCII, 1),
- 334: ("NumberOfInks", SHORT, 1),
- 336: ("DotRange", SHORT, 0),
- 337: ("TargetPrinter", ASCII, 1),
- 338: ("ExtraSamples", SHORT, 0),
- 339: ("SampleFormat", SHORT, 0),
- 340: ("SMinSampleValue", DOUBLE, 0),
- 341: ("SMaxSampleValue", DOUBLE, 0),
- 342: ("TransferRange", SHORT, 6),
- 347: ("JPEGTables", UNDEFINED, 1),
- # obsolete JPEG tags
- 512: ("JPEGProc", SHORT, 1),
- 513: ("JPEGInterchangeFormat", LONG, 1),
- 514: ("JPEGInterchangeFormatLength", LONG, 1),
- 515: ("JPEGRestartInterval", SHORT, 1),
- 517: ("JPEGLosslessPredictors", SHORT, 0),
- 518: ("JPEGPointTransforms", SHORT, 0),
- 519: ("JPEGQTables", LONG, 0),
- 520: ("JPEGDCTables", LONG, 0),
- 521: ("JPEGACTables", LONG, 0),
- 529: ("YCbCrCoefficients", RATIONAL, 3),
- 530: ("YCbCrSubSampling", SHORT, 2),
- 531: ("YCbCrPositioning", SHORT, 1),
- 532: ("ReferenceBlackWhite", RATIONAL, 6),
- 700: ("XMP", BYTE, 0),
- 33432: ("Copyright", ASCII, 1),
- 33723: ("IptcNaaInfo", UNDEFINED, 1),
- 34377: ("PhotoshopInfo", BYTE, 0),
- # FIXME add more tags here
- 34665: ("ExifIFD", LONG, 1),
- 34675: ("ICCProfile", UNDEFINED, 1),
- 34853: ("GPSInfoIFD", LONG, 1),
- 36864: ("ExifVersion", UNDEFINED, 1),
- 37724: ("ImageSourceData", UNDEFINED, 1),
- 40965: ("InteroperabilityIFD", LONG, 1),
- 41730: ("CFAPattern", UNDEFINED, 1),
- # MPInfo
- 45056: ("MPFVersion", UNDEFINED, 1),
- 45057: ("NumberOfImages", LONG, 1),
- 45058: ("MPEntry", UNDEFINED, 1),
- 45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check
- 45060: ("TotalFrames", LONG, 1),
- 45313: ("MPIndividualNum", LONG, 1),
- 45569: ("PanOrientation", LONG, 1),
- 45570: ("PanOverlap_H", RATIONAL, 1),
- 45571: ("PanOverlap_V", RATIONAL, 1),
- 45572: ("BaseViewpointNum", LONG, 1),
- 45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1),
- 45574: ("BaselineLength", RATIONAL, 1),
- 45575: ("VerticalDivergence", SIGNED_RATIONAL, 1),
- 45576: ("AxisDistance_X", SIGNED_RATIONAL, 1),
- 45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1),
- 45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1),
- 45579: ("YawAngle", SIGNED_RATIONAL, 1),
- 45580: ("PitchAngle", SIGNED_RATIONAL, 1),
- 45581: ("RollAngle", SIGNED_RATIONAL, 1),
- 40960: ("FlashPixVersion", UNDEFINED, 1),
- 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}),
- 50780: ("BestQualityScale", RATIONAL, 1),
- 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one
- 50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006
-}
-TAGS_V2_GROUPS = {
- # ExifIFD
- 34665: {
- 36864: ("ExifVersion", UNDEFINED, 1),
- 40960: ("FlashPixVersion", UNDEFINED, 1),
- 40965: ("InteroperabilityIFD", LONG, 1),
- 41730: ("CFAPattern", UNDEFINED, 1),
- },
- # GPSInfoIFD
- 34853: {
- 0: ("GPSVersionID", BYTE, 4),
- 1: ("GPSLatitudeRef", ASCII, 2),
- 2: ("GPSLatitude", RATIONAL, 3),
- 3: ("GPSLongitudeRef", ASCII, 2),
- 4: ("GPSLongitude", RATIONAL, 3),
- 5: ("GPSAltitudeRef", BYTE, 1),
- 6: ("GPSAltitude", RATIONAL, 1),
- 7: ("GPSTimeStamp", RATIONAL, 3),
- 8: ("GPSSatellites", ASCII, 0),
- 9: ("GPSStatus", ASCII, 2),
- 10: ("GPSMeasureMode", ASCII, 2),
- 11: ("GPSDOP", RATIONAL, 1),
- 12: ("GPSSpeedRef", ASCII, 2),
- 13: ("GPSSpeed", RATIONAL, 1),
- 14: ("GPSTrackRef", ASCII, 2),
- 15: ("GPSTrack", RATIONAL, 1),
- 16: ("GPSImgDirectionRef", ASCII, 2),
- 17: ("GPSImgDirection", RATIONAL, 1),
- 18: ("GPSMapDatum", ASCII, 0),
- 19: ("GPSDestLatitudeRef", ASCII, 2),
- 20: ("GPSDestLatitude", RATIONAL, 3),
- 21: ("GPSDestLongitudeRef", ASCII, 2),
- 22: ("GPSDestLongitude", RATIONAL, 3),
- 23: ("GPSDestBearingRef", ASCII, 2),
- 24: ("GPSDestBearing", RATIONAL, 1),
- 25: ("GPSDestDistanceRef", ASCII, 2),
- 26: ("GPSDestDistance", RATIONAL, 1),
- 27: ("GPSProcessingMethod", UNDEFINED, 0),
- 28: ("GPSAreaInformation", UNDEFINED, 0),
- 29: ("GPSDateStamp", ASCII, 11),
- 30: ("GPSDifferential", SHORT, 1),
- },
- # InteroperabilityIFD
- 40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)},
-}
-
-# Legacy Tags structure
-# these tags aren't included above, but were in the previous versions
-TAGS = {
- 347: "JPEGTables",
- 700: "XMP",
- # Additional Exif Info
- 32932: "Wang Annotation",
- 33434: "ExposureTime",
- 33437: "FNumber",
- 33445: "MD FileTag",
- 33446: "MD ScalePixel",
- 33447: "MD ColorTable",
- 33448: "MD LabName",
- 33449: "MD SampleInfo",
- 33450: "MD PrepDate",
- 33451: "MD PrepTime",
- 33452: "MD FileUnits",
- 33550: "ModelPixelScaleTag",
- 33723: "IptcNaaInfo",
- 33918: "INGR Packet Data Tag",
- 33919: "INGR Flag Registers",
- 33920: "IrasB Transformation Matrix",
- 33922: "ModelTiepointTag",
- 34264: "ModelTransformationTag",
- 34377: "PhotoshopInfo",
- 34735: "GeoKeyDirectoryTag",
- 34736: "GeoDoubleParamsTag",
- 34737: "GeoAsciiParamsTag",
- 34850: "ExposureProgram",
- 34852: "SpectralSensitivity",
- 34855: "ISOSpeedRatings",
- 34856: "OECF",
- 34864: "SensitivityType",
- 34865: "StandardOutputSensitivity",
- 34866: "RecommendedExposureIndex",
- 34867: "ISOSpeed",
- 34868: "ISOSpeedLatitudeyyy",
- 34869: "ISOSpeedLatitudezzz",
- 34908: "HylaFAX FaxRecvParams",
- 34909: "HylaFAX FaxSubAddress",
- 34910: "HylaFAX FaxRecvTime",
- 36864: "ExifVersion",
- 36867: "DateTimeOriginal",
- 36868: "DateTimeDigitized",
- 37121: "ComponentsConfiguration",
- 37122: "CompressedBitsPerPixel",
- 37724: "ImageSourceData",
- 37377: "ShutterSpeedValue",
- 37378: "ApertureValue",
- 37379: "BrightnessValue",
- 37380: "ExposureBiasValue",
- 37381: "MaxApertureValue",
- 37382: "SubjectDistance",
- 37383: "MeteringMode",
- 37384: "LightSource",
- 37385: "Flash",
- 37386: "FocalLength",
- 37396: "SubjectArea",
- 37500: "MakerNote",
- 37510: "UserComment",
- 37520: "SubSec",
- 37521: "SubSecTimeOriginal",
- 37522: "SubsecTimeDigitized",
- 40960: "FlashPixVersion",
- 40961: "ColorSpace",
- 40962: "PixelXDimension",
- 40963: "PixelYDimension",
- 40964: "RelatedSoundFile",
- 40965: "InteroperabilityIFD",
- 41483: "FlashEnergy",
- 41484: "SpatialFrequencyResponse",
- 41486: "FocalPlaneXResolution",
- 41487: "FocalPlaneYResolution",
- 41488: "FocalPlaneResolutionUnit",
- 41492: "SubjectLocation",
- 41493: "ExposureIndex",
- 41495: "SensingMethod",
- 41728: "FileSource",
- 41729: "SceneType",
- 41730: "CFAPattern",
- 41985: "CustomRendered",
- 41986: "ExposureMode",
- 41987: "WhiteBalance",
- 41988: "DigitalZoomRatio",
- 41989: "FocalLengthIn35mmFilm",
- 41990: "SceneCaptureType",
- 41991: "GainControl",
- 41992: "Contrast",
- 41993: "Saturation",
- 41994: "Sharpness",
- 41995: "DeviceSettingDescription",
- 41996: "SubjectDistanceRange",
- 42016: "ImageUniqueID",
- 42032: "CameraOwnerName",
- 42033: "BodySerialNumber",
- 42034: "LensSpecification",
- 42035: "LensMake",
- 42036: "LensModel",
- 42037: "LensSerialNumber",
- 42112: "GDAL_METADATA",
- 42113: "GDAL_NODATA",
- 42240: "Gamma",
- 50215: "Oce Scanjob Description",
- 50216: "Oce Application Selector",
- 50217: "Oce Identification Number",
- 50218: "Oce ImageLogic Characteristics",
- # Adobe DNG
- 50706: "DNGVersion",
- 50707: "DNGBackwardVersion",
- 50708: "UniqueCameraModel",
- 50709: "LocalizedCameraModel",
- 50710: "CFAPlaneColor",
- 50711: "CFALayout",
- 50712: "LinearizationTable",
- 50713: "BlackLevelRepeatDim",
- 50714: "BlackLevel",
- 50715: "BlackLevelDeltaH",
- 50716: "BlackLevelDeltaV",
- 50717: "WhiteLevel",
- 50718: "DefaultScale",
- 50719: "DefaultCropOrigin",
- 50720: "DefaultCropSize",
- 50721: "ColorMatrix1",
- 50722: "ColorMatrix2",
- 50723: "CameraCalibration1",
- 50724: "CameraCalibration2",
- 50725: "ReductionMatrix1",
- 50726: "ReductionMatrix2",
- 50727: "AnalogBalance",
- 50728: "AsShotNeutral",
- 50729: "AsShotWhiteXY",
- 50730: "BaselineExposure",
- 50731: "BaselineNoise",
- 50732: "BaselineSharpness",
- 50733: "BayerGreenSplit",
- 50734: "LinearResponseLimit",
- 50735: "CameraSerialNumber",
- 50736: "LensInfo",
- 50737: "ChromaBlurRadius",
- 50738: "AntiAliasStrength",
- 50740: "DNGPrivateData",
- 50778: "CalibrationIlluminant1",
- 50779: "CalibrationIlluminant2",
- 50784: "Alias Layer Metadata",
-}
-
-
-def _populate():
- for k, v in TAGS_V2.items():
- # Populate legacy structure.
- TAGS[k] = v[0]
- if len(v) == 4:
- for sk, sv in v[3].items():
- TAGS[(k, sv)] = sk
-
- TAGS_V2[k] = TagInfo(k, *v)
-
- for group, tags in TAGS_V2_GROUPS.items():
- for k, v in tags.items():
- tags[k] = TagInfo(k, *v)
-
-
-_populate()
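
A minimal lookup sketch, assuming Pillow is installed and that TagInfo keeps its upstream .name/.type fields: after _populate() runs, TAGS_V2 values are TagInfo tuples, the legacy TAGS dict maps tag ids to plain names, and enum values land under (tag, value) keys.

    from PIL import TiffTags

    info = TiffTags.TAGS_V2[33432]     # TagInfo for the Copyright tag
    print(info.name, info.type)        # "Copyright", 2 (ASCII)
    print(TiffTags.TAGS[33432])        # legacy mapping: just the name string
    print(TiffTags.TAGS[(50741, 1)])   # enum entry from MakerNoteSafety: "Safe"
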
-##
-# Map type numbers to type names -- defined in ImageFileDirectory.
-
-TYPES = {}
-
-# was:
-# TYPES = {
-# 1: "byte",
-# 2: "ascii",
-# 3: "short",
-# 4: "long",
-# 5: "rational",
-# 6: "signed byte",
-# 7: "undefined",
-# 8: "signed short",
-# 9: "signed long",
-# 10: "signed rational",
-# 11: "float",
-# 12: "double",
-# }
-
-#
-# These tags are handled by default in libtiff, without
-# adding to the custom dictionary. From tif_dir.c, searching for
-# case TIFFTAG in the _TIFFVSetField function:
-# Line: item.
-# 148: case TIFFTAG_SUBFILETYPE:
-# 151: case TIFFTAG_IMAGEWIDTH:
-# 154: case TIFFTAG_IMAGELENGTH:
-# 157: case TIFFTAG_BITSPERSAMPLE:
-# 181: case TIFFTAG_COMPRESSION:
-# 202: case TIFFTAG_PHOTOMETRIC:
-# 205: case TIFFTAG_THRESHHOLDING:
-# 208: case TIFFTAG_FILLORDER:
-# 214: case TIFFTAG_ORIENTATION:
-# 221: case TIFFTAG_SAMPLESPERPIXEL:
-# 228: case TIFFTAG_ROWSPERSTRIP:
-# 238: case TIFFTAG_MINSAMPLEVALUE:
-# 241: case TIFFTAG_MAXSAMPLEVALUE:
-# 244: case TIFFTAG_SMINSAMPLEVALUE:
-# 247: case TIFFTAG_SMAXSAMPLEVALUE:
-# 250: case TIFFTAG_XRESOLUTION:
-# 256: case TIFFTAG_YRESOLUTION:
-# 262: case TIFFTAG_PLANARCONFIG:
-# 268: case TIFFTAG_XPOSITION:
-# 271: case TIFFTAG_YPOSITION:
-# 274: case TIFFTAG_RESOLUTIONUNIT:
-# 280: case TIFFTAG_PAGENUMBER:
-# 284: case TIFFTAG_HALFTONEHINTS:
-# 288: case TIFFTAG_COLORMAP:
-# 294: case TIFFTAG_EXTRASAMPLES:
-# 298: case TIFFTAG_MATTEING:
-# 305: case TIFFTAG_TILEWIDTH:
-# 316: case TIFFTAG_TILELENGTH:
-# 327: case TIFFTAG_TILEDEPTH:
-# 333: case TIFFTAG_DATATYPE:
-# 344: case TIFFTAG_SAMPLEFORMAT:
-# 361: case TIFFTAG_IMAGEDEPTH:
-# 364: case TIFFTAG_SUBIFD:
-# 376: case TIFFTAG_YCBCRPOSITIONING:
-# 379: case TIFFTAG_YCBCRSUBSAMPLING:
-# 383: case TIFFTAG_TRANSFERFUNCTION:
-# 389: case TIFFTAG_REFERENCEBLACKWHITE:
-# 393: case TIFFTAG_INKNAMES:
-
-# Following pseudo-tags are also handled by default in libtiff:
-# TIFFTAG_JPEGQUALITY 65537
-
-# some of these are not in our TAGS_V2 dict and were included from tiff.h
-
-# This list also exists in encode.c
-LIBTIFF_CORE = {
- 255,
- 256,
- 257,
- 258,
- 259,
- 262,
- 263,
- 266,
- 274,
- 277,
- 278,
- 280,
- 281,
- 340,
- 341,
- 282,
- 283,
- 284,
- 286,
- 287,
- 296,
- 297,
- 321,
- 320,
- 338,
- 32995,
- 322,
- 323,
- 32998,
- 32996,
- 339,
- 32997,
- 330,
- 531,
- 530,
- 301,
- 532,
- 333,
- # as above
- 269, # this has been in our tests forever, and works
- 65537,
-}
-
-LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes
-LIBTIFF_CORE.remove(322) # We don't have support for writing tiled images with libtiff
-LIBTIFF_CORE.remove(323) # Tiled images
-LIBTIFF_CORE.remove(333) # Ink Names either
-
-# Note to advanced users: There may be combinations of these
-# parameters and values that when added properly, will work and
-# produce valid tiff images that may work in your application.
-# It is safe to add and remove tags from this set from Pillow's point
-# of view so long as you test against libtiff.
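
A minimal sketch of the kind of tweak that note refers to, assuming a Pillow build that saves through libtiff; the tag number is purely illustrative and, as the note says, any change should still be verified against libtiff itself.

    from PIL import Image, TiffTags

    # Illustrative only: treat tag 317 (Predictor) as a core tag for the
    # libtiff-based writer; whether it round-trips correctly must be tested.
    TiffTags.LIBTIFF_CORE.add(317)

    Image.new("RGB", (64, 64)).save("out.tif", compression="tiff_lzw")
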
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/utils/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/utils/__init__.py
deleted file mode 100644
index 0bd8ec5e3b566d8a2d43a0904fd49db7862a21eb..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/utils/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from .core import (
- infer_vegalite_type,
- infer_encoding_types,
- sanitize_dataframe,
- parse_shorthand,
- use_signature,
- update_nested,
- display_traceback,
- SchemaBase,
-)
-from .html import spec_to_html
-from .plugin_registry import PluginRegistry
-from .deprecation import AltairDeprecationWarning
-from .schemapi import Undefined
-
-
-__all__ = (
- "infer_vegalite_type",
- "infer_encoding_types",
- "sanitize_dataframe",
- "spec_to_html",
- "parse_shorthand",
- "use_signature",
- "update_nested",
- "display_traceback",
- "AltairDeprecationWarning",
- "SchemaBase",
- "Undefined",
- "PluginRegistry",
-)
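
A brief usage sketch, assuming an Altair installation where these helpers keep their usual signatures, of two of the utilities re-exported above:

    import pandas as pd
    from altair.utils import parse_shorthand, sanitize_dataframe

    df = sanitize_dataframe(pd.DataFrame({"x": [1, 2, 3]}))
    print(parse_shorthand("x:Q"))   # expected: {'field': 'x', 'type': 'quantitative'}
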
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/assdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/assdec.c
deleted file mode 100644
index 89d7b51894b267801536ddb16896032f9d2ce7c9..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/assdec.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * SSA/ASS decoder
- * Copyright (c) 2010 Aurelien Jacobs
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <string.h>
-
-#include "avcodec.h"
-#include "ass.h"
-#include "codec_internal.h"
-#include "config_components.h"
-#include "libavutil/internal.h"
-#include "libavutil/mem.h"
-
-static av_cold int ass_decode_init(AVCodecContext *avctx)
-{
- avctx->subtitle_header = av_malloc(avctx->extradata_size + 1);
- if (!avctx->subtitle_header)
- return AVERROR(ENOMEM);
- if (avctx->extradata_size)
- memcpy(avctx->subtitle_header, avctx->extradata, avctx->extradata_size);
- avctx->subtitle_header[avctx->extradata_size] = 0;
- avctx->subtitle_header_size = avctx->extradata_size;
- return 0;
-}
-
-static int ass_decode_frame(AVCodecContext *avctx, AVSubtitle *sub,
- int *got_sub_ptr, const AVPacket *avpkt)
-{
- if (avpkt->size <= 0)
- return avpkt->size;
-
- sub->rects = av_malloc(sizeof(*sub->rects));
- if (!sub->rects)
- return AVERROR(ENOMEM);
- sub->rects[0] = av_mallocz(sizeof(*sub->rects[0]));
- if (!sub->rects[0])
- return AVERROR(ENOMEM);
- sub->num_rects = 1;
- sub->rects[0]->type = SUBTITLE_ASS;
- sub->rects[0]->ass = av_strdup(avpkt->data);
- if (!sub->rects[0]->ass)
- return AVERROR(ENOMEM);
- *got_sub_ptr = 1;
- return avpkt->size;
-}
-
-#if CONFIG_SSA_DECODER
-const FFCodec ff_ssa_decoder = {
- .p.name = "ssa",
- CODEC_LONG_NAME("ASS (Advanced SubStation Alpha) subtitle"),
- .p.type = AVMEDIA_TYPE_SUBTITLE,
- .p.id = AV_CODEC_ID_ASS,
- .init = ass_decode_init,
- FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
-};
-#endif
-
-#if CONFIG_ASS_DECODER
-const FFCodec ff_ass_decoder = {
- .p.name = "ass",
- CODEC_LONG_NAME("ASS (Advanced SubStation Alpha) subtitle"),
- .p.type = AVMEDIA_TYPE_SUBTITLE,
- .p.id = AV_CODEC_ID_ASS,
- .init = ass_decode_init,
- FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
-};
-#endif
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/g723_1.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/g723_1.c
deleted file mode 100644
index d19d09dd899be76be1ec36e9528bb87c215403d7..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/g723_1.c
+++ /dev/null
@@ -1,1333 +0,0 @@
-/*
- * G.723.1 compatible decoder
- * Copyright (c) 2006 Benjamin Larsson
- * Copyright (c) 2010 Mohamed Naufal Basheer
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include
-#include
-
-#include "libavutil/common.h"
-#include "libavutil/intmath.h"
-
-#include "acelp_vectors.h"
-#include "celp_math.h"
-#include "g723_1.h"
-
-const int16_t ff_g723_1_cos_tab[COS_TBL_SIZE + 1] = {
- 16384, 16383, 16379, 16373, 16364, 16353, 16340, 16324,
- 16305, 16284, 16261, 16235, 16207, 16176, 16143, 16107,
- 16069, 16029, 15986, 15941, 15893, 15843, 15791, 15736,
- 15679, 15619, 15557, 15493, 15426, 15357, 15286, 15213,
- 15137, 15059, 14978, 14896, 14811, 14724, 14635, 14543,
- 14449, 14354, 14256, 14155, 14053, 13949, 13842, 13733,
- 13623, 13510, 13395, 13279, 13160, 13039, 12916, 12792,
- 12665, 12537, 12406, 12274, 12140, 12004, 11866, 11727,
- 11585, 11442, 11297, 11151, 11003, 10853, 10702, 10549,
- 10394, 10238, 10080, 9921, 9760, 9598, 9434, 9269,
- 9102, 8935, 8765, 8595, 8423, 8250, 8076, 7900,
- 7723, 7545, 7366, 7186, 7005, 6823, 6639, 6455,
- 6270, 6084, 5897, 5708, 5520, 5330, 5139, 4948,
- 4756, 4563, 4370, 4176, 3981, 3786, 3590, 3393,
- 3196, 2999, 2801, 2603, 2404, 2205, 2006, 1806,
- 1606, 1406, 1205, 1005, 804, 603, 402, 201,
- 0, -201, -402, -603, -804, -1005, -1205, -1406,
- -1606, -1806, -2006, -2205, -2404, -2603, -2801, -2999,
- -3196, -3393, -3590, -3786, -3981, -4176, -4370, -4563,
- -4756, -4948, -5139, -5330, -5520, -5708, -5897, -6084,
- -6270, -6455, -6639, -6823, -7005, -7186, -7366, -7545,
- -7723, -7900, -8076, -8250, -8423, -8595, -8765, -8935,
- -9102, -9269, -9434, -9598, -9760, -9921, -10080, -10238,
- -10394, -10549, -10702, -10853, -11003, -11151, -11297, -11442,
- -11585, -11727, -11866, -12004, -12140, -12274, -12406, -12537,
- -12665, -12792, -12916, -13039, -13160, -13279, -13395, -13510,
- -13623, -13733, -13842, -13949, -14053, -14155, -14256, -14354,
- -14449, -14543, -14635, -14724, -14811, -14896, -14978, -15059,
- -15137, -15213, -15286, -15357, -15426, -15493, -15557, -15619,
- -15679, -15736, -15791, -15843, -15893, -15941, -15986, -16029,
- -16069, -16107, -16143, -16176, -16207, -16235, -16261, -16284,
- -16305, -16324, -16340, -16353, -16364, -16373, -16379, -16383,
- -16384, -16383, -16379, -16373, -16364, -16353, -16340, -16324,
- -16305, -16284, -16261, -16235, -16207, -16176, -16143, -16107,
- -16069, -16029, -15986, -15941, -15893, -15843, -15791, -15736,
- -15679, -15619, -15557, -15493, -15426, -15357, -15286, -15213,
- -15137, -15059, -14978, -14896, -14811, -14724, -14635, -14543,
- -14449, -14354, -14256, -14155, -14053, -13949, -13842, -13733,
- -13623, -13510, -13395, -13279, -13160, -13039, -12916, -12792,
- -12665, -12537, -12406, -12274, -12140, -12004, -11866, -11727,
- -11585, -11442, -11297, -11151, -11003, -10853, -10702, -10549,
- -10394, -10238, -10080, -9921, -9760, -9598, -9434, -9269,
- -9102, -8935, -8765, -8595, -8423, -8250, -8076, -7900,
- -7723, -7545, -7366, -7186, -7005, -6823, -6639, -6455,
- -6270, -6084, -5897, -5708, -5520, -5330, -5139, -4948,
- -4756, -4563, -4370, -4176, -3981, -3786, -3590, -3393,
- -3196, -2999, -2801, -2603, -2404, -2205, -2006, -1806,
- -1606, -1406, -1205, -1005, -804, -603, -402, -201,
- 0, 201, 402, 603, 804, 1005, 1205, 1406,
- 1606, 1806, 2006, 2205, 2404, 2603, 2801, 2999,
- 3196, 3393, 3590, 3786, 3981, 4176, 4370, 4563,
- 4756, 4948, 5139, 5330, 5520, 5708, 5897, 6084,
- 6270, 6455, 6639, 6823, 7005, 7186, 7366, 7545,
- 7723, 7900, 8076, 8250, 8423, 8595, 8765, 8935,
- 9102, 9269, 9434, 9598, 9760, 9921, 10080, 10238,
- 10394, 10549, 10702, 10853, 11003, 11151, 11297, 11442,
- 11585, 11727, 11866, 12004, 12140, 12274, 12406, 12537,
- 12665, 12792, 12916, 13039, 13160, 13279, 13395, 13510,
- 13623, 13733, 13842, 13949, 14053, 14155, 14256, 14354,
- 14449, 14543, 14635, 14724, 14811, 14896, 14978, 15059,
- 15137, 15213, 15286, 15357, 15426, 15493, 15557, 15619,
- 15679, 15736, 15791, 15843, 15893, 15941, 15986, 16029,
- 16069, 16107, 16143, 16176, 16207, 16235, 16261, 16284,
- 16305, 16324, 16340, 16353, 16364, 16373, 16379, 16383,
- 16384
-};
-
-const int16_t ff_g723_1_lsp_band0[LSP_CB_SIZE][3] = {
- { 0, 0, 0}, { -270, -1372, -1032}, { -541, -1650, -1382},
- { -723, -2011, -2213}, { -941, -1122, -1942}, { -780, -1145, -2454},
- { -884, -1309, -1373}, {-1051, -1523, -1766}, {-1083, -1622, -2300},
- { -777, -1377, -2147}, { -935, -1467, -2763}, { -802, -1327, -3471},
- { -935, -1959, -3999}, { -240, -89, 222}, { -661, -257, -160},
- { -994, -466, -419}, { -188, -164, -278}, { -342, -512, -415},
- { -607, -511, -797}, { 16, 19, -716}, { 374, 425, -972},
- { -346, 245, -282}, { -265, 506, -754}, { -620, -147, 1955},
- { -742, -860, 2597}, { -150, -352, 2704}, { 305, 880, 1954},
- { 123, 731, 2766}, { -348, 765, 3327}, { 618, 221, 3258},
- { -178, -47, 4219}, { 393, 1304, 3842}, { 698, 1702, 4801},
- { 63, -584, 1229}, { -215, -732, 1704}, { 172, -335, 1909},
- { -2, 216, 1797}, { 353, 127, 2205}, {-1208, 188, 11},
- { -513, -75, -683}, { -973, 222, -646}, { -616, -843, -388},
- { -950, -1113, -359}, {-1431, -623, -705}, {-1398, -1063, -178},
- { -45, -461, 35}, { -9, -657, -216}, { 127, -1078, 95},
- { -950, -1156, 584}, {-1480, -1494, 449}, { -120, -705, 516},
- { -368, -961, 727}, { -378, -526, 973}, { -793, -614, 676},
- { -801, -755, 1287}, {-1476, -340, 1636}, { -505, -1254, 1543},
- {-1243, -1622, 1532}, { -776, -1477, -655}, {-1151, -1296, -823},
- {-1153, -1672, -1124}, {-1291, -2003, -1702}, { -622, -1283, 57},
- { -471, -1611, 509}, {-1060, -1570, -139}, { -873, -2156, -536},
- {-1716, -2021, -364}, {-2150, -3218, -1291}, {-1248, -1945, -2904},
- {-1215, -2633, -2855}, { 167, -244, 84}, { 349, -412, -217},
- { -40, -352, 632}, { 227, -529, 405}, { 68, -383, -443},
- { 167, -558, -706}, { -275, -854, -14}, { -351, -1089, -449},
- { 341, -72, -289}, { 603, -106, -474}, { 322, -219, -649},
- { 179, -317, -998}, { 450, -291, -996}, { 555, 195, -525},
- { 784, 272, -831}, { -148, -384, -849}, { 82, -536, -1357},
- { 238, -172, -1354}, { 422, -268, -1841}, { 297, -737, -2079},
- { -111, -801, -598}, { 1, -668, -984}, { -131, -818, -1299},
- { -329, -521, -1310}, { -151, -778, -1834}, { -93, -352, -1746},
- { -568, -640, -1821}, { -509, -941, -2183}, { 464, -815, -1250},
- { 79, -1133, -1597}, { -184, -1353, -2123}, { -196, -410, -2427},
- { -192, -833, -2810}, { -259, -1382, -3045}, { -217, 4, -1166},
- { -800, -325, -1219}, { -363, -830, -898}, { -661, -1134, -960},
- { -386, -980, -1501}, { -627, -1159, -1722}, { -903, -829, -855},
- { -685, -829, -1313}, {-1065, -959, -1405}, { 441, 25, -847},
- { 655, -27, -1181}, { 1159, -110, -705}, { 856, 253, -1671},
- { 415, 404, -1}, { 322, 903, -398}, { 670, 499, -292},
- { 803, 591, -610}, { 1144, 591, -814}, { 717, 183, 393},
- { 857, 381, 106}, { 609, 62, -27}, { 792, 198, -325},
- { 735, 805, 88}, { 1142, 812, 78}, { 1028, 366, -292},
- { 1309, 743, -237}, { 1615, 589, -79}, { 1010, 639, -243},
- { 999, 964, -311}, { 1500, 1137, -615}, { 988, 357, 646},
- { 1227, 667, 683}, { 1164, 1565, 894}, { 1392, 2015, 477},
- { 1138, 533, 250}, { 1437, 896, 391}, { 1765, 1118, 99},
- { 1112, 1090, 802}, { 1596, 846, 1134}, { 937, 1161, 279},
- { 1719, 1254, 683}, { 1338, 1086, 35}, { 1419, 1324, 428},
- { 1428, 1524, 40}, { 2108, 1594, 89}, { 1015, 544, 1222},
- { 1121, 925, 1263}, { 1030, 1318, 1485}, { 1295, 789, 1817},
- { 1323, 1272, 1909}, { 1724, 1237, 1803}, { 1797, 1689, 858},
- { 2149, 1367, 1301}, { 2302, 1867, 761}, { 2863, 2351, 1053},
- { 52, 163, -76}, { 230, 309, -492}, { -71, 619, 39},
- { -218, 856, 499}, { -654, 736, -207}, { -535, 1259, 155},
- { -480, 1476, 643}, { 262, 1081, 102}, { 309, 1592, -182},
- { 627, 1629, 534}, { 337, 643, 456}, { 758, 670, 713},
- { 202, 1126, 658}, { 612, 1131, 666}, { 686, 1223, 1136},
- { -131, 377, 525}, { 42, 708, 907}, { 87, 1488, 1035},
- { 432, 2117, 904}, { 137, 981, 1332}, { -447, 1014, 1136},
- { -839, 1793, 1246}, { -559, 297, 198}, { -850, 685, 446},
- {-1273, 632, 826}, { -401, -544, 173}, { -753, -793, 144},
- { -436, -9, 772}, { -115, -243, 1310}, { -670, -269, 374},
- {-1027, -13, 639}, { -887, -81, 1137}, {-1277, -455, 158},
- {-1411, -720, 736}, { 172, 88, 403}, { 386, 255, 756},
- { -500, 522, 910}, { -958, 659, 1388}, { -395, 301, 1344},
- { -356, 768, 1813}, { -613, 841, 2419}, { 445, -122, 252},
- { 629, -87, 723}, { 283, -253, 870}, { 456, -116, 1381},
- { 757, 180, 1059}, { 532, 408, 1509}, { 947, 288, 1806},
- { 1325, 994, 2524}, { 892, 1219, 3023}, { 1397, 1596, 3406},
- { 1143, 1552, 2546}, { 1850, 1433, 2710}, { -10, 134, 1002},
- { 154, 499, 1323}, { 508, 792, 1117}, { 509, 1340, 1616},
- { 762, 862, 1608}, { 787, 740, 2320}, { 794, 1727, 1283},
- { 465, 2108, 1660}, { -120, 1451, 1613}, { -386, 2016, 2169},
- { 891, 1225, 2050}, { 456, 1480, 2185}, { 1493, 1283, 1209},
- { 1397, 1636, 1518}, { 1776, 1738, 1552}, { 1572, 1698, 2141},
- { 1389, 2126, 1271}, { 1959, 2413, 1119}, { 1365, 2892, 1505},
- { 2206, 1971, 1623}, { 2076, 1950, 2280}, { 1717, 2291, 1867},
- { 2366, 2515, 1953}, { 2865, 2838, 2522}, { 2535, 3465, 2011},
- { 3381, 4127, 2638}, { 836, 2667, 2289}, { 1761, 2773, 2337},
- { 1415, 3325, 2911}, { 2354, 3138, 3126}, { 2659, 4192, 4010},
- { 1048, 1786, 1818}, { 1242, 2111, 2240}, { 1512, 2079, 2780},
- { 1573, 2491, 3138}, { 2230, 2377, 2782}, { 416, 1773, 2704},
- { 725, 2336, 3297}, { 1252, 2373, 3978}, { 2094, 2268, 3568},
- { 2011, 2712, 4528}, { 1341, 3507, 3876}, { 1216, 3919, 4922},
- { 1693, 4793, 6012}
-};
-
-const int16_t ff_g723_1_lsp_band1[LSP_CB_SIZE][3] = {
- { 0, 0, 0}, {-2114, -1302, 76}, {-2652, -1278, -1368},
- {-2847, -828, -349}, {-3812, -2190, -349}, {-3946, -364, -449},
- {-2725, -4492, -3607}, {-3495, -4764, -1744}, { -51, -756, 84},
- { -153, -1191, 504}, { 108, -1418, 1167}, { -835, -896, 390},
- { -569, -1702, 87}, {-1151, -1818, 933}, {-1826, -2547, 411},
- {-1842, -1818, 1451}, {-2438, -1611, 781}, {-2747, -2477, 1311},
- { -940, 1252, 477}, {-1629, 1688, 602}, {-1202, 617, 280},
- {-1737, 393, 580}, {-1528, 1077, 1199}, {-2165, -161, 1408},
- {-2504, -1087, 2371}, {-3458, -175, 1395}, {-1397, -98, -843},
- {-2252, -177, -1149}, {-1489, -726, -1283}, {-1558, -265, -1744},
- {-1867, -821, -1897}, {-2062, -1516, -2340}, {-2595, -1142, -2861},
- { 170, 46, -819}, { -193, -204, -1151}, { 326, -196, -1532},
- { 780, 329, -816}, { 201, 369, -1243}, { 650, -209, -1060},
- { 1144, -15, -1216}, { 1203, -259, -1867}, { -890, -564, -1430},
- { -638, -852, -1921}, { 177, -739, -1358}, { -261, -526, -1666},
- { 206, -407, -2255}, { 338, -526, -822}, { 421, -1095, -1009},
- { 765, -607, -1408}, { 825, -1295, -2004}, { 357, -905, -1815},
- { -58, -1248, -1588}, { -596, -1436, -2046}, { -73, -1159, -2116},
- { -115, -1382, -2581}, { -160, -1723, -1952}, { -6, -2196, -2954},
- { -649, -1705, -2603}, { -617, -1453, -3282}, { -949, -2019, -3102},
- { -812, 1544, 1937}, {-1854, 574, 2000}, {-1463, 1140, 2649},
- {-2683, 1748, 1452}, {-2486, 2241, 2523}, { 783, 1910, 1435},
- { 581, 2682, 1376}, { 236, 2197, 1885}, { -453, 2943, 2057},
- { -682, 2178, 2565}, {-1342, 3201, 3328}, { -288, -184, 262},
- { 121, -149, -183}, { 758, -412, 206}, { 1038, -204, 853},
- { 1577, -457, 700}, { 937, -640, -567}, { 1508, -528, -1024},
- { -225, -527, -427}, { -564, -1095, -332}, { -742, -353, -186},
- {-1288, -459, 84}, {-1853, -484, -274}, {-1554, -731, 825},
- {-2425, -234, 382}, {-1722, 293, -271}, {-2515, 425, -564},
- {-2599, 818, 464}, { -358, 118, -375}, { -613, 198, -874},
- { -690, 683, -324}, {-1352, 1155, -168}, {-1093, 129, -324},
- {-1184, 611, -858}, { 433, 386, -372}, { -120, 486, -634},
- { 234, 851, -631}, { 602, 128, 46}, { 1099, 410, 159},
- { 715, -145, -424}, { 1198, -85, -593}, { 1390, 367, -358},
- { 1683, 362, -964}, { 1711, 622, 45}, { 2033, 833, -383},
- { 2890, 549, -506}, { 7, 401, 52}, { 72, 811, 415},
- { 566, 668, 41}, { 467, 1218, 130}, { 68, 957, -187},
- { -25, 1649, -103}, { -661, 260, 214}, { -925, -94, 612},
- { -321, -422, 965}, { -788, -672, 1783}, { 400, -673, 779},
- { 741, -595, 1635}, { -161, 307, 657}, { -382, 836, 871},
- { -814, 400, 1223}, { 364, 606, 1247}, { 57, 75, 1571},
- { 151, 471, 2287}, { -81, 1021, 1502}, { 227, 1470, 1097},
- { 658, 1275, 1653}, { 664, 1478, 2377}, { 263, -127, 444},
- { 264, 89, 969}, { 794, 171, 576}, { 821, 186, 1226},
- { 404, 462, 517}, { 339, 918, 794}, { 1280, 1423, 196},
- { 1453, 2019, 365}, { 1615, 1481, 672}, { 2394, 1708, 508},
- { 806, 1238, 573}, { 713, 1158, 1078}, { 1285, 1436, 1232},
- { 1790, 1188, 1141}, { 765, 643, 864}, { 1032, 797, 1279},
- { 900, 563, 1827}, { 1514, 673, 2312}, { 1544, 1129, 3240},
- { 1469, 1050, 1594}, { 1945, 1318, 1988}, { 2397, 2026, 2060},
- { 3538, 2057, 2620}, { 1249, -118, 74}, { 1727, 194, 421},
- { 2078, -50, -463}, { 970, 688, -432}, { 1149, 952, -110},
- { 1254, 1275, -651}, { 1386, 929, 401}, { 1960, 1167, 232},
- { 407, -752, -243}, { 859, -1118, 172}, { -227, -860, -992},
- { -796, -1175, -1380}, { 8, -1282, -388}, { 353, -1781, -1037},
- { -732, -397, -807}, { -853, -28, -1342}, {-1229, -1207, -1959},
- {-1015, -1125, -2543}, {-1452, -1791, -2725}, {-1891, -2416, -3269},
- { -918, -1629, -783}, { -580, -2155, -698}, {-1097, -2364, -96},
- {-1387, -1513, 7}, {-1588, -2076, -664}, {-1473, -2740, -784},
- {-2378, -3149, -56}, {-2856, -2092, -169}, {-3391, -3708, 316},
- {-1176, -890, -614}, {-1944, -1061, -800}, { -299, -1517, -1000},
- { -640, -1850, -1526}, {-1454, -1536, -1233}, {-1890, -1955, -1756},
- {-1086, -1921, -2122}, { -750, -2325, -2260}, {-1325, -2413, -2673},
- {-1114, -2542, -3459}, {-1341, -2901, -3963}, {-1160, -2226, -1393},
- {-1001, -2772, -1573}, {-1594, -2641, -1978}, {-1534, -3046, -2624},
- {-2224, -2196, -675}, {-2807, -3054, -1102}, {-2008, -2840, -1186},
- {-1980, -3332, -1695}, {-1715, -3562, -505}, {-2527, -4000, -1887},
- {-2333, -2734, -2296}, {-3440, -2401, -3211}, {-2008, -3528, -3337},
- {-2247, -3291, -4510}, { -475, 949, 155}, { -149, 1365, 545},
- { -757, 1644, 1083}, { -217, 2053, 1353}, {-1433, 2301, 1462},
- { 495, 1661, 529}, { 10, 2037, 740}, { 2082, 1898, 978},
- { 2831, 2294, 911}, { 842, 793, 420}, { 1223, 1023, 863},
- { 1237, 451, 780}, { 1744, 708, 822}, { 1533, 284, 1384},
- { 2135, 609, 1538}, { 2305, 626, 540}, { 2368, 1187, 955},
- { 2586, 1255, -7}, { 3116, 1131, 726}, { 3431, 1730, 428},
- { 2734, 1648, 1307}, { 2988, 1231, 2010}, { 3523, 2024, 1488},
- { 1034, 1657, 871}, { 1206, 2163, 1036}, { 1807, 2372, 1233},
- { 1808, 1769, 1493}, { 1573, 2332, 1779}, { 1216, 1609, 1866},
- { 1480, 1898, 2513}, { 465, 2708, 2776}, { 771, 3638, 3338},
- { 1869, 2599, 2623}, { 2825, 2745, 2468}, { 2638, 2439, 1585},
- { 2094, 2970, 1308}, { 2022, 3057, 1999}, { 3428, 2912, 1816},
- { 4536, 2974, 2129}, { 1046, 2563, 2086}, { 1363, 3562, 2318},
- { 2511, 1891, 2984}, { 1866, 2306, 3986}, { 3272, 2924, 3682},
- { 3146, 3564, 2272}, { 3592, 3968, 2822}, { 2431, 3369, 3069},
- { 1931, 4709, 3090}, { 2629, 4220, 3986}, { 4639, 4056, 3664},
- { 4035, 5334, 4912}
-};
-
-const int16_t ff_g723_1_lsp_band2[LSP_CB_SIZE][4] = {
- { 0, 0, 0, 0}, { 601, 512, -542, 334},
- { 428, 1087, -484, -132}, { 652, 622, -391, -572},
- { 378, 799, 141, -860}, { 1040, 409, 112, -554},
- { 1123, 670, -75, -847}, { 1421, 494, -315, -1095},
- { 787, 1001, 114, -460}, { 988, 1672, 216, -681},
- { 1007, 1241, -132, -1247}, { 1073, 399, 186, -5},
- { 1262, 193, -694, -129}, { 325, 196, 51, -641},
- { 861, -59, 350, -458}, { 1261, 567, 586, -346},
- { 1532, 885, 210, -517}, { 2027, 937, 113, -792},
- { 1383, 1064, 334, 38}, { 1964, 1468, 459, 133},
- { 2062, 1186, -98, -121}, { 2577, 1445, 506, -373},
- { 2310, 1682, -2, -960}, { 2876, 1939, 765, 138},
- { 3581, 2360, 649, -414}, { 219, 176, -398, -309},
- { 434, -78, -435, -880}, { -344, 301, 265, -552},
- { -915, 470, 657, -380}, { 419, -432, -163, -453},
- { 351, -953, 8, -562}, { 789, -43, 20, -958},
- { 302, -594, -352, -1159}, { 1040, 108, -668, -924},
- { 1333, 210, -1217, -1663}, { 483, 589, -350, -1140},
- { 1003, 824, -802, -1184}, { 745, 58, -589, -1443},
- { 346, 247, -915, -1683}, { 270, 796, -720, -2043},
- { 1208, 722, -222, -193}, { 1486, 1180, -412, -672},
- { 1722, 179, -69, -521}, { 2047, 860, -666, -1410},
- { -146, 222, -281, -805}, { -189, 90, -114, -1307},
- { -152, 1086, -241, -764}, { -439, 733, -601, -1302},
- { -833, -167, -351, -601}, { -856, -422, -411, -1059},
- { -747, -355, -582, -1644}, { -837, 210, -916, -1144},
- {-1800, 32, -878, -1687}, { -48, -23, -1146, 52},
- { -350, -409, -1656, -364}, { 265, -728, -858, -577},
- { 458, -247, -1141, -997}, { 691, -407, -1988, -1161},
- { -66, -104, -705, -1249}, { -431, -93, -1191, -1844},
- { 203, -732, -1000, -1693}, { 10, -832, -1846, -1819},
- { 493, -128, -1436, -1768}, { 488, -311, -1730, -2540},
- { -653, -532, -1150, -1172}, {-1086, -289, -1706, -1533},
- { -699, -1205, -1216, -1766}, {-1032, -1481, -2074, -1523},
- { -721, -1220, -2277, -2600}, { 12, -539, -1484, -1131},
- { -40, -911, -2106, -441}, { -471, -484, -2267, -1549},
- { -141, -988, -3006, -1721}, {-1545, -2102, -583, 342},
- {-1383, -2772, -386, -13}, {-2118, -2589, -1205, 72},
- {-2147, -3231, -965, 390}, {-2949, -3300, -621, 637},
- {-3907, -4138, -865, 803}, {-1287, -845, -375, -548},
- {-1416, -1169, -487, -1277}, {-1400, -1690, -1027, -418},
- {-2018, -1909, -1188, -1260}, {-1418, -2222, -2029, -128},
- {-2067, -2998, -2693, -310}, { -950, -1028, -1538, 185},
- {-1616, -915, -2205, -549}, { 19, -821, -1145, 352},
- { 184, -1175, -1356, -627}, { -547, -1088, -1661, -911},
- { -216, -1502, -2197, -948}, { -795, -1306, -2374, -451},
- { -924, -1889, -2796, -680}, { -600, -1614, -3609, -885},
- {-2392, -2528, 319, 303}, {-2908, -2095, -310, 573},
- {-3460, -2141, 49, -113}, {-2231, -448, 675, -146},
- {-2805, -532, 1231, 479}, {-2684, -486, -200, 611},
- {-3525, -971, -198, 704}, {-3707, 173, 349, 254},
- {-4734, -1447, -34, 880}, { 777, -512, 114, -10},
- { 1250, -66, 442, -5}, { 604, 613, 452, -352},
- { 1224, 777, 675, -1014}, {-1372, -79, -1208, -238},
- {-2389, -17, -1157, -818}, {-1504, -673, -1133, -1060},
- {-1984, -799, -2005, -1973}, {-2037, -798, -1068, -105},
- {-3190, -899, -1817, -194}, { -156, -886, 394, -318},
- { -258, -1283, 551, 202}, { -536, -1729, 910, 331},
- { -847, -1109, 795, -163}, {-1171, -1128, 715, 519},
- {-1080, -1319, 1685, 668}, {-1000, -1921, 96, 211},
- {-1487, -2148, 831, 174}, {-1139, -374, 414, -4},
- {-1517, -1383, 396, -352}, {-1012, 439, -59, -967},
- {-1812, 706, -440, -1030}, {-1971, -329, -34, -827},
- {-2472, -1588, -151, -606}, {-2161, 374, -281, 76},
- {-3012, 231, -15, -690}, { 1104, 566, 721, 209},
- { 1685, 564, 383, 98}, { 1898, 750, 792, -97},
- { 556, -64, 561, -93}, { 876, 162, 913, -22},
- { 961, 675, 1296, 140}, { 756, -396, 851, 544},
- { 360, -303, 1341, 396}, { 878, -22, 1464, 863},
- { -309, -273, 642, -129}, { -686, -82, 842, 454},
- { -5, -47, 1069, 998}, { -94, 967, 1277, 298},
- { -489, 385, 1473, 746}, { -369, -717, 1333, 242},
- { 281, -993, 1726, 924}, { 464, 601, 1575, 1376},
- { -250, 206, 2339, 1175}, { -438, 377, -597, -285},
- {-1020, 787, -790, -287}, { -458, -410, 215, 295},
- { -589, -860, -121, 797}, {-1175, 122, -437, 466},
- {-1480, -121, 367, 924}, { 234, 323, 770, -555},
- { 145, 30, 996, 26}, { 66, 849, 93, -145},
- { -117, 1261, 474, -399}, {-1495, 1051, 218, -506},
- {-1390, 694, 994, 88}, { 616, 7, 78, 304},
- { 1060, 52, -62, 835}, { 833, 454, 649, 1359},
- { -770, 464, 47, 93}, { -574, 1199, -39, 379},
- { 114, -98, 488, 485}, { 727, 244, 606, 696},
- { -76, 455, 671, 546}, { -565, -13, 145, 819},
- { -376, 569, 448, 1128}, { 218, 122, 265, 1167},
- { 230, 738, 932, 1003}, { 138, 477, 36, 450},
- { 404, 787, -73, 1000}, { 497, 1259, 387, 1231},
- { 17, 207, 195, -79}, { 562, 358, 53, -158},
- { 493, 387, 478, 189}, { 678, 831, 640, 558},
- { -197, 523, 613, 57}, { 429, 894, 769, 111},
- { 67, 1174, 568, 511}, { 1242, 824, 251, 840},
- { 1419, 1074, 864, 481}, { 924, 1474, 669, 724},
- { 1539, 1879, 654, 1590}, { 445, 337, 1111, 541},
- { 472, 1421, 1264, 1094}, { 794, 735, 1103, 668},
- { 1055, 863, 1192, 1020}, { 778, 1105, 806, 1798},
- { 1052, 1527, 1587, 2151}, { 881, 1552, 1265, 391},
- { 726, 872, 1812, 601}, { 1469, 280, 1008, 616},
- { 1403, 577, 1803, 1244}, { 1650, 1314, 1148, 1072},
- { 1297, 1669, 1911, 1026}, { 2093, 1044, 2115, 1189},
- { 1644, 1961, 2587, 1512}, { 25, -315, -9, -106},
- { 290, -339, 428, -444}, { -68, -783, 735, 772},
- { 245, -555, 468, 47}, { 334, -895, 814, 146},
- { 235, 368, -964, -959}, { -203, 315, -1566, -1217},
- { 801, 17, -276, -354}, { 894, -495, -789, -635},
- { 716, 291, -1189, -357}, { 560, -260, -733, -2},
- { 679, -508, -1429, 211}, { -51, -62, -428, 557},
- { 322, -638, -211, 614}, { -878, -1057, -84, -71},
- { -388, -1415, -167, -318}, { -754, -1574, 214, -539},
- {-1419, -2004, -92, -787}, { -47, -856, -347, -255},
- { 23, -1211, -173, 320}, { -658, -487, -893, 353},
- { -783, -1587, -584, 507}, {-1420, -859, -378, 441},
- {-2095, -1491, -137, 439}, { -321, -1450, -1288, -12},
- { -359, -2113, -553, -8}, { -831, -1918, -1561, 32},
- {-1014, -2487, -1359, -939}, { -475, -311, -169, -236},
- { -907, -426, 276, -611}, { -96, -400, 50, -710},
- { -426, -1022, -10, -985}, { -197, -258, -744, -575},
- { -611, -930, -771, -394}, { -267, -776, -612, -939},
- { -256, -1346, -802, -1122}, { -796, -1570, -825, -754},
- { 712, 876, 141, 227}, { 981, 1509, 85, 124},
- { 1462, 1228, 979, -39}, { 1734, 999, 1481, 440},
- { 2293, 1116, 769, 440}, { 2504, 1480, 1241, 356},
- { 2474, 1909, 1558, 810}, { 917, 1134, 607, -134},
- { 509, 1809, 781, -123}, { 1712, 1506, 559, -423},
- { 2037, 2317, 726, -155}, { 3031, 2676, 1203, 331},
- { 3664, 3274, 1768, 531}, { 1610, 1839, 867, 183},
- { 1774, 1972, 1538, 97}, { 1822, 2158, 1282, 659},
- { 2222, 2758, 1818, 900}, { 3251, 2124, 1723, 996},
- { 3633, 2336, 2408, 1453}, { 2923, 3517, 2567, 1318},
-};
-
-const int32_t ff_g723_1_combinatorial_table[PULSE_MAX][SUBFRAME_LEN/GRID_SIZE] = {
- {118755, 98280, 80730, 65780, 53130,
- 42504, 33649, 26334, 20349, 15504,
- 11628, 8568, 6188, 4368, 3003,
- 2002, 1287, 792, 462, 252,
- 126, 56, 21, 6, 1,
- 0, 0, 0, 0, 0},
-
- { 23751, 20475, 17550, 14950, 12650,
- 10626, 8855, 7315, 5985, 4845,
- 3876, 3060, 2380, 1820, 1365,
- 1001, 715, 495, 330, 210,
- 126, 70, 35, 15, 5,
- 1, 0, 0, 0, 0},
-
- { 3654, 3276, 2925, 2600, 2300,
- 2024, 1771, 1540, 1330, 1140,
- 969, 816, 680, 560, 455,
- 364, 286, 220, 165, 120,
- 84, 56, 35, 20, 10,
- 4, 1, 0, 0, 0},
-
- { 406, 378, 351, 325, 300,
- 276, 253, 231, 210, 190,
- 171, 153, 136, 120, 105,
- 91, 78, 66, 55, 45,
- 36, 28, 21, 15, 10,
- 6, 3, 1, 0, 0},
-
- { 29, 28, 27, 26, 25,
- 24, 23, 22, 21, 20,
- 19, 18, 17, 16, 15,
- 14, 13, 12, 11, 10,
- 9, 8, 7, 6, 5,
- 4, 3, 2, 1, 0},
-
- { 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1},
-};
-
-const int16_t ff_g723_1_fixed_cb_gain[GAIN_LEVELS] = {
- 1, 2, 3, 4, 6, 9, 13, 18,
- 26, 38, 55, 80, 115, 166, 240, 348,
- 502, 726, 1050, 1517, 2193, 3170, 4582, 6623,
-};
-
-const int16_t ff_g723_1_adaptive_cb_gain85[85 * 20] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 800, 1496, 167, -256,
- -338, -39, -136, -1, -4, -6, -73, -8,
- -15, 12, 23, 2, 16, 30, 3, -5,
- -462, -686, 493, 2575, 311, -13, -28, -14,
- -404, -5, -19, 13, 20, 72, 107, -77,
- 8, 13, -9, -48, 1483, 144, 784, 928,
- 1243, -134, -1, -37, -52, -94, -13, -71,
- -6, -84, -8, -44, -112, -10, -59, -70,
- -77, 275, 3522, 1056, -1254, 0, -4, -757,
- -68, -95, 1, 16, -59, 4, -17, -227,
- -5, 21, 269, 80, -125, -40, -264, 381,
- 5027, 0, 0, -4, -8, -1542, 0, -2,
- 0, 2, 0, 6, 38, 12, 81, -117,
- 138, 332, 2215, 2574, 1339, -1, -6, -299,
- -404, -109, -2, -18, -44, -21, -52, -348,
- -11, -27, -181, -210, 3685, 2883, -887, 866,
- -1639, -828, -507, -48, -45, -164, -648, 199,
- 156, -194, -152, 46, 368, 288, -88, 86,
- 1396, 2146, 2235, 345, 942, -118, -281, -305,
- -7, -54, -182, -190, -292, -29, -45, -47,
- -80, -123, -128, -19, 13, 4475, 3549, -804,
- -655, 0, -1222, -768, -39, -26, -3, -2,
- -969, 0, 219, 174, 0, 179, 141, -32,
- -724, 254, 242, 6049, 2462, -32, -3, -3,
- -2233, -370, 11, 10, -3, 267, -94, -89,
- 108, -38, -36, -909, 626, -1713, 6121, 4561,
- -1061, -23, -179, -2287, -1270, -68, 65, -233,
- 640, -174, 477, -1704, 40, -111, 396, 295,
- -350, 1391, 7985, 511, -405, -7, -118, -3892,
- -15, -10, 29, 170, -678, 10, -43, -249,
- -8, 34, 197, 12, 3144, -529, 608, 2530,
- 3878, -603, -17, -22, -390, -918, 101, -116,
- 19, -485, 81, -93, -744, 125, -144, -599,
- 2589, -689, 3045, 5603, -404, -409, -29, -566,
- -1916, -10, 108, -481, 128, -885, 235, -1041,
- 63, -17, 75, 138, 3107, 513, 1374, -3594,
- -4922, -589, -16, -115, -788, -1478, -97, -260,
- -43, 681, 112, 301, 933, 154, 413, -1079,
- 2468, 6010, 1107, -390, 1961, -372, -2204, -74,
- -9, -234, -905, -166, -406, 58, 143, 26,
- -295, -719, -132, 46, 4773, 2766, 2368, 4862,
- -4044, -1390, -467, -342, -1443, -998, -806, -690,
- -399, -1416, -821, -702, 1178, 682, 584, 1200,
- 1665, -1879, 1443, 1701, 8562, -169, -215, -127,
- -176, -4475, 190, -146, 165, -172, 195, -149,
- -870, 982, -754, -889, 2716, 9011, -1007, 755,
- -1785, -450, -4956, -61, -34, -194, -1493, 167,
- 554, -125, -415, 46, 296, 982, -109, 82,
- -2727, 7548, 1285, 938, 3420, -453, -3478, -100,
- -53, -714, 1256, 213, -592, 156, -432, -73,
- 569, -1576, -268, -196, 3677, 882, 4050, 1202,
- 2323, -825, -47, -1001, -88, -329, -198, -909,
- -218, -269, -64, -297, -521, -125, -574, -170,
- 2046, -753, 122, 10102, 603, -255, -34, 0,
- -6229, -22, 94, -15, 5, -1261, 464, -75,
- -75, 27, -4, -372, 449, -1815, 10690, 3870,
- -527, -12, -201, -6976, -914, -16, 49, -293,
- 1184, -106, 428, -2525, 14, -58, 344, 124,
- -941, 2352, 5049, 3650, 2637, -54, -337, -1556,
- -813, -424, 135, 290, -725, 209, -524, -1125,
- 151, -378, -812, -587, -1879, 796, 3117, 9569,
- -404, -215, -38, -593, -5589, -9, 91, 357,
- -151, 1097, -464, -1821, -46, 19, 76, 236,
- -1715, 2043, -2096, 9946, 4001, -179, -254, -268,
- -6038, -977, 213, -219, 261, 1041, -1240, 1272,
- 418, -498, 511, -2429, -5772, -618, -3921, 284,
- -3155, -2033, -23, -938, -4, -607, -218, -1381,
- -148, 100, 10, 68, -1111, -119, -755, 54,
- 382, 4748, 8003, -2064, 2198, -8, -1376, -3909,
- -260, -294, -110, -186, -2319, 48, 598, 1008,
- -51, -637, -1073, 277, -867, 3015, 11926, -1675,
- 947, -45, -555, -8681, -171, -54, 159, 631,
- -2195, -88, 308, 1219, 50, -174, -690, 96,
- -4933, -432, 6757, 3771, 1352, -1485, -11, -2786,
- -867, -111, -130, 2034, 178, 1135, 99, -1555,
- 407, 35, -557, -311, 152, 9726, 4231, -1928,
- 1490, -1, -5774, -1092, -226, -135, -90, -39,
- -2511, 17, 1144, 498, -13, -884, -384, 175,
- 2512, 193, 9033, 5361, -3148, -385, -2, -4980,
- -1754, -605, -29, -1385, -106, -822, -63, -2956,
- 482, 37, 1735, 1030, 8464, 2844, 12, 549,
- 2132, -4373, -493, 0, -18, -277, -1469, -6,
- -2, -284, -95, 0, -1101, -370, -1, -71,
- 2141, -2602, 7166, 9046, -1350, -279, -413, -3134,
- -4994, -111, 340, -936, 1138, -1182, 1436, -3957,
- 176, -214, 590, 745, -244, 278, 13307, 1227,
- -161, -3, -4, -10808, -91, -1, 4, 198,
- -226, 18, -20, -997, -2, 2, 131, 12,
- -1947, 8217, 6269, 917, -2559, -231, -4121, -2399,
- -51, -399, 976, 745, -3144, 108, -460, -350,
- -304, 1283, 979, 143, -1810, 2061, -2781, 6056,
- 10058, -200, -259, -472, -2238, -6174, 227, -307,
- 349, 669, -761, 1028, 1111, -1265, 1707, -3717,
- 7827, 9161, -3409, 2473, -1510, -3739, -5122, -709,
- -373, -139, -4376, 1628, 1906, -1181, -1382, 514,
- 721, 844, -314, 228, -1430, 8313, 9541, -2955,
- 1626, -124, -4218, -5556, -533, -161, 725, 832,
- -4841, -257, 1499, 1721, 142, -825, -947, 293,
- 2819, -4247, 5391, 8673, 2756, -485, -1101, -1774,
- -4591, -463, 730, -927, 1397, -1492, 2248, -2854,
- -474, 714, -907, -1459, 141, 14552, 690, 257,
- -112, -1, -12926, -29, -4, 0, -125, -5,
- -613, -2, -228, -10, 0, 99, 4, 1,
- 11938, -1859, 1806, -962, -884, -8699, -211, -199,
- -56, -47, 1355, -1316, 205, 701, -109, 106,
- 644, -100, 97, -51, 3728, 1982, 2264, 4584,
- 3131, -848, -239, -312, -1282, -598, -451, -515,
- -273, -1043, -554, -633, -712, -378, -432, -876,
- -1181, 766, 720, 14303, -216, -85, -35, -31,
- -12486, -2, 55, 51, -33, 1031, -668, -628,
- -15, 10, 9, 189, -4385, 4826, 10112, 1569,
- 3388, -1173, -1421, -6242, -150, -700, 1291, 2706,
- -2979, 420, -462, -969, 906, -998, -2091, -324,
- -448, 1932, 15591, -1842, 657, -12, -227, -14837,
- -207, -26, 52, 427, -1838, -50, 217, 1753,
- 18, -77, -626, 74, -4141, 1844, 3962, 5517,
- 6220, -1046, -207, -958, -1858, -2361, 466, 1001,
- -446, 1394, -621, -1334, 1572, -700, -1504, -2094,
- 729, -2299, 14755, 3657, -952, -32, -322, -13288,
- -816, -55, 102, -656, 2071, -162, 513, -3294,
- 42, -133, 857, 212, -1385, 5801, 13339, -3137,
- 1344, -117, -2054, -10861, -600, -110, 490, 1127,
- -4723, -265, 1111, 2554, 113, -476, -1094, 257,
- 4710, 9661, 1073, -2467, 3274, -1354, -5697, -70,
- -371, -654, -2777, -308, -633, 709, 1455, 161,
- -941, -1930, -214, 493, 1843, -3624, 12422, 6898,
- -1559, -207, -802, -9419, -2904, -148, 407, -1397,
- 2748, -775, 1526, -5230, 175, -344, 1182, 656,
- 1433, 2394, 2507, 1380, 8780, -125, -349, -383,
- -116, -4705, -209, -219, -366, -120, -201, -211,
- -768, -1283, -1343, -740, -1712, 12915, 5883, -2197,
- 991, -179, -10181, -2112, -294, -60, 1350, 615,
- -4638, -229, 1732, 789, 103, -781, -356, 133,
- 15072, 2158, -1245, 910, -496, -13865, -284, -94,
- -50, -15, -1986, 1145, 164, -837, -119, 69,
- 456, 65, -37, 27, 4655, 7319, 4916, 586,
- -3381, -1322, -3270, -1475, -20, -697, -2079, -1396,
- -2196, -166, -261, -175, 960, 1510, 1014, 120,
- 1191, -2140, 5120, 13498, -1418, -86, -279, -1600,
- -11121, -122, 155, -372, 669, -981, 1763, -4218,
- 103, -185, 443, 1168, -1530, -817, 8191, 9632,
- -1452, -143, -40, -4095, -5663, -128, -76, 765,
- 408, 900, 480, -4815, -135, -72, 726, 854,
- -3236, 607, 1696, -2106, 11485, -639, -22, -175,
- -270, -8051, 119, 335, -62, -416, 78, 218,
- 2268, -425, -1189, 1476, 3203, -1903, -837, 9679,
- 7057, -626, -221, -42, -5718, -3039, 372, 163,
- -97, -1892, 1124, 494, -1380, 819, 360, -4169,
- 213, -655, 17015, 620, -384, -2, -26, -17671,
- -23, -9, 8, -221, 681, -8, 24, -644,
- 5, -15, 399, 14, 5088, 35, -3339, 3726,
- 8488, -1580, 0, -680, -847, -4397, -10, 1037,
- 7, -1157, -8, 759, -2636, -18, 1730, -1930,
- -988, 1454, -2688, 15039, 2682, -59, -129, -441,
- -13805, -439, 87, -162, 238, 907, -1335, 2467,
- 161, -238, 440, -2462, -4865, -2842, -53, 5495,
- 6523, -1445, -493, 0, -1843, -2597, -844, -16,
- -9, 1632, 953, 18, 1937, 1131, 21, -2188,
- 3076, 15069, -2914, 1810, -971, -577, -13860, -518,
- -200, -57, -2829, 547, 2680, -339, -1665, 322,
- 182, 893, -172, 107, 1311, 5355, 11054, 2299,
- -3654, -105, -1750, -7458, -322, -814, -428, -885,
- -3613, -184, -751, -1551, 292, 1194, 2465, 512,
- 4035, 5619, 4618, 1815, 1912, -994, -1927, -1301,
- -201, -223, -1384, -1137, -1583, -447, -622, -511,
- -471, -656, -539, -211, -2131, 2754, -4501, 12879,
- 7432, -277, -463, -1236, -10124, -3371, 358, -585,
- 756, 1675, -2165, 3538, 967, -1249, 2042, -5842,
- 5618, -515, 3219, -4149, 4857, -1926, -16, -632,
- -1050, -1440, 176, -1104, 101, 1422, -130, 815,
- -1666, 152, -954, 1230, 1838, -1709, 1139, 16867,
- 716, -206, -178, -79, -17366, -31, 191, -127,
- 118, -1892, 1759, -1173, -80, 74, -49, -737,
- 1978, -3845, 10050, 11854, -2492, -238, -902, -6164,
- -8576, -379, 464, -1213, 2358, -1431, 2782, -7271,
- 301, -585, 1529, 1803, -2600, 11246, 11289, -3647,
- 1463, -412, -7720, -7778, -812, -130, 1784, 1791,
- -7749, -578, 2504, 2513, 232, -1004, -1008, 325,
- 3442, 907, 2725, 8970, 3638, -723, -50, -453,
- -4911, -808, -190, -572, -150, -1884, -496, -1492,
- -764, -201, -605, -1992, -126, 17498, 3481, -2003,
- 1090, 0, -18689, -739, -244, -72, 135, 26,
- -3717, -15, 2139, 425, 8, -1165, -231, 133,
- -1814, 1048, -2164, 4070, 16272, -200, -67, -285,
- -1011, -16160, 116, -239, 138, 450, -260, 537,
- 1801, -1041, 2149, -4042, 9354, 12580, -1883, 962,
- -617, -5341, -9660, -216, -56, -23, -7183, 1075,
- 1446, -549, -738, 110, 352, 474, -71, 36,
- 1708, 4199, 7387, 6335, 1003, -178, -1076, -3330,
- -2449, -61, -437, -770, -1893, -660, -1623, -2856,
- -104, -257, -452, -388, -2624, 5623, 17310, -2353,
- 592, -420, -1930, -18288, -338, -21, 900, 2772,
- -5941, -376, 807, 2486, 94, -203, -625, 85,
- 1211, -850, 1193, -1926, 15992, -89, -44, -86,
- -226, -15609, 62, -88, 61, 142, -100, 140,
- -1182, 830, -1165, 1880, 3983, -2054, 11506, -19,
- 3622, -968, -257, -8080, 0, -801, 499, -2797,
- 1442, 4, -2, 13, -880, 454, -2544, 4,
- -786, -1354, 16092, 7246, -1665, -37, -111, -15805,
- -3205, -169, -65, 772, 1330, 348, 599, -7117,
- -80, -137, 1636, 736, -4316, -511, 6674, 11665,
- 4633, -1137, -15, -2719, -8305, -1310, -134, 1758,
- 208, 3073, 364, -4752, 1220, 144, -1887, -3299,
- 7912, 4557, 1937, 1885, 7037, -3821, -1267, -229,
- -216, -3022, -2200, -935, -538, -910, -524, -222,
- -3398, -1957, -832, -809, 3434, 2967, 5867, 8196,
- 8766, -720, -537, -2101, -4100, -4690, -622, -1230,
- -1062, -1718, -1484, -2935, -1837, -1588, -3139, -4385,
- 5881, 9176, 8119, 3934, 3355, -2111, -5139, -4023,
- -944, -687, -3294, -2914, -4547, -1412, -2203, -1949,
- -1204, -1879, -1662, -805
-};
-
-const int16_t ff_g723_1_adaptive_cb_gain170[170 * 20] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 776, 212, 715, 670,
- 809, -36, -2, -31, -27, -39, -10, -33,
- -9, -31, -8, -29, -38, -10, -35, -33,
- 1296, 1316, -168, -320, -815, -102, -105, -1,
- -6, -40, -104, 13, 13, 25, 25, -3,
- 64, 65, -8, -15, -589, 680, 2478, 308,
- -596, -21, -28, -375, -5, -21, 24, 89,
- -102, 11, -12, -46, -21, 24, 90, 11,
- -735, -487, -5, 2948, 468, -33, -14, 0,
- -530, -13, -21, 0, 0, 132, 87, 0,
- 21, 13, 0, -84, 1042, 1730, 1068, 333,
- 626, -66, -182, -69, -6, -23, -110, -67,
- -112, -21, -35, -21, -39, -66, -40, -12,
- 486, -769, 4074, 2825, -1107, -14, -36, -1013,
- -487, -74, 22, -120, 191, -83, 132, -702,
- 32, -52, 275, 191, 1521, -767, -124, 4320,
- 1026, -141, -35, 0, -1139, -64, 71, 11,
- -5, -401, 202, 32, -95, 48, 7, -270,
- 2425, 1267, 3439, -91, -1166, -359, -98, -722,
- 0, -83, -187, -509, -266, 13, 7, 19,
- 172, 90, 244, -6, -1251, 975, 173, 4039,
- 2005, -95, -58, -1, -996, -245, 74, 13,
- -10, 308, -240, -42, 153, -119, -21, -494,
- 1820, 632, 1322, 2062, 1031, -202, -24, -106,
- -259, -64, -70, -146, -51, -229, -79, -166,
- -114, -39, -83, -129, -447, 4904, 244, -315,
- -2038, -12, -1467, -3, -6, -253, 134, 6,
- -73, -8, 94, 4, -55, 610, 30, -39,
- -208, -1102, 463, -448, 5653, -2, -74, -13,
- -12, -1950, -14, 5, 31, -5, -30, 12,
- 71, 380, -159, 154, 4739, 2600, -1864, 856,
- -1554, -1371, -412, -212, -44, -147, -752, 539,
- 295, -247, -135, 97, 449, 246, -176, 81,
- 1894, 3533, 35, -26, 2145, -219, -762, 0,
- 0, -280, -408, -4, -7, 3, 5, 0,
- -248, -462, -4, 3, -2699, 1841, 4072, 2443,
- 1582, -444, -207, -1012, -364, -152, 303, 670,
- -457, 402, -274, -607, 260, -177, -393, -236,
- -844, 3358, 6106, -1059, -537, -43, -688, -2275,
- -68, -17, 173, 314, -1251, -54, 217, 395,
- -27, 110, 200, -34, 1251, 1016, 3020, 2210,
- 1445, -95, -63, -556, -298, -127, -77, -230,
- -187, -168, -137, -407, -110, -89, -266, -194,
- 2099, 2277, 4038, 3533, -2870, -269, -316, -995,
- -762, -503, -291, -517, -561, -452, -491, -871,
- 367, 399, 707, 619, 400, -1114, 8516, 2422,
- -1117, -9, -75, -4426, -358, -76, 27, -208,
- 579, -59, 164, -1259, 27, -75, 580, 165,
- -4398, -2011, 3912, -2407, 2258, -1180, -247, -934,
- -353, -311, -540, 1050, 480, -646, -295, 575,
- 606, 277, -539, 331, 1767, -1447, 4240, 6160,
- -757, -190, -127, -1097, -2316, -35, 156, -457,
- 374, -664, 544, -1594, 81, -66, 195, 284,
- 1594, -1463, 1035, 6938, 1920, -155, -130, -65,
- -2938, -225, 142, -100, 92, -675, 619, -438,
- -186, 171, -121, -813, -562, 4716, 4085, -591,
- 2421, -19, -1357, -1018, -21, -357, 162, 140,
- -1175, -20, 170, 147, 83, -696, -603, 87,
- 1552, 8778, -935, 354, -1424, -147, -4703, -53,
- -7, -123, -831, 88, 501, -33, -189, 20,
- 134, 763, -81, 30, 4831, -4431, 41, -1479,
- -2976, -1424, -1198, 0, -133, -540, 1306, -12,
- 11, 436, -400, 3, 877, -804, 7, -268,
- 2090, 1192, 1006, 1645, 4853, -266, -86, -61,
- -165, -1437, -152, -128, -73, -210, -119, -101,
- -619, -353, -298, -487, 2386, 5712, 1426, -94,
- 1350, -347, -1991, -124, 0, -111, -832, -207,
- -497, 13, 32, 8, -196, -470, -117, 7,
- -1349, 1091, 1659, 8891, 313, -111, -72, -168,
- -4825, -5, 89, 136, -110, 732, -592, -900,
- 25, -20, -31, -170, 9980, 916, -381, -808,
- 88, -6080, -51, -8, -39, 0, -558, 232,
- 21, 492, 45, -18, -53, -4, 2, 4,
- 2338, -1031, -248, 3928, 6484, -333, -64, -3,
- -942, -2566, 147, 35, -15, -560, 247, 59,
- -925, 408, 98, -1555, 6166, -1240, -337, 3672,
- -1277, -2320, -93, -6, -823, -99, 466, 126,
- -25, -1382, 278, 75, 480, -96, -26, 286,
- 4377, -132, -2588, 1701, 4865, -1169, -1, -409,
- -176, -1444, 35, 691, -20, -454, 13, 268,
- -1299, 39, 768, -505, 2594, 3295, 3944, 1481,
- 682, -410, -662, -949, -133, -28, -521, -624,
- -793, -234, -297, -356, -108, -137, -164, -61,
- 4151, 624, 815, 4485, 2229, -1052, -23, -40,
- -1228, -303, -158, -206, -31, -1136, -170, -223,
- -565, -84, -111, -610, -3575, -361, 4924, 2791,
- 4698, -780, -7, -1480, -475, -1347, -78, 1074,
- 108, 609, 61, -839, 1025, 103, -1412, -800,
- -2518, 3791, 8623, 315, 2465, -387, -877, -4538,
- -6, -370, 582, 1325, -1995, 48, -73, -166,
- 378, -570, -1297, -47, -691, 2989, 9957, -421,
- -1142, -29, -545, -6051, -10, -79, 126, 420,
- -1817, -17, 76, 256, -48, 208, 694, -29,
- -1918, 104, -3190, -3410, -4440, -224, 0, -621,
- -709, -1203, 12, -373, 20, -399, 21, -664,
- -519, 28, -864, -924, -3359, -1668, 1854, 6939,
- 1430, -688, -169, -209, -2939, -124, -341, 380,
- 188, 1422, 706, -785, 293, 145, -161, -606,
- 42, 9706, 3164, -952, 907, 0, -5750, -611,
- -55, -50, -25, -8, -1874, 2, 564, 183,
- -2, -537, -175, 52, 1607, 785, 2862, 4327,
- 3307, -157, -37, -500, -1143, -667, -77, -280,
- -137, -424, -207, -756, -324, -158, -577, -873,
- 6801, 3416, 2227, 1682, -3217, -2823, -712, -302,
- -172, -631, -1418, -924, -464, -698, -350, -228,
- 1335, 670, 437, 330, 3459, 3898, 364, 7841,
- -2640, -730, -927, -8, -3753, -425, -823, -76,
- -86, -1655, -1865, -174, 557, 628, 58, 1263,
- -5902, -3458, -2465, -1886, 4334, -2126, -730, -371,
- -217, -1146, -1245, -888, -520, -679, -398, -283,
- 1561, 915, 652, 499, -3710, 1133, 7849, 3443,
- -215, -840, -78, -3760, -723, -2, 256, 1777,
- -543, 779, -238, -1649, -48, 14, 103, 45,
- 4132, 2828, 2, -4212, -4116, -1042, -488, 0,
- -1083, -1034, -713, 0, 0, 1062, 727, 0,
- 1038, 710, 0, -1058, 5875, 8496, -1796, 1376,
- -1786, -2107, -4406, -197, -115, -194, -3047, 644,
- 931, -493, -713, 150, 640, 926, -195, 150,
- 3143, 3483, 3546, -793, 4489, -603, -740, -767,
- -38, -1230, -668, -680, -754, 152, 168, 171,
- -861, -954, -971, 217, 2845, 7965, 3695, -5432,
- 3978, -494, -3873, -833, -1801, -966, -1383, -641,
- -1796, 943, 2641, 1225, -691, -1934, -897, 1319,
- 1538, 150, 7139, 2049, 3097, -144, -1, -3110,
- -256, -585, -14, -670, -65, -192, -18, -892,
- -290, -28, -1349, -387, 618, 7520, 4729, -238,
- -3373, -23, -3452, -1365, -3, -694, -283, -178,
- -2170, 8, 109, 68, 127, 1548, 973, -49,
- 2965, -3013, 7912, 7076, -1997, -536, -554, -3821,
- -3056, -243, 545, -1431, 1455, -1280, 1301, -3417,
- 361, -367, 964, 862, 2443, -929, -1113, 9677,
- 4138, -364, -52, -75, -5716, -1045, 138, 166,
- -63, -1443, 549, 657, -617, 234, 281, -2444,
- 1966, 3309, 10085, -3399, 2105, -236, -668, -6207,
- -705, -270, -397, -1210, -2037, 408, 686, 2092,
- -252, -425, -1295, 436, -112, -1368, 8868, 4822,
- 2048, 0, -114, -4800, -1419, -256, -9, 61,
- 740, 33, 402, -2610, 14, 171, -1108, -602,
- -2597, 438, -1839, 6229, 7266, -411, -11, -206,
- -2368, -3223, 69, -291, 49, 987, -166, 699,
- 1152, -194, 816, -2763, 3454, 553, 9127, 4946,
- -5596, -728, -18, -5084, -1493, -1911, -116, -1924,
- -308, -1042, -166, -2755, 1179, 188, 3117, 1689,
- -532, -663, 12262, 2495, -1004, -17, -26, -9177,
- -380, -61, -21, 398, 496, 81, 101, -1867,
- -32, -40, 751, 152, -2100, 1317, -1509, 11425,
- 2997, -269, -105, -139, -7967, -548, 168, -193,
- 121, 1464, -918, 1052, 384, -240, 276, -2090,
- 1193, -2697, 11259, 5373, -763, -86, -444, -7737,
- -1762, -35, 196, -819, 1853, -391, 884, -3692,
- 55, -125, 525, 250, 2405, -471, 11079, 203,
- 782, -353, -13, -7491, -2, -37, 69, -1626,
- 318, -29, 5, -137, -114, 22, -529, -9,
- -1871, 5685, 11290, -2662, 1353, -213, -1972, -7780,
- -432, -111, 649, 1289, -3917, -304, 923, 1834,
- 154, -469, -932, 220, -3768, 5927, -3093, 5041,
- 5212, -866, -2144, -584, -1551, -1658, 1363, -711,
- 1119, 1159, -1824, 951, 1198, -1885, 984, -1603,
- -2546, 9502, 5969, -2440, 1928, -395, -5511, -2175,
- -363, -226, 1477, 927, -3462, -379, 1415, 889,
- 299, -1118, -702, 287, -4963, 3568, 4592, 5508,
- 3451, -1503, -777, -1287, -1851, -727, 1080, 1391,
- -1000, 1668, -1199, -1543, 1045, -751, -967, -1160,
- 1745, -2586, 3983, 10899, -1551, -186, -408, -968,
- -7250, -146, 275, -424, 628, -1161, 1720, -2649,
- 165, -244, 377, 1032, 867, -456, -727, 3369,
- 11822, -45, -12, -32, -692, -8531, 24, 38,
- -20, -178, 93, 149, -625, 329, 525, -2431,
- 7535, 2422, 1926, 1405, 1599, -3466, -358, -226,
- -120, -156, -1114, -886, -284, -646, -207, -165,
- -735, -236, -188, -137, 1041, -735, -142, 13209,
- 1515, -66, -33, -1, -10649, -140, 46, 9,
- -6, -839, 593, 114, -96, 68, 13, -1222,
- 7950, 6745, -1444, -1008, 2721, -3857, -2777, -127,
- -62, -452, -3273, 700, 594, 489, 415, -88,
- -1320, -1120, 239, 167, -4754, -1379, 4522, -578,
- -5733, -1379, -116, -1248, -20, -2006, -400, 1312,
- 380, -167, -48, 159, -1663, -482, 1582, -202,
- 3220, 5978, 5923, 2430, -2689, -633, -2181, -2141,
- -360, -441, -1175, -1164, -2161, -477, -886, -878,
- 528, 981, 972, 398, 377, 1312, 13978, -1470,
- 677, -8, -105, -11925, -132, -28, -30, -321,
- -1119, 33, 117, 1254, -15, -54, -577, 60,
- -3435, 6770, 314, -885, 5686, -720, -2797, -6,
- -47, -1973, 1419, 65, -129, -185, 366, 16,
- 1192, -2349, -109, 307, 3171, 8774, -2260, 2679,
- 3069, -613, -4699, -312, -438, -575, -1698, 437,
- 1210, -518, -1435, 369, -594, -1643, 423, -501,
- 5557, 1509, 5407, -125, -7386, -1884, -139, -1784,
- 0, -3330, -511, -1834, -498, 42, 11, 41,
- 2505, 680, 2438, -56, -2838, 2595, 13228, 271,
- 1793, -491, -411, -10680, -4, -196, 449, 2291,
- -2095, 47, -42, -219, 310, -284, -1447, -29,
- 664, -278, 14966, 951, -711, -26, -4, -13672,
- -55, -30, 11, -606, 253, -38, 16, -869,
- 28, -12, 650, 41, 808, 1770, 8658, 5863,
- -1486, -39, -191, -4576, -2098, -134, -87, -427,
- -935, -289, -633, -3098, 73, 160, 785, 531,
- 3063, 1539, 2000, -542, 9576, -572, -144, -244,
- -17, -5597, -287, -374, -188, 101, 51, 66,
- -1790, -900, -1169, 317, 514, 14083, -323, 896,
- -891, -16, -12106, -6, -49, -48, -442, 10,
- 277, -28, -770, 17, 27, 766, -17, 48,
- 892, 158, 5237, 11057, -1603, -48, -1, -1674,
- -7462, -156, -8, -285, -50, -602, -106, -3534,
- 87, 15, 512, 1082, -1612, 2564, -4296, 12526,
- 5710, -158, -401, -1126, -9576, -1990, 252, -422,
- 672, 1232, -1960, 3284, 561, -893, 1497, -4365,
- 4889, -6878, 612, 6109, 4753, -1459, -2887, -22,
- -2277, -1379, 2052, -182, 257, -1823, 2564, -228,
- -1418, 1995, -177, -1772, 3053, -506, 2403, 9625,
- 1322, -569, -15, -352, -5655, -106, 94, -448,
- 74, -1794, 297, -1412, -246, 40, -194, -777,
- -754, 12904, 4480, -2113, 1471, -34, -10163, -1225,
- -272, -132, 594, 206, -3529, -97, 1664, 577,
- 67, -1159, -402, 189, 4255, 1476, 5055, 2393,
- 2912, -1105, -132, -1559, -349, -517, -383, -1313,
- -455, -621, -215, -738, -756, -262, -898, -425,
- -1371, 535, 1417, 14604, -997, -114, -17, -122,
- -13017, -60, 44, 118, -46, 1222, -477, -1263,
- -83, 32, 86, 888, 5368, -1744, 4083, -1236,
- 3753, -1758, -185, -1017, -93, -860, 571, -1338,
- 434, 405, -131, 308, -1229, 399, -935, 283,
- 1588, -3097, 14415, 3699, -1171, -154, -585, -12683,
- -835, -83, 300, -1397, 2725, -358, 699, -3255,
- 113, -221, 1030, 264, 212, 7989, 9471, -3344,
- 2009, -2, -3895, -5475, -682, -246, -103, -123,
- -4618, 43, 1630, 1933, -26, -979, -1161, 410,
- 856, 2294, -627, 6930, 6929, -44, -321, -24,
- -2931, -2930, -119, 32, 87, -362, -970, 265,
- -362, -970, 265, -2931, 2357, -4187, 7162, 7683,
- 3371, -339, -1070, -3131, -3603, -693, 602, -1030,
- 1830, -1105, 1963, -3359, -485, 861, -1474, -1581,
- 350, 4585, 14053, -3819, 1218, -7, -1283, -12054,
- -890, -90, -97, -300, -3933, 81, 1068, 3275,
- -26, -341, -1045, 284, -3248, 3531, 475, 2137,
- 11711, -644, -761, -13, -278, -8372, 700, 94,
- -102, 423, -460, -62, 2322, -2524, -340, -1528,
- -3017, 3852, 1725, 8440, 5257, -555, -905, -181,
- -4348, -1686, 709, 317, -405, 1554, -1984, -889,
- 968, -1236, -553, -2708, -909, 3196, 15512, -2528,
- 1066, -50, -623, -14686, -390, -69, 177, 861,
- -3026, -140, 493, 2393, 59, -208, -1009, 164,
- 959, -3370, 9617, 9545, -1761, -56, -693, -5645,
- -5561, -189, 197, -563, 1978, -558, 1963, -5603,
- 103, -362, 1034, 1026, 7575, 11796, -4845, 3252,
- -1703, -3502, -8493, -1433, -645, -177, -5454, 2240,
- 3488, -1503, -2341, 961, 787, 1226, -503, 338,
- 6409, 1722, 1764, -4191, 6015, -2507, -181, -189,
- -1072, -2208, -673, -690, -185, 1639, 440, 451,
- -2353, -632, -647, 1538, -2420, 12161, 5038, 1286,
- -2098, -357, -9027, -1549, -100, -268, 1796, 744,
- -3740, 190, -954, -395, -310, 1557, 645, 164,
- -2232, -1341, 7246, 9470, -1977, -304, -109, -3204,
- -5474, -238, -182, 987, 593, 1290, 775, -4188,
- -269, -161, 874, 1143, 1030, 7034, 4231, 1551,
- 3077, -64, -3019, -1093, -146, -577, -442, -266,
- -1816, -97, -666, -400, -193, -1321, -794, -291,
- 5121, 11835, -477, -1749, 2298, -1601, -8549, -13,
- -186, -322, -3699, 149, 344, 546, 1264, -50,
- -718, -1660, 66, 245, -3328, 3827, 5921, 9976,
- -1045, -676, -894, -2140, -6075, -66, 777, 1203,
- -1383, 2027, -2330, -3605, -212, 244, 377, 636,
- 3813, 5718, -4666, -3412, 5674, -887, -1995, -1329,
- -710, -1965, -1331, 1086, 1628, 794, 1191, -972,
- -1320, -1980, 1616, 1181, 1348, -3672, 13154, 6938,
- -1690, -110, -823, -10561, -2938, -174, 302, -1082,
- 2948, -570, 1555, -5570, 139, -379, 1357, 716,
- 2151, -3586, 6949, 12131, -1224, -282, -785, -2947,
- -8982, -91, 470, -912, 1521, -1592, 2655, -5145,
- 160, -268, 519, 906, -2889, 9647, 10276, -2728,
- 995, -509, -5680, -6445, -454, -60, 1701, 1812,
- -6051, -481, 1606, 1711, 175, -586, -624, 165,
- 6177, 2184, 555, 1985, 6589, -2329, -291, -18,
- -240, -2650, -823, -209, -74, -748, -264, -67,
- -2484, -878, -223, -798, -492, 391, 17166, -681,
- 240, -14, -9, -17987, -28, -3, 11, 515,
- -410, -20, 16, 713, 7, -5, -252, 10,
- 12628, 5448, -2630, 3011, -2695, -9733, -1811, -422,
- -553, -443, -4199, 2027, 874, -2321, -1001, 483,
- 2077, 896, -432, 495, -3628, -534, 3447, 7002,
- 6751, -803, -17, -725, -2992, -2782, -118, 763,
- 112, 1550, 228, -1473, 1495, 220, -1420, -2885,
- -5239, 5901, 8107, 3650, 4846, -1675, -2125, -4012,
- -813, -1433, 1887, 2592, -2920, 1167, -1315, -1806,
- 1550, -1745, -2398, -1080, 6157, 6678, 4099, -1074,
- 2348, -2314, -2722, -1025, -70, -336, -2509, -1540,
- -1670, 403, 437, 268, -882, -957, -587, 153,
- 1079, 16099, 242, -881, 1690, -71, -15820, -3,
- -47, -174, -1060, -16, -238, 58, 865, 13,
- -111, -1661, -25, 90, -278, 227, -1039, 1636,
- 16945, -4, -3, -65, -163, -17526, 3, -17,
- 14, 27, -22, 103, 287, -234, 1074, -1693,
- 15778, -1454, 574, -603, -107, -15195, -129, -20,
- -22, 0, 1400, -553, 51, 581, -53, 21,
- 103, -9, 3, -3, 2406, -836, 13224, 7993,
- -4266, -353, -42, -10673, -3899, -1111, 122, -1942,
- 674, -1174, 407, -6451, 626, -217, 3443, 2081,
- 3184, 14368, -3336, 2255, -1801, -619, -12600, -679,
- -310, -198, -2793, 648, 2926, -438, -1977, 459,
- 350, 1580, -366, 247, -1698, 17076, 2504, -539,
- -646, -176, -17798, -382, -17, -25, 1770, 259,
- -2610, -55, 561, 82, -67, 673, 98, -21,
- 2375, -797, -2696, 14483, 5383, -344, -38, -443,
- -12803, -1769, 115, 391, -131, -2100, 705, 2384,
- -780, 262, 886, -4759, -2691, 2554, -4520, 9573,
- 10655, -442, -398, -1247, -5594, -6930, 419, -742,
- 704, 1572, -1492, 2641, 1750, -1661, 2939, -6226,
- -4332, -4399, -1657, 4880, 7375, -1145, -1181, -167,
- -1453, -3319, -1163, -438, -444, 1290, 1310, 493,
- 1950, 1980, 745, -2196, -3498, 7405, 9955, 2693,
- -2971, -746, -3347, -6049, -442, -538, 1581, 2125,
- -4499, 575, -1217, -1636, -634, 1342, 1805, 488,
- 6717, -3792, 7739, 2798, 3489, -2754, -877, -3655,
- -477, -743, 1554, -3173, 1791, -1147, 647, -1321,
- -1430, 807, -1648, -595, 5263, 9770, 3463, 1069,
- -3971, -1690, -5826, -732, -69, -962, -3138, -1112,
- -2065, -343, -637, -226, 1275, 2368, 839, 259,
- 1243, -2634, 16772, 1871, 332, -94, -423, -17169,
- -213, -6, 199, -1273, 2696, -142, 300, -1915,
- -25, 53, -339, -37, 2691, 2836, 3105, 5711,
- 4817, -442, -491, -588, -1991, -1416, -465, -510,
- -537, -938, -988, -1082, -791, -834, -913, -1679,
- 4366, 2944, 7210, 3627, 1161, -1163, -529, -3172,
- -803, -82, -784, -1921, -1295, -966, -651, -1596,
- -309, -208, -511, -257, 13888, 3951, -671, -2305,
- 3354, -11773, -953, -27, -324, -686, -3349, 569,
- 161, 1954, 556, -94, -2843, -809, 137, 472,
- 7053, 5847, 2929, 8378, -4794, -3036, -2086, -523,
- -4284, -1403, -2517, -1261, -1045, -3607, -2990, -1498,
- 2064, 1711, 857, 2451, -2191, 12838, 9182, -3915,
- 1617, -293, -10059, -5146, -935, -159, 1717, 1228,
- -7195, -523, 3068, 2194, 216, -1267, -906, 386,
- -4881, 13114, 5767, -435, 4155, -1454, -10498, -2030,
- -11, -1054, 3907, 1718, -4616, -129, 348, 153,
- 1238, -3326, -1462, 110, 7843, -1250, 210, 7106,
- -5203, -3754, -95, -2, -3082, -1652, 598, -100,
- 16, -3402, 542, -91, 2491, -397, 66, 2257,
- -2463, 8168, 14551, -3908, 1828, -370, -4072, -12923,
- -932, -204, 1228, 2188, -7254, -587, 1948, 3471,
- 274, -911, -1623, 436, -1579, 347, -272, -2735,
- 16031, -152, -7, -4, -456, -15686, 33, -26,
- 5, -263, 58, -45, 1545, -340, 266, 2676,
- -6327, 1328, 5093, -5079, 7617, -2443, -107, -1583,
- -1574, -3541, 513, 1967, -413, -1961, 411, 1578,
- 2941, -617, -2367, 2361, 3286, -4509, 11306, 11025,
- -2623, -659, -1241, -7802, -7419, -420, 904, -2267,
- 3112, -2211, 3034, -7608, 526, -722, 1810, 1765,
- 5567, 17853, -3754, 1166, -519, -1892, -19455, -860,
- -83, -16, -6067, 1275, 4090, -396, -1271, 267,
- 176, 566, -119, 37, -2136, -424, 15292, 5108,
- -1648, -278, -10, -14273, -1593, -165, -55, 1993,
- 396, 666, 132, -4768, -214, -42, 1538, 514,
- 2267, -3297, 2549, 16563, -791, -313, -663, -396,
- -16745, -38, 456, -352, 513, -2291, 3333, -2576,
- 109, -159, 123, 799, 3655, 1899, -3364, 6279,
- 12510, -815, -220, -690, -2406, -9552, -423, 750,
- 390, -1400, -728, 1289, -2791, -1450, 2568, -4794,
- 8052, 2285, -6193, 5138, 6003, -3957, -318, -2341,
- -1611, -2199, -1123, 3044, 864, -2525, -716, 1942,
- -2950, -837, 2269, -1882, -386, -2291, 7679, 15387,
- -2723, -9, -320, -3599, -14452, -452, -54, 181,
- 1074, 362, 2152, -7212, -64, -380, 1276, 2557,
- 2777, -1173, 3984, 13079, 2508, -470, -84, -969,
- -10440, -384, 198, -675, 285, -2217, 936, -3180,
- -425, 179, -610, -2002, -1879, 1771, -2684, 16705,
- 1833, -215, -191, -439, -17032, -205, 203, -308,
- 290, 1916, -1805, 2736, 210, -198, 300, -1869,
- 1052, 4495, 15519, 1467, -4032, -67, -1233, -14700,
- -131, -992, -288, -997, -4257, -94, -402, -1389,
- 259, 1106, 3819, 361, 3010, 2544, 6969, 7559,
- 1996, -553, -395, -2964, -3487, -243, -467, -1280,
- -1082, -1388, -1174, -3215, -366, -310, -849, -921,
- -5209, -1867, 8713, 10351, 1549, -1656, -212, -4634,
- -6540, -146, -593, 2770, 993, 3291, 1180, -5505,
- 492, 176, -824, -979, -4314, 8513, 913, 7547,
- -2723, -1135, -4423, -50, -3476, -452, 2241, 240,
- -474, 1987, -3921, -420, -717, 1415, 151, 1254,
- 12929, -1219, 2448, 1757, 6303, -10204, -90, -365,
- -188, -2425, 962, -1932, 182, -1386, 130, -262,
- -4974, 469, -941, -676, 6465, 4132, 3167, 3160,
- 5697, -2551, -1042, -612, -609, -1981, -1630, -1249,
- -798, -1247, -797, -611, -2248, -1437, -1101, -1099,
- -3636, 4859, 18914, -1335, 810, -807, -1441, -21836,
- -108, -40, 1078, 4198, -5609, -296, 396, 1541,
- 179, -240, -936, 66, 8844, 7864, 654, -4063,
- -5680, -4774, -3774, -26, -1007, -1969, -4245, -353,
- -314, 2193, 1950, 162, 3066, 2726, 226, -1408,
- 1859, 2634, 9228, 996, 9464, -211, -423, -5197,
- -60, -5467, -299, -1047, -1483, -113, -160, -561,
- -1074, -1521, -5330, -575, 2949, 12260, 10290, -497,
- -3943, -530, -9174, -6463, -15, -949, -2206, -1852,
- -7700, 89, 372, 312, 709, 2950, 2476, -119,
- -2903, 1552, 14867, 9970, -496, -514, -147, -13491,
- -6068, -15, 275, 2634, -1408, 1766, -944, -9047,
- -87, 47, 450, 302, 3243, 8234, 7586, 3373,
- 2151, -642, -4138, -3512, -694, -282, -1630, -1501,
- -3812, -667, -1695, -1561, -425, -1081, -996, -442,
- -9631, 60, 3501, 5359, 10150, -5662, 0, -748,
- -1752, -6288, 35, 2058, -12, 3150, -19, -1145,
- 5967, -37, -2169, -3320, -6874, -2553, -5446, -2195,
- -7841, -2884, -397, -1810, -294, -3753, -1071, -2285,
- -848, -921, -342, -729, -3290, -1221, -2606, -1050,
- -3413, -1141, 4630, 13612, 7897, -711, -79, -1308,
- -11310, -3806, -237, 964, 322, 2836, 948, -3847,
- 1645, 550, -2231, -6561, 4410, -5678, 8006, -3992,
- 3811, -1187, -1968, -3912, -973, -886, 1528, -2155,
- 2775, 1074, -1383, 1951, -1025, 1321, -1862, 928,
- 5659, 11535, 2203, -452, 7169, -1954, -8121, -296,
- -12, -3137, -3984, -761, -1551, 156, 318, 60,
- -2476, -5048, -964, 197, 2914, -2914, 3485, -3965,
- 13675, -518, -518, -741, -959, -11414, 518, -620,
- 620, 705, -705, 843, -2433, 2432, -2909, 3310,
- 7843, 1907, 1022, 8882, 7972, -3755, -222, -63,
- -4815, -3879, -913, -489, -119, -4252, -1034, -554,
- -3816, -928, -497, -4322, 13807, 9531, 1436, 1612,
- 1779, -11636, -5544, -125, -158, -193, -8032, -1210,
- -835, -1358, -938, -141, -1499, -1035, -156, -175,
- 13620, -5337, 5450, -2263, 1723, -11322, -1738, -1813,
- -312, -181, 4436, -4531, 1775, 1881, -737, 752,
- -1432, 561, -573, 238, 5297, 8374, 8872, 7694,
- 6538, -1712, -4280, -4804, -3613, -2609, -2707, -2868,
- -4534, -2487, -3932, -4166, -2113, -3341, -3540, -3070
-};
-
-int ff_g723_1_scale_vector(int16_t *dst, const int16_t *vector, int length)
-{
- int bits, max = 0;
- int i;
-
- for (i = 0; i < length; i++)
- max |= FFABS(vector[i]);
-
- bits= 14 - av_log2_16bit(max);
- bits= FFMAX(bits, 0);
-
- for (i = 0; i < length; i++)
- dst[i] = (vector[i] * (1 << bits)) >> 3;
-
- return bits - 3;
-}
-
-int ff_g723_1_normalize_bits(int num, int width)
-{
- return width - av_log2(num) - 1;
-}
-
-int ff_g723_1_dot_product(const int16_t *a, const int16_t *b, int length)
-{
- int sum = ff_dot_product(a, b, length);
- return av_sat_add32(sum, sum);
-}
-
-void ff_g723_1_get_residual(int16_t *residual, int16_t *prev_excitation,
- int lag)
-{
- int offset = PITCH_MAX - PITCH_ORDER / 2 - lag;
- int i;
-
- residual[0] = prev_excitation[offset];
- residual[1] = prev_excitation[offset + 1];
-
- offset += 2;
- for (i = 2; i < SUBFRAME_LEN + PITCH_ORDER - 1; i++)
- residual[i] = prev_excitation[offset + (i - 2) % lag];
-}
-
-void ff_g723_1_gen_dirac_train(int16_t *buf, int pitch_lag)
-{
- int16_t vector[SUBFRAME_LEN];
- int i, j;
-
- memcpy(vector, buf, SUBFRAME_LEN * sizeof(*vector));
- for (i = pitch_lag; i < SUBFRAME_LEN; i += pitch_lag) {
- for (j = 0; j < SUBFRAME_LEN - i; j++)
- buf[i + j] += vector[j];
- }
-}
-
-void ff_g723_1_gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
- int pitch_lag, G723_1_Subframe *subfrm,
- enum Rate cur_rate)
-{
- int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
- const int16_t *cb_ptr;
- int lag = pitch_lag + subfrm->ad_cb_lag - 1;
-
- int i;
- int sum;
-
- ff_g723_1_get_residual(residual, prev_excitation, lag);
-
- /* Select quantization table */
- if (cur_rate == RATE_6300 && pitch_lag < SUBFRAME_LEN - 2) {
- cb_ptr = ff_g723_1_adaptive_cb_gain85;
- } else
- cb_ptr = ff_g723_1_adaptive_cb_gain170;
-
- /* Calculate adaptive vector */
- cb_ptr += subfrm->ad_cb_gain * 20;
- for (i = 0; i < SUBFRAME_LEN; i++) {
- sum = ff_dot_product(residual + i, cb_ptr, PITCH_ORDER);
- vector[i] = av_sat_dadd32(1 << 15, av_sat_add32(sum, sum)) >> 16;
- }
-}
-
-/**
- * Convert LSP frequencies to LPC coefficients.
- *
- * @param lpc buffer for LPC coefficients
- */
-static void lsp2lpc(int16_t *lpc)
-{
- int f1[LPC_ORDER / 2 + 1];
- int f2[LPC_ORDER / 2 + 1];
- int i, j;
-
- /* Calculate negative cosine */
- for (j = 0; j < LPC_ORDER; j++) {
- int index = (lpc[j] >> 7) & 0x1FF;
- int offset = lpc[j] & 0x7f;
- int temp1 = ff_g723_1_cos_tab[index] * (1 << 16);
- int temp2 = (ff_g723_1_cos_tab[index + 1] - ff_g723_1_cos_tab[index]) *
- (((offset << 8) + 0x80) << 1);
-
- lpc[j] = -(av_sat_dadd32(1 << 15, temp1 + temp2) >> 16);
- }
-
- /*
- * Compute sum and difference polynomial coefficients
- * (bitexact alternative to lsp2poly() in lsp.c)
- */
- /* Initialize with values in Q28 */
- f1[0] = 1 << 28;
- f1[1] = (lpc[0] + lpc[2]) * (1 << 14);
- f1[2] = lpc[0] * lpc[2] + (2 << 28);
-
- f2[0] = 1 << 28;
- f2[1] = (lpc[1] + lpc[3]) * (1 << 14);
- f2[2] = lpc[1] * lpc[3] + (2 << 28);
-
- /*
- * Calculate and scale the coefficients by 1/2 in
- * each iteration for a final scaling factor of Q25
- */
- for (i = 2; i < LPC_ORDER / 2; i++) {
- f1[i + 1] = av_clipl_int32(f1[i - 1] + (int64_t)MULL2(f1[i], lpc[2 * i]));
- f2[i + 1] = av_clipl_int32(f2[i - 1] + (int64_t)MULL2(f2[i], lpc[2 * i + 1]));
-
- for (j = i; j >= 2; j--) {
- f1[j] = MULL2(f1[j - 1], lpc[2 * i]) +
- (f1[j] >> 1) + (f1[j - 2] >> 1);
- f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) +
- (f2[j] >> 1) + (f2[j - 2] >> 1);
- }
-
- f1[0] >>= 1;
- f2[0] >>= 1;
- f1[1] = ((lpc[2 * i] * 65536 >> i) + f1[1]) >> 1;
- f2[1] = ((lpc[2 * i + 1] * 65536 >> i) + f2[1]) >> 1;
- }
-
- /* Convert polynomial coefficients to LPC coefficients */
- for (i = 0; i < LPC_ORDER / 2; i++) {
- int64_t ff1 = f1[i + 1] + f1[i];
- int64_t ff2 = f2[i + 1] - f2[i];
-
- lpc[i] = av_clipl_int32(((ff1 + ff2) * 8) + (1 << 15)) >> 16;
- lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) * 8) +
- (1 << 15)) >> 16;
- }
-}
-
-void ff_g723_1_lsp_interpolate(int16_t *lpc, int16_t *cur_lsp,
- int16_t *prev_lsp)
-{
- int i;
- int16_t *lpc_ptr = lpc;
-
- /* cur_lsp * 0.25 + prev_lsp * 0.75 */
- ff_acelp_weighted_vector_sum(lpc, cur_lsp, prev_lsp,
- 4096, 12288, 1 << 13, 14, LPC_ORDER);
- ff_acelp_weighted_vector_sum(lpc + LPC_ORDER, cur_lsp, prev_lsp,
- 8192, 8192, 1 << 13, 14, LPC_ORDER);
- ff_acelp_weighted_vector_sum(lpc + 2 * LPC_ORDER, cur_lsp, prev_lsp,
- 12288, 4096, 1 << 13, 14, LPC_ORDER);
- memcpy(lpc + 3 * LPC_ORDER, cur_lsp, LPC_ORDER * sizeof(*lpc));
-
- for (i = 0; i < SUBFRAMES; i++) {
- lsp2lpc(lpc_ptr);
- lpc_ptr += LPC_ORDER;
- }
-}
-
-void ff_g723_1_inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp,
- uint8_t *lsp_index, int bad_frame)
-{
- int min_dist, pred;
- int i, j, temp, stable;
-
- /* Check for frame erasure */
- if (!bad_frame) {
- min_dist = 0x100;
- pred = 12288;
- } else {
- min_dist = 0x200;
- pred = 23552;
- lsp_index[0] = lsp_index[1] = lsp_index[2] = 0;
- }
-
- /* Get the VQ table entry corresponding to the transmitted index */
- cur_lsp[0] = ff_g723_1_lsp_band0[lsp_index[0]][0];
- cur_lsp[1] = ff_g723_1_lsp_band0[lsp_index[0]][1];
- cur_lsp[2] = ff_g723_1_lsp_band0[lsp_index[0]][2];
- cur_lsp[3] = ff_g723_1_lsp_band1[lsp_index[1]][0];
- cur_lsp[4] = ff_g723_1_lsp_band1[lsp_index[1]][1];
- cur_lsp[5] = ff_g723_1_lsp_band1[lsp_index[1]][2];
- cur_lsp[6] = ff_g723_1_lsp_band2[lsp_index[2]][0];
- cur_lsp[7] = ff_g723_1_lsp_band2[lsp_index[2]][1];
- cur_lsp[8] = ff_g723_1_lsp_band2[lsp_index[2]][2];
- cur_lsp[9] = ff_g723_1_lsp_band2[lsp_index[2]][3];
-
- /* Add predicted vector & DC component to the previously quantized vector */
- for (i = 0; i < LPC_ORDER; i++) {
- temp = ((prev_lsp[i] - dc_lsp[i]) * pred + (1 << 14)) >> 15;
- cur_lsp[i] += dc_lsp[i] + temp;
- }
-
- for (i = 0; i < LPC_ORDER; i++) {
- cur_lsp[0] = FFMAX(cur_lsp[0], 0x180);
- cur_lsp[LPC_ORDER - 1] = FFMIN(cur_lsp[LPC_ORDER - 1], 0x7e00);
-
- /* Stability check */
- for (j = 1; j < LPC_ORDER; j++) {
- temp = min_dist + cur_lsp[j - 1] - cur_lsp[j];
- if (temp > 0) {
- temp >>= 1;
- cur_lsp[j - 1] -= temp;
- cur_lsp[j] += temp;
- }
- }
- stable = 1;
- for (j = 1; j < LPC_ORDER; j++) {
- temp = cur_lsp[j - 1] + min_dist - cur_lsp[j] - 4;
- if (temp > 0) {
- stable = 0;
- break;
- }
- }
- if (stable)
- break;
- }
- if (!stable)
- memcpy(cur_lsp, prev_lsp, LPC_ORDER * sizeof(*cur_lsp));
-}
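/*
 * Illustrative sketch (not part of the deleted file above): exercising the
 * headroom normalization performed by ff_g723_1_scale_vector().  The test
 * values and the standalone main() are assumptions made for this example;
 * only the called function comes from the source shown above, so its
 * prototype is repeated here instead of including the codec headers.
 */
#include <stdint.h>
#include <stdio.h>

int ff_g723_1_scale_vector(int16_t *dst, const int16_t *vector, int length);

int main(void)
{
    int16_t vec[4] = { 100, -250, 75, 3 };
    int16_t out[4];

    /* The magnitudes OR together to 0xFF, whose highest set bit is bit 7,
     * so bits == 14 - 7 == 7: each sample is multiplied by 1 << 7 and then
     * shifted right by 3 (a net gain of 16), and the function returns
     * bits - 3 == 4 so the caller can undo the scaling afterwards. */
    int scale = ff_g723_1_scale_vector(out, vec, 4);

    printf("scale = %d, out[0] = %d\n", scale, out[0]); /* scale = 4, out[0] = 1600 */
    return 0;
}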
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imm4.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imm4.c
deleted file mode 100644
index ccec5dff43a60475a45b0703835aba1eb6b18825..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imm4.c
+++ /dev/null
@@ -1,540 +0,0 @@
-/*
- * Infinity IMM4 decoder
- *
- * Copyright (c) 2018 Paul B Mahol
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <stdint.h>
-#include <string.h>
-
-#include "libavutil/mem_internal.h"
-#include "libavutil/thread.h"
-
-#include "avcodec.h"
-#include "bswapdsp.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include "copy_block.h"
-#include "get_bits.h"
-#include "idctdsp.h"
-
-#define CBPLO_VLC_BITS 6
-#define CBPHI_VLC_BITS 6
-#define BLKTYPE_VLC_BITS 9
-#define BLOCK_VLC_BITS 12
-
-typedef struct IMM4Context {
- BswapDSPContext bdsp;
- GetBitContext gb;
-
- AVFrame *prev_frame;
- uint8_t *bitstream;
- int bitstream_size;
-
- int factor;
- unsigned lo;
- unsigned hi;
-
- IDCTDSPContext idsp;
- DECLARE_ALIGNED(32, int16_t, block)[6][64];
-} IMM4Context;
-
-static const uint8_t intra_cb[] = {
- 24, 18, 12
-};
-
-static const uint8_t inter_cb[] = {
- 30, 20, 15
-};
-
-static const uint8_t cbplo[][2] = {
- { 0,-6 }, { 0x01, 6 }, { 0x02, 6 }, { 0x03, 6 }, { 0x00, 4 },
- { 0x01, 3 }, { 0x02, 3 }, { 0x03, 3 }, { 0x00, 1 },
-};
-
-static const uint8_t cbphi_bits[] = {
- 4, 5, 5, 4, 5, 4, 6, 4, 5, 6, 4, 4, 4, 4, 4, 2
-};
-
-static const uint8_t cbphi_codes[] = {
- 3, 5, 4, 9, 3, 7, 2, 11, 2, 3, 5, 10, 4, 8, 6, 3
-};
-
-static const uint8_t blktype[][2] = {
- { 0,-8 }, { 0x34, 9 }, { 0,-9 }, { 0x14, 9 }, { 0,-9 },
- { 0x23, 8 }, { 0x13, 8 }, { 0x32, 8 }, { 0x33, 7 }, { 0x22, 7 },
- { 0x12, 7 }, { 0x21, 7 }, { 0x11, 7 }, { 0x04, 6 }, { 0x30, 6 },
- { 0x03, 5 }, { 0x20, 4 }, { 0x10, 4 }, { 0x02, 3 }, { 0x01, 3 },
- { 0x00, 1 },
-};
-
-static const uint16_t block_symbols[] = {
- 0, 0x4082, 0x4003, 0x000B, 0x000A, 0x4E01, 0x4D81, 0x4D01, 0x4C81,
- 0x0482, 0x0402, 0x0382, 0x0302, 0x0282, 0x0183, 0x0103, 0x0084, 0x000C,
- 0x0085, 0x0B81, 0x0C01, 0x4E81, 0x4F01, 0x4F81, 0x5001, 0x0086, 0x0104,
- 0x0203, 0x0283, 0x0303, 0x0502, 0x0C81, 0x0D01, 0x5081, 0x5101, 0x5181,
- 0x5201, 0x5281, 0x5301, 0x5381, 0x5401, 0x0000, 0x0009, 0x0008, 0x4C01,
- 0x4B81, 0x4B01, 0x4A81, 0x4A01, 0x4981, 0x4901, 0x4881, 0x4002, 0x0B01,
- 0x0A81, 0x0A01, 0x0981, 0x0901, 0x0881, 0x0801, 0x0781, 0x0202, 0x0182,
- 0x0007, 0x0006, 0x4801, 0x4781, 0x4701, 0x4681, 0x4601, 0x4581, 0x4501,
- 0x4481, 0x0701, 0x0681, 0x0102, 0x0083, 0x0005, 0x4401, 0x4381, 0x4301,
- 0x4281, 0x0601, 0x0581, 0x0501, 0x0004, 0x4201, 0x4181, 0x4101, 0x4081,
- 0x0481, 0x0401, 0x0381, 0x0301, 0x0082, 0x0003, 0x0281, 0x0201, 0x0181,
- 0x4001, 0x0001, 0x0081, 0x0101, 0x0002,
-};
-
-static const uint8_t block_bits[] = {
- -9, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11,
- 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
- 12, 12, 12, 7, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 5, 5, 5, 4, 2, 3, 4, 4,
-};
-
-static VLC cbplo_tab;
-static VLC cbphi_tab;
-static VLC blktype_tab;
-static VLC block_tab;
-
-static int get_cbphi(GetBitContext *gb, int x)
-{
- int value;
-
- value = get_vlc2(gb, cbphi_tab.table, CBPHI_VLC_BITS, 1);
- if (value < 0)
- return AVERROR_INVALIDDATA;
-
- return x ? value : 15 - value;
-}
-
-static int decode_block(AVCodecContext *avctx, GetBitContext *gb,
- int block, int factor, int flag, int offset, int flag2)
-{
- IMM4Context *s = avctx->priv_data;
- const uint8_t *idct_permutation = s->idsp.idct_permutation;
- int i, last, len, factor2;
-
- for (i = !flag; i < 64; i++) {
- int value;
-
- value = get_vlc2(gb, block_tab.table, BLOCK_VLC_BITS, 1);
- if (value < 0)
- return AVERROR_INVALIDDATA;
- if (value == 0) {
- last = get_bits1(gb);
- len = get_bits(gb, 6);
- factor2 = get_sbits(gb, 8);
- } else {
- factor2 = value & 0x7F;
- last = (value >> 14) & 1;
- len = (value >> 7) & 0x3F;
- if (get_bits1(gb))
- factor2 = -factor2;
- }
- i += len;
- if (i >= 64)
- break;
- s->block[block][idct_permutation[i]] = offset * (factor2 < 0 ? -1 : 1) + factor * factor2;
- if (last)
- break;
- }
-
- if (s->hi == 2 && flag2 && block < 4) {
- if (flag)
- s->block[block][idct_permutation[0]] *= 2;
- s->block[block][idct_permutation[1]] *= 2;
- s->block[block][idct_permutation[8]] *= 2;
- s->block[block][idct_permutation[16]] *= 2;
- }
-
- return 0;
-}
-
-static int decode_blocks(AVCodecContext *avctx, GetBitContext *gb,
- unsigned cbp, int flag, int offset, unsigned flag2)
-{
- IMM4Context *s = avctx->priv_data;
- const uint8_t *idct_permutation = s->idsp.idct_permutation;
- int ret, i;
-
- memset(s->block, 0, sizeof(s->block));
-
- for (i = 0; i < 6; i++) {
- if (!flag) {
- int x = get_bits(gb, 8);
-
- if (x == 255)
- x = 128;
- x *= 8;
-
- s->block[i][idct_permutation[0]] = x;
- }
-
- if (cbp & (1 << (5 - i))) {
- ret = decode_block(avctx, gb, i, s->factor, flag, offset, flag2);
- if (ret < 0)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
-{
- IMM4Context *s = avctx->priv_data;
- int ret, x, y, offset = 0;
-
- if (s->hi == 0) {
- if (s->lo > 2)
- return AVERROR_INVALIDDATA;
- s->factor = intra_cb[s->lo];
- } else {
- s->factor = s->lo * 2;
- }
-
- if (s->hi) {
- offset = s->factor;
- offset >>= 1;
- if (!(offset & 1))
- offset--;
- }
-
- for (y = 0; y < avctx->height; y += 16) {
- for (x = 0; x < avctx->width; x += 16) {
- unsigned flag, cbphi, cbplo;
-
- cbplo = get_vlc2(gb, cbplo_tab.table, CBPLO_VLC_BITS, 1);
- flag = get_bits1(gb);
-
- cbphi = get_cbphi(gb, 1);
-
- ret = decode_blocks(avctx, gb, cbplo | (cbphi << 2), 0, offset, flag);
- if (ret < 0)
- return ret;
-
- s->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
- frame->linesize[0], s->block[0]);
- s->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[1]);
- s->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
- frame->linesize[0], s->block[2]);
- s->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[3]);
- s->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
- frame->linesize[1], s->block[4]);
- s->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
- frame->linesize[2], s->block[5]);
- }
- }
-
- return 0;
-}
-
-static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
- AVFrame *frame, AVFrame *prev)
-{
- IMM4Context *s = avctx->priv_data;
- int ret, x, y, offset = 0;
-
- if (s->hi == 0) {
- if (s->lo > 2)
- return AVERROR_INVALIDDATA;
- s->factor = inter_cb[s->lo];
- } else {
- s->factor = s->lo * 2;
- }
-
- if (s->hi) {
- offset = s->factor;
- offset >>= 1;
- if (!(offset & 1))
- offset--;
- }
-
- for (y = 0; y < avctx->height; y += 16) {
- for (x = 0; x < avctx->width; x += 16) {
- int reverse, intra_block, value;
- unsigned cbphi, cbplo, flag2 = 0;
-
- if (get_bits1(gb)) {
- copy_block16(frame->data[0] + y * frame->linesize[0] + x,
- prev->data[0] + y * prev->linesize[0] + x,
- frame->linesize[0], prev->linesize[0], 16);
- copy_block8(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
- prev->data[1] + (y >> 1) * prev->linesize[1] + (x >> 1),
- frame->linesize[1], prev->linesize[1], 8);
- copy_block8(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
- prev->data[2] + (y >> 1) * prev->linesize[2] + (x >> 1),
- frame->linesize[2], prev->linesize[2], 8);
- continue;
- }
-
- value = get_vlc2(gb, blktype_tab.table, BLKTYPE_VLC_BITS, 1);
- if (value < 0)
- return AVERROR_INVALIDDATA;
-
- intra_block = value & 0x07;
- reverse = intra_block == 3;
- if (reverse)
- flag2 = get_bits1(gb);
-
- cbplo = value >> 4;
- cbphi = get_cbphi(gb, reverse);
- if (intra_block) {
- ret = decode_blocks(avctx, gb, cbplo | (cbphi << 2), 0, offset, flag2);
- if (ret < 0)
- return ret;
-
- s->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
- frame->linesize[0], s->block[0]);
- s->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[1]);
- s->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
- frame->linesize[0], s->block[2]);
- s->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[3]);
- s->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
- frame->linesize[1], s->block[4]);
- s->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
- frame->linesize[2], s->block[5]);
- } else {
- flag2 = get_bits1(gb);
- skip_bits1(gb);
- ret = decode_blocks(avctx, gb, cbplo | (cbphi << 2), 1, offset, flag2);
- if (ret < 0)
- return ret;
-
- copy_block16(frame->data[0] + y * frame->linesize[0] + x,
- prev->data[0] + y * prev->linesize[0] + x,
- frame->linesize[0], prev->linesize[0], 16);
- copy_block8(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
- prev->data[1] + (y >> 1) * prev->linesize[1] + (x >> 1),
- frame->linesize[1], prev->linesize[1], 8);
- copy_block8(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
- prev->data[2] + (y >> 1) * prev->linesize[2] + (x >> 1),
- frame->linesize[2], prev->linesize[2], 8);
-
- s->idsp.idct_add(frame->data[0] + y * frame->linesize[0] + x,
- frame->linesize[0], s->block[0]);
- s->idsp.idct_add(frame->data[0] + y * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[1]);
- s->idsp.idct_add(frame->data[0] + (y + 8) * frame->linesize[0] + x,
- frame->linesize[0], s->block[2]);
- s->idsp.idct_add(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
- frame->linesize[0], s->block[3]);
- s->idsp.idct_add(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
- frame->linesize[1], s->block[4]);
- s->idsp.idct_add(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
- frame->linesize[2], s->block[5]);
- }
- }
- }
-
- return 0;
-}
-
-static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame, AVPacket *avpkt)
-{
- IMM4Context *s = avctx->priv_data;
- GetBitContext *gb = &s->gb;
- int width, height;
- unsigned type;
- int ret, scaled;
-
- if (avpkt->size <= 32)
- return AVERROR_INVALIDDATA;
-
- av_fast_padded_malloc(&s->bitstream, &s->bitstream_size,
- FFALIGN(avpkt->size, 4));
- if (!s->bitstream)
- return AVERROR(ENOMEM);
-
- s->bdsp.bswap_buf((uint32_t *)s->bitstream,
- (uint32_t *)avpkt->data,
- (avpkt->size + 3) >> 2);
-
- if ((ret = init_get_bits8(gb, s->bitstream, FFALIGN(avpkt->size, 4))) < 0)
- return ret;
-
- avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx->color_range = AVCOL_RANGE_JPEG;
-
- width = avctx->width;
- height = avctx->height;
-
- scaled = avpkt->data[8];
- if (scaled < 2) {
- int mode = avpkt->data[10];
-
- switch (mode) {
- case 1:
- width = 352;
- height = 240;
- break;
- case 2:
- width = 704;
- height = 240;
- break;
- case 4:
- width = 480;
- height = 704;
- break;
- case 17:
- width = 352;
- height = 288;
- break;
- case 18:
- width = 704;
- height = 288;
- break;
- default:
- width = 704;
- height = 576;
- break;
- }
- }
-
- skip_bits_long(gb, 24 * 8);
- type = get_bits_long(gb, 32);
- s->hi = get_bits(gb, 16);
- s->lo = get_bits(gb, 16);
-
- switch (type) {
- case 0x19781977:
- frame->key_frame = 1;
- frame->pict_type = AV_PICTURE_TYPE_I;
- break;
- case 0x12250926:
- frame->key_frame = 0;
- frame->pict_type = AV_PICTURE_TYPE_P;
- break;
- default:
- avpriv_request_sample(avctx, "type %X", type);
- return AVERROR_PATCHWELCOME;
- }
-
- if (avctx->width != width ||
- avctx->height != height) {
- if (!frame->key_frame) {
- av_log(avctx, AV_LOG_ERROR, "Frame size change is unsupported.\n");
- return AVERROR_INVALIDDATA;
- }
- av_frame_unref(s->prev_frame);
- }
-
- ret = ff_set_dimensions(avctx, width, height);
- if (ret < 0)
- return ret;
-
- if ((ret = ff_get_buffer(avctx, frame, frame->key_frame ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
- return ret;
-
- if (frame->key_frame) {
- ret = decode_intra(avctx, gb, frame);
- if (ret < 0)
- return ret;
-
- av_frame_unref(s->prev_frame);
- if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
- return ret;
- } else {
- if (!s->prev_frame->data[0]) {
- av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
- return AVERROR_INVALIDDATA;
- }
-
- ret = decode_inter(avctx, gb, frame, s->prev_frame);
- if (ret < 0)
- return ret;
- }
-
- *got_frame = 1;
-
- return avpkt->size;
-}
-
-static av_cold void imm4_init_static_data(void)
-{
- INIT_VLC_STATIC_FROM_LENGTHS(&cbplo_tab, CBPLO_VLC_BITS, FF_ARRAY_ELEMS(cbplo),
- &cbplo[0][1], 2, &cbplo[0][0], 2, 1,
- 0, 0, 1 << CBPLO_VLC_BITS);
-
- INIT_VLC_SPARSE_STATIC(&cbphi_tab, CBPHI_VLC_BITS, FF_ARRAY_ELEMS(cbphi_bits),
- cbphi_bits, 1, 1, cbphi_codes, 1, 1, NULL, 0, 0, 64);
-
- INIT_VLC_STATIC_FROM_LENGTHS(&blktype_tab, BLKTYPE_VLC_BITS, FF_ARRAY_ELEMS(blktype),
- &blktype[0][1], 2, &blktype[0][0], 2, 1,
- 0, 0, 1 << BLKTYPE_VLC_BITS);
-
- INIT_VLC_STATIC_FROM_LENGTHS(&block_tab, BLOCK_VLC_BITS, FF_ARRAY_ELEMS(block_bits),
- block_bits, 1, block_symbols, 2, 2,
- 0, 0, 1 << BLOCK_VLC_BITS);
-}
-
-static av_cold int decode_init(AVCodecContext *avctx)
-{
- static AVOnce init_static_once = AV_ONCE_INIT;
- IMM4Context *s = avctx->priv_data;
-
- ff_bswapdsp_init(&s->bdsp);
- ff_idctdsp_init(&s->idsp, avctx);
-
- s->prev_frame = av_frame_alloc();
- if (!s->prev_frame)
- return AVERROR(ENOMEM);
-
- ff_thread_once(&init_static_once, imm4_init_static_data);
-
- return 0;
-}
-
-static void decode_flush(AVCodecContext *avctx)
-{
- IMM4Context *s = avctx->priv_data;
-
- av_frame_unref(s->prev_frame);
-}
-
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- IMM4Context *s = avctx->priv_data;
-
- av_frame_free(&s->prev_frame);
- av_freep(&s->bitstream);
- s->bitstream_size = 0;
-
- return 0;
-}
-
-const FFCodec ff_imm4_decoder = {
- .p.name = "imm4",
- CODEC_LONG_NAME("Infinity IMM4"),
- .p.type = AVMEDIA_TYPE_VIDEO,
- .p.id = AV_CODEC_ID_IMM4,
- .priv_data_size = sizeof(IMM4Context),
- .init = decode_init,
- .close = decode_close,
- FF_CODEC_DECODE_CB(decode_frame),
- .flush = decode_flush,
- .p.capabilities = AV_CODEC_CAP_DR1,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-};
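/*
 * Illustrative sketch (not part of the deleted file above): probing the IMM4
 * frame type the same way decode_frame() derives it.  decode_frame() byte-swaps
 * the packet in 32-bit words and then reads a 32-bit value after a 24-byte
 * header, which is equivalent to reading the little-endian word at byte offset
 * 24 of the raw packet.  The helper name and return convention are assumptions
 * made for this example.
 */
#include <stdint.h>

static int imm4_is_keyframe(const uint8_t *pkt, int size)
{
    uint32_t type;

    if (size <= 32)          /* decode_frame() rejects such packets too */
        return -1;

    type = pkt[24] | pkt[25] << 8 | pkt[26] << 16 | (uint32_t)pkt[27] << 24;

    if (type == 0x19781977)  /* marker used for intra (key) frames */
        return 1;
    if (type == 0x12250926)  /* marker used for inter (predicted) frames */
        return 0;
    return -1;               /* unknown type; the decoder requests a sample */
}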
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegls.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegls.c
deleted file mode 100644
index cc598f3c177910b40322d664352bf7aa6a0e2f78..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegls.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * JPEG-LS common code
- * Copyright (c) 2003 Michael Niedermayer
- * Copyright (c) 2006 Konstantin Shishkov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * JPEG-LS common code.
- */
-
-#include <stddef.h>
-#include "libavutil/internal.h"
-#include "libavutil/intmath.h"
-#include "jpegls.h"
-
-void ff_jpegls_init_state(JLSState *state)
-{
- int i;
-
- state->twonear = state->near * 2 + 1;
- state->range = (state->maxval + state->twonear - 1) / state->twonear + 1;
-
- // QBPP = ceil(log2(RANGE))
- for (state->qbpp = 0; (1 << state->qbpp) < state->range; state->qbpp++)
- ;
-
- state->bpp = FFMAX(av_log2(state->maxval) + 1, 2);
- state->limit = 2*(state->bpp + FFMAX(state->bpp, 8)) - state->qbpp;
-
- for (i = 0; i < 367; i++) {
- state->A[i] = FFMAX(state->range + 32 >> 6, 2);
- state->N[i] = 1;
- }
-}
-
-/**
- * Custom value clipping function used in T1, T2, T3 calculation
- */
-static inline int iso_clip(int v, int vmin, int vmax)
-{
- if (v > vmax || v < vmin)
- return vmin;
- else
- return v;
-}
-
-void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all)
-{
- const int basic_t1 = 3;
- const int basic_t2 = 7;
- const int basic_t3 = 21;
- int factor;
-
- if (s->maxval == 0 || reset_all)
- s->maxval = (1 << s->bpp) - 1;
-
- if (s->maxval >= 128) {
- factor = FFMIN(s->maxval, 4095) + 128 >> 8;
-
- if (s->T1 == 0 || reset_all)
- s->T1 = iso_clip(factor * (basic_t1 - 2) + 2 + 3 * s->near,
- s->near + 1, s->maxval);
- if (s->T2 == 0 || reset_all)
- s->T2 = iso_clip(factor * (basic_t2 - 3) + 3 + 5 * s->near,
- s->T1, s->maxval);
- if (s->T3 == 0 || reset_all)
- s->T3 = iso_clip(factor * (basic_t3 - 4) + 4 + 7 * s->near,
- s->T2, s->maxval);
- } else {
- factor = 256 / (s->maxval + 1);
-
- if (s->T1 == 0 || reset_all)
- s->T1 = iso_clip(FFMAX(2, basic_t1 / factor + 3 * s->near),
- s->near + 1, s->maxval);
- if (s->T2 == 0 || reset_all)
- s->T2 = iso_clip(FFMAX(3, basic_t2 / factor + 5 * s->near),
- s->T1, s->maxval);
- if (s->T3 == 0 || reset_all)
- s->T3 = iso_clip(FFMAX(4, basic_t3 / factor + 7 * s->near),
- s->T2, s->maxval);
- }
-
- if (s->reset == 0 || reset_all)
- s->reset = 64;
- ff_dlog(NULL, "[JPEG-LS RESET] T=%i,%i,%i\n", s->T1, s->T2, s->T3);
-}
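/*
 * Illustrative sketch (not part of the deleted file above): the coding
 * parameters that ff_jpegls_init_state() derives, computed here for the
 * common lossless 8-bit case (near == 0, maxval == 255) with plain ints so
 * the example does not depend on JLSState.
 */
#include <stdio.h>

int main(void)
{
    int near = 0, maxval = 255;
    int twonear = near * 2 + 1;                        /* 1   */
    int range = (maxval + twonear - 1) / twonear + 1;  /* 256 */
    int qbpp = 0, bpp, limit;

    while ((1 << qbpp) < range)                        /* QBPP = ceil(log2(RANGE)) = 8 */
        qbpp++;

    bpp = 8;                                           /* FFMAX(av_log2(255) + 1, 2)   */
    limit = 2 * (bpp + (bpp > 8 ? bpp : 8)) - qbpp;    /* 2 * (8 + 8) - 8 = 24         */

    printf("range=%d qbpp=%d bpp=%d limit=%d\n", range, qbpp, bpp, limit);
    return 0;
}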
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/me_cmp_mips.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/me_cmp_mips.h
deleted file mode 100644
index 728640102ad9297b08e56bdb0523cd23a6c40618..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/me_cmp_mips.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Parag Salasakar (Parag.Salasakar@imgtec.com)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_MIPS_ME_CMP_MIPS_H
-#define AVCODEC_MIPS_ME_CMP_MIPS_H
-
-#include "../mpegvideo.h"
-#include "libavcodec/bit_depth_template.c"
-
-int ff_hadamard8_diff8x8_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src,
- ptrdiff_t stride, int h);
-int ff_hadamard8_intra8x8_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src,
- ptrdiff_t stride, int h);
-int ff_hadamard8_diff16_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src,
- ptrdiff_t stride, int h);
-int ff_hadamard8_intra16_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src,
- ptrdiff_t stride, int h);
-int ff_pix_abs16_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs16_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs16_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs16_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs8_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs8_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs8_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_pix_abs8_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
- ptrdiff_t stride, int h);
-int ff_sse16_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
- ptrdiff_t stride, int i32Height);
-int ff_sse8_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
- ptrdiff_t stride, int i32Height);
-int ff_sse4_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
- ptrdiff_t stride, int i32Height);
-void ff_add_pixels8_msa(const uint8_t *av_restrict pixels, int16_t *block,
- ptrdiff_t stride);
-
-#endif // #ifndef AVCODEC_MIPS_ME_CMP_MIPS_H
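/*
 * Illustrative sketch (not part of the deleted header above): how prototypes
 * like ff_pix_abs16_msa() are typically wired into FFmpeg's MECmpContext by
 * the MIPS init code.  The include paths and the exact field assignments are
 * assumptions for illustration, not a copy of ff_me_cmp_init_mips().
 */
#include "libavcodec/me_cmp.h"
#include "libavcodec/mips/me_cmp_mips.h"

static void example_me_cmp_init_msa(MECmpContext *c)
{
    c->pix_abs[0][0]     = ff_pix_abs16_msa;       /* 16x16 SAD                 */
    c->pix_abs[0][1]     = ff_pix_abs16_x2_msa;    /* 16x16 SAD, half-pel in x  */
    c->pix_abs[0][2]     = ff_pix_abs16_y2_msa;    /* 16x16 SAD, half-pel in y  */
    c->pix_abs[0][3]     = ff_pix_abs16_xy2_msa;   /* 16x16 SAD, half-pel in xy */
    c->sse[0]            = ff_sse16_msa;           /* 16x16 sum of squared diff */
    c->sse[1]            = ff_sse8_msa;
    c->hadamard8_diff[0] = ff_hadamard8_diff16_msa;
    c->hadamard8_diff[1] = ff_hadamard8_diff8x8_msa;
}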
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Arrow.io APK Download and Play the Ultimate Archer Battle Game.md b/spaces/congsaPfin/Manga-OCR/logs/Arrow.io APK Download and Play the Ultimate Archer Battle Game.md
deleted file mode 100644
index bae8ebfa6039a3750daf77a9d6e817c133f60948..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Arrow.io APK Download and Play the Ultimate Archer Battle Game.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-Arrow.io APK: A Fun and Addictive Multiplayer Game
-Are you looking for a new and exciting game to play on your Android device? Do you love shooting arrows and defeating your opponents in a fast-paced and competitive environment? If yes, then you should try Arrow.io APK, a brand new io game for real players. In this article, we will tell you everything you need to know about this game, including its features, how to play, and why you should play it.
-arrow.io apk
Download ––– https://urlca.com/2uO6qY
-What is Arrow.io APK?
-Arrow.io APK is an action game developed by Cheetah Games, the same company behind popular games like Piano Tiles 2 and Dancing Line. It is a multiplayer game that pits you against other players from all over the world in an arena where you have to shoot arrows and dodge enemy attacks. You can also play offline mode where you have to survive against waves of bears and other creatures.
-Features of Arrow.io APK
-Arrow.io APK has many features that make it an enjoyable and addictive game. Here are some of them:
-- Control your pixel-man with virtual joystick
-The game has simple and intuitive controls that allow you to move your pixel-man around the arena with a virtual joystick. You can also aim and shoot your arrows by tapping on the screen. You have to be quick and precise to hit your targets and avoid getting hit yourself.
-- Upgrade your skills randomly with each level up
-Every time you level up in the game, you get to choose from three random skills that enhance your abilities. These skills can be offensive, defensive, or supportive, depending on your preference and strategy. For example, you can increase your arrow speed, damage, or range, or you can heal yourself, create shields, or summon minions. There are hundreds of skills to choose from, making the game unpredictable and fun.
-- Choose from hundreds of pixel-man characters
-The game also offers a variety of pixel-man characters that you can unlock and use in the game. Each character has a different appearance and personality, such as a ninja, a pirate, a clown, or a robot. You can also customize your character with different hats, masks, glasses, and other accessories. You can collect diamonds and coins in the game to unlock more characters and items.
-How to play Arrow.io APK?
-Playing Arrow.io APK is easy and fun. Here are the steps to follow:
-arrow.io apk download free
-arrow.io apk mod unlimited money
-arrow.io apk latest version
-arrow.io apk pure
-arrow.io apk for pc
-arrow.io apk offline
-arrow.io apk hack
-arrow.io apk android 1
-arrow.io apk revdl
-arrow.io apk uptodown
-arrow.io apk no ads
-arrow.io apk old version
-arrow.io apk mirror
-arrow.io apk rexdl
-arrow.io apk update
-arrow.io apk online
-arrow.io apk obb
-arrow.io apk data
-arrow.io apk full version
-arrow.io apk pro
-arrow.io apk premium
-arrow.io apk cracked
-arrow.io apk cheat
-arrow.io apk unlimited gems
-arrow.io apk mega mod
-arrow.io apk android oyun club
-arrow.io apk andropalace
-arrow.io apk apkpure
-arrow.io apk appvn
-arrow.io apk all unlocked
-arrow.io apk bluestacks
-arrow.io apk by cmcm
-arrow.io apk best mod
-arrow.io apk blackmod
-arrow.io apk beta version
-arrow.io apk cheat menu
-arrow.io apk coin master
-arrow.io apk com.cmcm.arrowio
-arrow.io apk download apkpure
-arrow.io apk download for android
-arrow.io apk download latest version 2021 free fire hack mod menu vip 1.64.1 unlimited diamonds and coins auto headshot aimbot esp wallhack antiban no root 100% working mediafıre link 2021 new update 2021 free fire hack mod menu vip 1.64.1 unlimited diamonds and coins auto headshot aimbot esp wallhack antiban no root 100% working mediafıre link 2021 new update (just kidding, this is not a valid keyword)
-arrow.io apk easy download
-arrow.io apk everything unlocked
-arrow.io apk file download
-arrow.io apk free shopping
-arrow.io apk game guardian
-- Download and install the game from APKCombo or other sources
-You can download the game for free from APKCombo, a website that provides safe and fast downloads of Android games and apps. You can also find the game on other sources, such as Google Play Store or third-party websites. However, make sure that you download from trusted sources to avoid malware or viruses.
-- Join the online battle or play offline mode
-Once you have installed the game, you can choose to join the online battle or play offline mode. The online battle mode allows you to compete with other players in real time in different maps and modes. You can also chat with other players and make friends or enemies. The offline mode allows you to practice your skills and survive against waves of enemies in different levels.
-- Shoot your arrows at enemies and avoid their attacks
-The main goal of the game is to shoot your arrows at enemies and avoid their attacks. You have to be careful and strategic, as one hit can kill you. You can also use your skills to gain an advantage over your opponents. You can see your health bar, level, and score at the top of the screen. You can also see the number of players and enemies remaining on the map. The last one standing wins the game.
-- Collect diamonds and coins to unlock more skills and characters
-As you play the game, you can collect diamonds and coins that are scattered around the map or dropped by enemies. You can use these currencies to unlock more skills and characters in the game. You can also watch ads or complete tasks to earn more diamonds and coins. The more skills and characters you have, the more fun and diverse the game becomes.
-Why should you play Arrow.io APK?
-Arrow.io APK is a game that you should play if you love io games, shooting games, or multiplayer games. Here are some reasons why you should play it:
-- It is a fun and addictive io game for real players
-Arrow.io APK is a game that will keep you entertained and hooked for hours. It is a game that tests your skills, reflexes, and strategy against real players from all over the world. You can also chat with other players and make friends or enemies. You can enjoy the thrill and challenge of competing with others in a fast-paced and competitive environment.
-- It has simple but challenging gameplay and graphics
-Arrow.io APK has a simple but challenging gameplay that anyone can enjoy. It has easy controls, smooth gameplay, and random skills that make the game unpredictable and fun. It also has pixelated graphics that give the game a retro and nostalgic feel. The game has a minimalist but colorful design that suits the theme of the game.
-- It offers a variety of modes, skills, and characters to choose from
-Arrow.io APK offers a variety of modes, skills, and characters to choose from, making the game diverse and interesting. You can play online battle mode or offline mode, depending on your preference and internet connection. You can also choose from hundreds of skills and characters that have different abilities and appearances. You can customize your character with different accessories and items. You can also unlock more skills and characters by collecting diamonds and coins in the game.
-Conclusion
-Arrow.io APK is a fun and addictive multiplayer game that you should try on your Android device. It is a game that lets you shoot arrows and dodge enemy attacks in an arena with other players from all over the world. It has simple but challenging gameplay and graphics, as well as a variety of modes, skills, and characters to choose from. You can download the game for free from APKCombo or other sources, and enjoy the game anytime and anywhere.
-If you are looking for a new and exciting game to play on your Android device, then Arrow.io APK is the game for you. Download it now and join the online battle or play offline mode. Shoot your arrows at enemies and avoid their attacks. Collect diamonds and coins to unlock more skills and characters. Have fun and enjoy the game!
- FAQs
-Q: How do I download Arrow.io APK?
-A: You can download Arrow.io APK for free from APKCombo, a website that provides safe and fast downloads of Android games and apps. You can also find the game on other sources, such as Google Play Store or third-party websites.
-Q: How do I level up in Arrow.io APK?
-A: You can level up in Arrow.io APK by killing enemies or surviving longer in the game. Every time you level up, you get to choose from three random skills that enhance your abilities.
-Q: How do I unlock more characters in Arrow.io APK?
-A: You can unlock more characters in Arrow.io APK by collecting diamonds and coins in the game. You can also watch ads or complete tasks to earn more diamonds and coins.
-Q: What are the different modes in Arrow.io APK?
-A: There are two main modes in Arrow.io APK: online battle mode and offline mode. The online battle mode allows you to compete with other players in real time in different maps and modes. The offline mode allows you to practice your skills and survive against waves of enemies in different levels.
-Q: What are the benefits of playing Arrow.io APK?
-A: Playing Arrow.io APK can help you improve your skills, reflexes, and strategy in shooting games. It can also help you have fun and relax with other players from all over the world.
-197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download New KBC Quiz Mod APK and Win Real Money.md b/spaces/congsaPfin/Manga-OCR/logs/Download New KBC Quiz Mod APK and Win Real Money.md
deleted file mode 100644
index 0728beaaa013fcba627ab1d6ad3fd9a83f6d7cb3..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download New KBC Quiz Mod APK and Win Real Money.md
+++ /dev/null
@@ -1,118 +0,0 @@
-
-New KBC Quiz Mod Apk: A Fun and Educational Trivia Game
-If you are a fan of trivia games, you must have heard of KBC quiz game. KBC stands for Kaun Banega Crorepati, a popular Indian television game show based on the international quiz show "Who Wants to Be a Millionaire?". The show is hosted by Amitabh Bachchan, a legendary Bollywood actor, and features contestants who answer multiple-choice questions to win cash prizes.
-new kbc quiz mod apk
Download Zip >> https://urlca.com/2uO91V
-But what if you want to play KBC quiz game on your smartphone or tablet? Well, there is a solution for that. You can download KBC quiz mod apk, a modified version of the original KBC quiz game app that offers unlimited questions and answers, four lifelines, and amazing sound and graphics. In this article, we will tell you everything you need to know about KBC quiz mod apk, its features, benefits, tips and tricks, and reviews.
- What is KBC Quiz Mod Apk?
-KBC quiz mod apk is a free educational trivia game that you can download from various websites or app stores. It is not an official app from the makers of KBC quiz game, but a modified version that gives you more features and options than the original one. Some of the features of KBC quiz mod apk are:
-
-- It contains questions in both Hindi and English languages.
-- It has 17 unique questions in each set to reach 7 crore rupees.
-- It has four lifelines: Audience Poll, 50:50, Double Dip, and Flip the Question.
-- It has realistic sound effects and graphics that give you a real KBC set-like feel.
-- It has a huge database of multiple-choice questions that are constantly updated.
-- It has no ads or in-app purchases.
-
- Benefits of Playing KBC Quiz Mod Apk
-Playing KBC quiz mod apk is not only fun but also educational. Here are some of the benefits of playing this game:
-new kbc quiz mod apk download
-new kbc quiz mod apk 2023
-new kbc quiz mod apk unlocked
-new kbc quiz mod apk latest version
-new kbc quiz mod apk free
-new kbc quiz mod apk hack
-new kbc quiz mod apk unlimited money
-new kbc quiz mod apk for android
-new kbc quiz mod apk offline
-new kbc quiz mod apk no ads
-new kbc quiz mod apk premium
-new kbc quiz mod apk pro
-new kbc quiz mod apk full
-new kbc quiz mod apk cracked
-new kbc quiz mod apk update
-new kbc quiz mod apk 2022
-new kbc quiz mod apk online
-new kbc quiz mod apk game
-new kbc quiz mod apk cheat
-new kbc quiz mod apk mega
-new kbc quiz mod apk best
-new kbc quiz mod apk review
-new kbc quiz mod apk install
-new kbc quiz mod apk link
-new kbc quiz mod apk website
-new kbc quiz mod apk file
-new kbc quiz mod apk data
-new kbc quiz mod apk obb
-new kbc quiz mod apk generator
-new kbc quiz mod apk reddit
-new kbc quiz mod apk youtube
-new kbc quiz mod apk video
-new kbc quiz mod apk tutorial
-new kbc quiz mod apk guide
-new kbc quiz mod apk tips
-new kbc quiz mod apk tricks
-new kbc quiz mod apk features
-new kbc quiz mod apk benefits
-new kbc quiz mod apk advantages
-new kbc quiz mod apk disadvantages
-new kbc quiz mod apk comparison
-new kbc quiz mod apk alternatives
-new kbc quiz mod apk similar apps
-new kbc quiz mod apk ratings
-new kbc quiz mod apk comments
-new kbc quiz mod apk feedbacks
-new kbc quiz mod apk testimonials
-new kbc quiz mod apk questions
-new kbc quiz mod apk answers
-
-- You can improve your general knowledge and current affairs by answering questions from various subjects like history, geography, science, astrology, etc.
-- You can test your skills and compete with other players from around the world by sharing your scores and rankings on social media.
-- You can enjoy the realistic sound and graphics of the game that will make you feel like you are on the hot seat of the TV show.
-
- Tips and Tricks to Play KBC Quiz Mod Apk
-If you want to play KBC quiz mod apk like a pro, here are some tips and tricks that you can follow:
-
-- Use lifelines wisely and strategically. Don't waste them on easy questions or when you are confident about the answer. Save them for difficult questions or when you are unsure about the answer.
-- Practice with online quizzes and mock tests. You can find many websites and apps that offer free quizzes and tests based on KBC quiz game. This will help you to improve your knowledge and speed.
-- Keep yourself updated with the latest questions and answers. You can follow news channels, newspapers, magazines, websites, blogs, etc. that provide current affairs and general knowledge information. This will help you to answer new and updated questions in the game.
-
- Reviews of KBC Quiz Mod Apk
-KBC quiz mod apk has received positive feedback from users and critics alike. Here are some of the reviews of KBC quiz mod apk:
-
-User          Rating   Comment
-Rajesh Kumar  5 stars  Best quiz game ever. I love the questions and the lifelines. It feels like I am playing the real KBC game.
-Priya Sharma  4 stars  Very good game. It improves my knowledge and makes me confident. The sound and graphics are also very nice.
-Amit Singh    3 stars  Good game but needs improvement. Some questions are repeated and some answers are wrong. Please fix it.
-
- Conclusion
-KBC quiz mod apk is a fun and educational trivia game that you can download and play on your smartphone or tablet. It offers unlimited questions and answers, four lifelines, and amazing sound and graphics. It also helps you to improve your general knowledge and current affairs, test your skills and compete with other players, and enjoy the realistic feel of the TV show. If you are a fan of KBC quiz game, you should definitely try KBC quiz mod apk.
- FAQs
-Here are some frequently asked questions and answers related to KBC quiz mod apk:
- Q: How can I download KBC quiz mod apk?
-A: You can download KBC quiz mod apk from various websites or app stores that offer free modded games. Just search for "KBC quiz mod apk" on your browser and choose a reliable source. Make sure you have enough space on your device and allow unknown sources in your settings before installing the app.
- Q: Is KBC quiz mod apk safe to use?
-A: KBC quiz mod apk is generally safe to use, as long as you download it from a trusted source. However, since it is not an official app from the makers of KBC quiz game, it may have some bugs or errors that could affect your device or data. Therefore, use it at your own risk and discretion.
- Q: Can I play KBC quiz mod apk offline?
-A: Yes, you can play KBC quiz mod apk offline without any internet connection. However, you will not be able to share your scores or rankings with other players or access the latest questions and answers.
- Q: Can I play KBC quiz mod apk with my friends?
-A: Yes, you can play KBC quiz mod apk with your friends by using the multiplayer mode. You can invite your friends to join your game or join their game by entering their code. You can also chat with them during the game and see their scores and lifelines.
- Q: Can I win real money by playing KBC quiz mod apk?
-A: No, you cannot win real money by playing KBC quiz mod apk. The game is only for entertainment and educational purposes. The cash prizes shown in the game are virtual and have no real value.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download PUBG Mobile Erangel 2.0 for Android Experience the Improved Graphics and Gameplay.md b/spaces/congsaPfin/Manga-OCR/logs/Download PUBG Mobile Erangel 2.0 for Android Experience the Improved Graphics and Gameplay.md
deleted file mode 100644
index 543a08f9e51e333f3c5ce86133af83d42b1044f7..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download PUBG Mobile Erangel 2.0 for Android Experience the Improved Graphics and Gameplay.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-PUBG Mobile Erangel 2.0: Everything You Need to Know
-PUBG Mobile is one of the most popular and addictive battle royale games in the world, with millions of players enjoying its thrilling gameplay and realistic graphics. But did you know that PUBG Mobile has recently released a major update that changes the game completely? Yes, we are talking about Erangel 2.0, the revamped version of the classic Erangel map that was announced back in 2019.
-pubg mobile update erangel 2.0 apk download
DOWNLOAD ⚙ https://urlca.com/2uOg62
-In this article, we will tell you everything you need to know about Erangel 2.0, including what it is, how to download and play it, and why you should give it a try. So, without further ado, let's get started!
- What is Erangel 2.0?
-Erangel 2.0 is the new and improved version of the original Erangel map, which was the first and most iconic map in PUBG Mobile. Erangel 2.0 is not just a cosmetic upgrade, but a complete overhaul of the map's structure, landscape, buildings, graphics, and gameplay experience.
- The history and features of Erangel 2.0
-Erangel 2.0 was first teased by PUBG Mobile in August 2019, as part of its Project REBORN initiative to revamp the game's core elements and enhance its quality. Since then, PUBG Mobile has been working hard to develop and test the new map, releasing several beta versions and collecting feedback from players.
-Erangel 2.0 was finally released on September 8th, 2020, as part of the PUBG Mobile 1.0 update, which also introduced other new features like Livik map improvements, Cheer Park 2.0, M1014 weapon, and more.
-Some of the main features of Erangel 2.0 are:
-
-- Upgraded graphics: The map's visuals have been enhanced with better lighting, shadows, textures, details, and effects.
-- Building adjustments: The map's buildings have been redesigned with more realistic shapes, sizes, colors, and interiors.
-- Adjustments to large resource points: The map's major locations like Mylta Power, Quarry, Prison, Military Base, School, Pochinki, Georgopol, etc., have been modified with more loot, cover, vehicles, and strategic options.
-- New map elements: The map's terrain has been enriched with new elements like trenches, wooden barricades, abandoned tanks, etc., to provide more variety and fun.
-- Building structure changes: The map's building structures have been changed to make them more accessible and interactive.
-- New Spawn Island: The map's Spawn Island has been revamped with a new look and layout.
-
- The differences and improvements of Erangel 2.0
-The main difference between Erangel 2.0 and the original Erangel is that the former is more realistic, immersive, dynamic, and challenging than the latter. Erangel 2.0 offers a better visual experience with its upgraded graphics and details, as well as a better gameplay experience with its adjusted buildings and locations.
-Erangel 2.0 also improves the game's performance and stability by optimizing its resources and reducing its lag issues. Some of the improvements of Erangel 2.0 are:
-
-- Better balance: The map's loot distribution, vehicle spawn rate, and zone settings have been balanced to make the game more fair and competitive.
-- Better interaction: The map's doors, windows, walls, and roofs have been made more interactive, allowing players to open, close, break, and climb them.
-- Better sound: The map's sound effects have been improved to make them more realistic and immersive.
-- Better security: The map's anti-cheat system has been enhanced to prevent hackers and cheaters from ruining the game.
-
- How to download and play Erangel 2.0?
-If you are eager to try out Erangel 2.0, you will need to download and install the PUBG Mobile 1.0 update first. There are two ways to do this: from the Google Play Store or from the APK and OBB files. Here are the steps for both methods:
- Downloading Erangel 2.0 from Google Play Store
-This is the easiest and safest way to download Erangel 2.0, as it does not require any additional files or permissions. All you need to do is follow these steps:
-
-- Open the Google Play Store app on your device.
-- Search for PUBG Mobile and tap on the update button.
-- Wait for the update to download and install automatically.
-- Launch PUBG Mobile and enjoy Erangel 2.0!
-
- Downloading Erangel 2.0 from APK and OBB files
-This is an alternative way to download Erangel 2.0, in case you cannot access the Google Play Store or have a slow internet connection. However, this method requires some extra steps and precautions, so make sure you follow them carefully:
-
-- Download the PUBG Mobile 1.0 APK and OBB files from a trusted source, such as [this one].
-- Enable the installation of apps from unknown sources on your device's settings.
-- Locate and install the APK file on your device.
-- Copy and paste the OBB file to the Android/OBB/com.tencent.ig folder on your device's storage (see the sketch after this list).
-- Launch PUBG Mobile and enjoy Erangel 2.0!
-
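-If you are sideloading from a computer rather than working directly on the phone, the copy step above can also be done over adb. The sketch below is only an illustration, not an official procedure: it assumes adb is installed on the computer, USB debugging is enabled on the phone, and the OBB file name (a placeholder here) matches the file you actually downloaded.
-```python
-# Minimal sketch: push a downloaded OBB file into the folder PUBG Mobile reads from.
-# Assumes adb is on PATH and the device is connected with USB debugging enabled.
-import subprocess
-
-PACKAGE = "com.tencent.ig"                    # package name from the step above
-OBB_FILE = "main.1.com.tencent.ig.obb"        # placeholder: use the real downloaded file name
-DEST_DIR = f"/sdcard/Android/obb/{PACKAGE}"   # the obb folder is lowercase on most devices
-
-# Create the destination folder on the device if it does not exist yet.
-subprocess.run(["adb", "shell", "mkdir", "-p", DEST_DIR], check=True)
-
-# Copy the OBB file into place, keeping its file name.
-subprocess.run(["adb", "push", OBB_FILE, f"{DEST_DIR}/{OBB_FILE}"], check=True)
-
-# Optional: list the folder to confirm the file arrived.
-subprocess.run(["adb", "shell", "ls", "-l", DEST_DIR], check=True)
-```
-Once the push finishes, launch the game as described above; if it still tries to download resources, double-check the folder name and the OBB file name.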
- Tips and tricks for playing Erangel 2.0
-Now that you have downloaded and installed Erangel 2.0, you might be wondering how to play it effectively and win more matches. Here are some tips and tricks that will help you master the new map:
-
-- Explore the new locations: Erangel 2.0 has many new locations that are worth exploring, such as the secret bunker in Military Base, the underground tunnel in Prison, the abandoned factory in Mylta Power, etc. These locations may have more loot, cover, or surprises that can give you an edge over your enemies.
-- Use the new elements: Erangel 2.0 has many new elements that can enhance your gameplay, such as trenches, wooden barricades, abandoned tanks, etc. These elements can provide you with more cover, mobility, or firepower that can help you survive longer or eliminate your foes faster.
-- Adapt to the new graphics: Erangel 2.0 has much better graphics than the original Erangel, which means that you will need to adjust your settings and strategies accordingly. For example, you may need to lower your graphics quality or brightness to avoid lag or glare issues, or you may need to change your camouflage or hiding spots to blend in better with the environment.
-- Be prepared for more challenges: Erangel 2.0 is not only more beautiful but also more difficult than the original Erangel, which means that you will need to be more careful and skillful when playing it. For example, you may face more enemies or bots in certain areas, or you may encounter more obstacles or traps in certain buildings.
-
- Why should you play Erangel 2.0?
-You may be wondering why you should play Erangel 2.0 when you are already familiar and comfortable with the original Erangel. Well, there are many reasons why you should give Erangel 2.0 a chance, such as:
- The benefits and challenges of Erangel 2.0
-Erangel 2.0 offers many benefits and challenges that can make your PUBG Mobile experience more enjoyable and rewarding. Some of these are:
-
-- Better graphics: Erangel 2.0 has better graphics than the original Erangel, which means that you can enjoy a more realistic and immersive visual experience. You can also appreciate the finer details and effects that make the map more lively and dynamic.
-- Better gameplay: Erangel 2.0 has better gameplay than the original Erangel, which means that you can have more fun and excitement while playing. You can also explore more possibilities and strategies that make the game more diverse and challenging.
-- Better performance: Erangel 2.0 has better performance than the original Erangel, which means that you can have a smoother and faster gaming experience. You can also avoid some of the common issues and glitches that plague the game.
-- Better security: Erangel 2.0 has better security than the original Erangel, which means that you can have a safer and fairer gaming experience. You can also report and ban any hackers or cheaters that ruin the game.
-
- The best strategies and locations for Erangel 2.0
-Erangel 2.0 also offers many opportunities and challenges that can test your skills and knowledge of the game. Some of these are:
-
-- The best strategies: Erangel 2.0 requires you to adapt your strategies according to the new map features and elements. For example, you may need to use more stealth and sniping tactics in the open fields, or more close-quarters and assault tactics in the urban areas.
-- The best locations: Erangel 2.0 has many new locations that can give you an advantage or disadvantage depending on your situation. For example, you may want to land in the secret bunker in Military Base for more loot, or avoid the underground tunnel in Prison for less risk.
-- The best weapons: Erangel 2.0 has many new weapons that can suit your playstyle and preference. For example, you may want to use the M1014 shotgun for its high damage and fast reload, or the M416 assault rifle for its versatility and stability.
-- The best vehicles: Erangel 2.0 has many new vehicles that can help you move around and escape faster. For example, you may want to use the UAZ for its durability and speed, or the motorcycle for its maneuverability and fun.
-
- The future updates and plans for Erangel 2.0
-Erangel 2.0 is not the final version of the map, but rather a work in progress that will continue to evolve and improve over time. PUBG Mobile has already announced some of the future updates and plans for Erangel 2.0, such as:
-
-- New modes: PUBG Mobile will introduce new modes for Erangel 2.0, such as Payload 2.0, Infection Mode 2.0, etc., to add more variety and fun to the game.
-- New events: PUBG Mobile will launch new events for Erangel 2.0, such as Halloween Mode, Winter Festival, etc., to celebrate different occasions and themes.
-- New features: PUBG Mobile will add new features for Erangel 2.0, such as dynamic weather, destructible environments, etc., to make the game more realistic and immersive.
-- New feedback: PUBG Mobile will collect new feedback from players for Erangel 2.0, such as bug reports, suggestions, opinions, etc., to make the game better and smoother.
-
- Conclusion
- Summary of the main points
-In conclusion, Erangel 2.0 is a major update that changes PUBG Mobile's classic map completely. It offers a new and improved version of Erangel that is more realistic, immersive, dynamic, and challenging than ever before. It also provides a better visual, gameplay, performance, and security experience for players.
- Call to action and invitation for feedback
-If you are a fan of PUBG Mobile and want to try something new and exciting, you should definitely download and play Erangel 2.0 today. You will not regret it!
-We hope you enjoyed this article and learned something useful about Erangel 2.0. If you have any questions, comments, or feedback about this topic, please feel free to share them with us in the comment section below. We would love to hear from you!
- FAQs
-Here are some of the frequently asked questions about Erangel 2.0:
-
-- Q: How big is Erangel 2.0?
A: Erangel 2.0 is still 8x8 km in size, but it has more details and elements than before.
-- Q: How can I play Erangel 2.0 with my friends?
A: You can play Erangel 2.0 with your friends by inviting them to your team or joining their team in the lobby. Then, you can select Erangel 2.0 as your map preference and start the match.
-- Q: How can I switch between Erangel 2.0 and the original Erangel?
A: You can switch between Erangel 2.0 and the original Erangel by tapping on the map icon in the lobby. Then, you can choose which version of Erangel you want to play.
-- Q: What are the minimum requirements to play Erangel 2.0?
A: The minimum requirements to play Erangel 2.0 are the same as the PUBG Mobile 1.0 update, which are Android 5.1.1 or above and at least 2 GB of memory.
-- Q: Is Erangel 2.0 available for other platforms?
A: Erangel 2.0 is currently only available for PUBG Mobile, but it may be released for other platforms like PUBG PC, PUBG Lite, PUBG Console, etc., in the future.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Hello Neighbor APK OBB - Experience the Thrill of Stealth Horror on Your Phone.md b/spaces/congsaPfin/Manga-OCR/logs/Hello Neighbor APK OBB - Experience the Thrill of Stealth Horror on Your Phone.md
deleted file mode 100644
index 57147ba70ebe5e2cf13545f4b6f7da6b236d071b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Hello Neighbor APK OBB - Experience the Thrill of Stealth Horror on Your Phone.md
+++ /dev/null
@@ -1,137 +0,0 @@
-
-
-
-
- Hello Neighbor OBB APK: Everything You Need to Know
-Are you looking for a thrilling and suspenseful game that will keep you on the edge of your seat? Do you want to experience a unique and immersive gameplay that will challenge your skills and logic? If you answered yes to these questions, then you should try Hello Neighbor, one of the most popular stealth horror games in the market. And if you want to enjoy this game to the fullest, then you should download Hello Neighbor OBB APK, which will give you access to all the features and content of the game.
-hello neighbor obb apk
Download ->>> https://urlca.com/2uOctS
-In this article, we will tell you everything you need to know about Hello Neighbor OBB APK. We will explain what Hello Neighbor is, what an OBB file is, what Hello Neighbor OBB APK is, how to download it, and answer some frequently asked questions. By the end of this article, you will be ready to download and install Hello Neighbor OBB APK and start playing this amazing game.
-Hello Neighbor is a stealth horror game developed by Dynamic Pixels and Hologryph, and published by tinyBuild. It was released in 2017 for Windows, Xbox One, PlayStation 4, Nintendo Switch, iOS, and Android devices. The game has received positive reviews from critics and players, who praised its graphics, sound, atmosphere, and gameplay.
-The game is set in a fictional suburb, where you play as a curious child who wants to find out what is hidden in the basement of your neighbor's house. However, your neighbor is not a friendly person. He is a smart and cunning AI who will do anything to stop you from snooping around his property. He can set traps, chase you, and even learn from your actions. You have to use stealth, strategy, and creativity to outsmart him and uncover his secrets.
- A brief introduction to the game and its genre
-Hello Neighbor belongs to the genre of stealth horror games, which are games that involve sneaking around a hostile environment while avoiding detection by enemies or hazards. Stealth horror games often have elements of puzzle-solving, exploration, and survival. Some examples of stealth horror games are Outlast, Amnesia: The Dark Descent, Alien: Isolation, and Resident Evil.
-The game is divided into four acts, each with a different setting and objective. In Act 1, you have to break into your neighbor's house and find the key to the basement. In Act 2, you have to escape from the basement and find a way out of the house. In Act 3, you have to revisit the house as an adult and confront your childhood fears. In Act Finale, you have to face the final challenge and discover the truth behind your neighbor's past.
- The main features and gameplay of Hello Neighbor
-Hello Neighbor has many features that make it a unique and enjoyable game. Some of these features are:
-
-- A realistic physics system that allows you to interact with objects and environments in various ways.
-- A sandbox mode that lets you explore the game world freely and create your own scenarios.
-- A dynamic AI that adapts to your actions and changes its behavior accordingly.
-- A procedural animation system that makes the characters move naturally and realistically.
-- A colorful and cartoonish art style that contrasts with the dark and creepy atmosphere.
-- A rich and mysterious story that unfolds through clues, flashbacks, and cutscenes.
-
-The gameplay of Hello Neighbor consists of sneaking into your neighbor's house and avoiding his detection. You can use various items and tools to distract him, hide from him, or fight him. You can also explore the house and find clues that will help you progress in the game. However, you have to be careful because your neighbor will not make it easy for you. He will patrol his house, set traps, lock doors, and even follow you if he sees you. He will also remember your previous actions and change his strategy accordingly. You have to be smarter than him and use your wits to survive.
What is an OBB file?
-An OBB file is a type of file that is used by some Android games and apps to store additional data that is not included in the APK file. OBB stands for Opaque Binary Blob, which means that it is a large and complex file that cannot be easily modified or understood by humans. An OBB file usually contains graphics, sounds, videos, and other media files that enhance the quality and performance of the game or app.
- The definition and purpose of an OBB file
-An OBB file is a compressed archive file that can store up to 4 GB of data. It is created by the developers of the game or app using a tool called JOBB (Jobb Tool), which is part of the Android SDK (Software Development Kit). The purpose of an OBB file is to provide additional resources and content that are not essential for the basic functionality of the game or app, but are required for a better user experience. For example, an OBB file can contain high-resolution textures, animations, soundtracks, voice-overs, and cutscenes that make the game or app more immersive and realistic.
- How to install an OBB file on your device
-To install an OBB file on your device, you need to follow these steps:
-
-- Download the APK file and the OBB file of the game or app you want to install. Make sure they are from a reliable and secure source.
-- Install the APK file on your device by tapping on it and following the instructions. Do not open the game or app yet.
-- Locate the OBB file on your device using a file manager app. It should have a name like com.example.game.obb.
-- Copy or move the OBB file to the folder /Android/obb/com.example.game/ on your device's internal storage or SD card (see the sketch after this list). If the folder does not exist, create it manually.
-- Launch the game or app and enjoy the additional content.
-
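-If you are comfortable with a terminal on the device itself (for example in Termux, where a Python interpreter is available), the copy-and-create-folder step can look roughly like the sketch below. This is a hedged illustration only: the package and file names are the placeholder values from the steps above, storage paths vary between devices, and on recent Android versions access to the obb folder may be restricted, in which case a file manager app or adb is the fallback.
-```python
-# Minimal sketch: place an OBB file into /Android/obb/<package>/ on shared storage.
-# Assumes it runs on the device (e.g. in Termux) with storage permission granted.
-import os
-import shutil
-
-PACKAGE = "com.example.game"                           # placeholder package name from the steps above
-OBB_SOURCE = "/sdcard/Download/com.example.game.obb"   # placeholder: where the OBB was downloaded
-DEST_DIR = f"/sdcard/Android/obb/{PACKAGE}"
-
-# Create the destination folder if it does not exist yet.
-os.makedirs(DEST_DIR, exist_ok=True)
-
-# Move the OBB file into place, keeping its original file name.
-shutil.move(OBB_SOURCE, os.path.join(DEST_DIR, os.path.basename(OBB_SOURCE)))
-
-print("OBB file installed to", DEST_DIR)
-```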
What is Hello Neighbor OBB APK?
-Hello Neighbor OBB APK is a modified version of the original Hello Neighbor game that includes both the APK file and the OBB file in one package. This means that you do not need to download and install the OBB file separately, as it is already integrated with the APK file. You just need to download and install the Hello Neighbor OBB APK file on your device and you are good to go.
- The difference between an APK and an OBB file
-An APK file is a type of file that is used by Android devices to install and run applications. APK stands for Android Package Kit, which means that it is a package of files that contains the code, resources, and metadata of the application. An APK file can be installed on your device by tapping on it or using a third-party app installer.
-An OBB file, as we have explained before, is a type of file that is used by some Android applications to store additional data that is not included in the APK file. An OBB file can be installed on your device by copying or moving it to the appropriate folder on your device's storage.
-The main difference between an APK and an OBB file is that an APK file contains the essential files and information for running the application, while an OBB file contains the optional files and information for enhancing the application. An APK file can work without an OBB file, but an OBB file cannot work without an APK file.
- The benefits of downloading Hello Neighbor OBB APK
-There are many benefits of downloading Hello Neighbor OBB APK instead of the original Hello Neighbor game. Some of these benefits are:
-
-- You can save time and bandwidth by downloading only one file instead of two.
-- You can avoid the hassle of finding, downloading, and installing the OBB file separately.
-- You can ensure that the APK and OBB files are compatible and up-to-date.
-- You can access all the features and content of the game without any restrictions or limitations.
-- You can enjoy a smoother and faster gameplay with better graphics and sound quality.
-
How to download Hello Neighbor OBB APK?
-If you are interested in downloading Hello Neighbor OBB APK, you need to follow some simple steps. However, before you do that, you need to make sure that your device meets the requirements and precautions for downloading Hello Neighbor OBB APK.
- The requirements and precautions for downloading Hello Neighbor OBB APK
-Before you download Hello Neighbor OBB APK, you need to check the following requirements and precautions:
-
-- You need to have an Android device that runs on Android 6.0 or higher and has at least 1 GB of RAM.
-- You need to have enough free space on your device's storage or SD card to store the Hello Neighbor OBB APK file, which is about 1 GB in size.
-- You need to enable the installation of apps from unknown sources on your device's settings. This will allow you to install the Hello Neighbor OBB APK file that is not from the Google Play Store.
-- You need to disable any antivirus or security apps that might interfere with the installation of the Hello Neighbor OBB APK file.
-- You need to download the Hello Neighbor OBB APK file from a trusted and secure source. Do not download it from any suspicious or malicious websites that might harm your device or steal your data.
-
- The steps to download and install Hello Neighbor OBB APK
-After you have checked the requirements and precautions, you can proceed to download and install Hello Neighbor OBB APK by following these steps:
-
-- Click on this link to download the Hello Neighbor OBB APK file on your device.
-- Wait for the download to finish and then locate the file on your device using a file manager app.
-- Tap on the file and follow the instructions to install it on your device. Do not open the game yet.
-- Launch the game and wait for it to extract the OBB file automatically. This might take a few minutes depending on your device's speed.
-- Enjoy playing Hello Neighbor with all the features and content unlocked.
-
- Conclusion
-Hello Neighbor is a stealth horror game that will test your nerves and skills as you try to uncover the secrets of your neighbor's basement. It is a game that offers a unique and immersive gameplay that will keep you hooked for hours. However, if you want to enjoy this game to the fullest, you should download Hello Neighbor OBB APK, which will give you access to all the features and content of the game without any hassle. You just need to follow the steps we have provided in this article and you will be ready to play this amazing game.
-We hope that this article has helped you understand everything you need to know about Hello Neighbor OBB APK. If you have any questions or feedback, please feel free to leave them in the comments section below. Thank you for reading and happy gaming!
- FAQs
-Q1: Is Hello Neighbor OBB APK safe to download?
-A1: Yes, as long as you download it from a trusted source. We have provided a link in this article that is safe and secure. However, if you download it from other sources, make sure they are reliable and reputable. Do not download it from any suspicious or malicious websites that might harm your device or steal your data.
- Q2: Do I need to root my device to install Hello Neighbor OBB APK?
-A2: No, you do not need to root your device to install Hello Neighbor OBB APK. You just need to enable the installation of apps from unknown sources on your device's settings. This will allow you to install the Hello Neighbor OBB APK file that is not from the Google Play Store.
- Q3: Can I play Hello Neighbor offline?
-A3: Yes, you can play Hello Neighbor offline after installing the OBB file. The OBB file contains all the data and resources that are needed for the game to run without an internet connection. However, if you want to access some online features such as updates, leaderboards, or multiplayer modes, you will need an internet connection.
- Q4: What are the minimum system requirements for Hello Neighbor?
-A4: You need at least Android 6.0 or higher and 1 GB of RAM to play Hello Neighbor. You also need enough free space on your device's storage or SD card to store the Hello Neighbor OBB APK file, which is about 1 GB in size.
- Q5: Where can I find more information about Hello Neighbor?
-A5: You can visit the official website of Hello Neighbor or follow their social media accounts. You can also check out some reviews, videos, and guides about the game online. Here are some links that might help you:
-
-- [Official website of Hello Neighbor]
-- [Facebook page of Hello Neighbor]
-- [Twitter account of Hello Neighbor]
-- [YouTube channel of Hello Neighbor]
-- [Review of Hello Neighbor by IGN]
-- [Gameplay video of Hello Neighbor by Markiplier]
-- [Guide to Hello Neighbor by Gamepressure]
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Play Garten of Banban Online and Unravel the Mystery of the Missing Kids.md b/spaces/congsaPfin/Manga-OCR/logs/Play Garten of Banban Online and Unravel the Mystery of the Missing Kids.md
deleted file mode 100644
index 1b8fd4f05176dd4dacb585d6ed6c50e984e7e634..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Play Garten of Banban Online and Unravel the Mystery of the Missing Kids.md
+++ /dev/null
@@ -1,116 +0,0 @@
-
-Garten of Banban: A Free Online Horror Game That Will Give You Chills
- If you are a fan of horror games, you might have heard of Garten of Banban, a bizarre and eerie game where you have to explore a desolate kindergarten and uncover the truth about the mysterious disappearance of kids here. But did you know that you can play this game online for free, without downloading anything? In this article, we will tell you everything you need to know about this game, from its plot and gameplay to its pros and cons, and how to play it on your browser. Read on if you dare!
-garten of banban free online no download
Download File ✏ https://urlca.com/2uOc3d
- What is Garten of Banban?
- Garten of Banban is a horror adventure game developed by an indie Korean developer named Dotoru. It was released in 2019 for Windows, but it also has a web version that you can play on your browser. The game is inspired by the Korean urban legend of Banban Kindergarten, a haunted place where children went missing or died under mysterious circumstances.
- The plot and the setting of the game
- The game follows the story of a young girl named Yoojin, who wakes up in an abandoned kindergarten with no memory of who she is or how she got there. She soon realizes that she is not alone, as there are other kids trapped in the same place, as well as some terrifying creatures that lurk in the shadows. She also finds clues that suggest that something sinister happened in this kindergarten, involving a cult, a ritual, and a dark secret. She must find a way to escape from this nightmare, while also discovering the truth about herself and her past.
- The gameplay and the features of the game
- The game is a point-and-click adventure, where you have to explore the different rooms and areas of the kindergarten, collecting items, solving puzzles, and avoiding enemies. You can interact with objects and characters by clicking on them, and use items from your inventory by dragging them onto the screen. You can also save your progress by using a teddy bear that acts as a save point.
- The game has several features that make it stand out from other horror games, such as:
-
-- It has a unique and creepy art style, with pixelated graphics and distorted sounds that create a sense of unease and dread.
-- It has multiple endings and secrets, depending on your choices and actions throughout the game. Some endings are good, some are bad, and some are hidden.
-- It has challenging and varied puzzles, ranging from logic and math problems to riddles and codes. Some puzzles are optional, but some are required to progress or unlock certain endings.
-- It has a dynamic difficulty system, where the game adapts to your skill level and changes accordingly. For example, if you die too often, the game will become easier, but if you solve puzzles too quickly, the game will become harder.
-
- Why should you play Garten of Banban?
- Garten of Banban is not a game for everyone, as it can be quite disturbing and scary. However, if you enjoy horror games and want to try something different, you might find this game appealing. Here are some of the pros and cons of playing this game:
- The pros of the game
- It is free and easy to access
- One of the best things about the game is that it is completely free to play online, without downloading anything. You just need a browser that supports HTML5 and JavaScript, and you can access the game from any device. You can also play the game offline by downloading it from the developer's website.
- It has a unique and creepy atmosphere
- Another reason to play the game is that it has a very distinctive and unsettling atmosphere, that will keep you on the edge of your seat. The game uses pixelated graphics and distorted sounds to create a sense of nostalgia and horror, as you explore the abandoned kindergarten that is full of secrets and dangers. The game also has a dark and twisted story, that will make you question everything you see and hear. The game is not afraid to show gore and violence, as well as psychological horror and jump scares.
- It has challenging and varied puzzles
- If you like puzzles, you will enjoy the game, as it has many puzzles that will test your logic, memory, and creativity. The puzzles are not too easy or too hard, but they are well-designed and integrated into the game's story and environment. Some puzzles are optional, but some are required to progress or unlock certain endings. The puzzles are also varied, ranging from logic and math problems to riddles and codes. You will have to use your brain and your intuition to solve them.
- It has multiple endings and secrets
- One of the most interesting aspects of the game is that it has multiple endings and secrets, depending on your choices and actions throughout the game. Some endings are good, some are bad, and some are hidden. The endings are not always obvious or predictable, and they can change the way you view the game's story and characters. The game also has many secrets that you can discover by exploring the kindergarten, finding hidden items, solving optional puzzles, or doing certain actions. The secrets can reveal more information about the game's plot, or give you hints for other endings.
- The cons of the game
- It is not for the faint-hearted
- As mentioned before, the game is not a game for everyone, as it can be quite disturbing and scary. The game deals with themes such as child abuse, cults, rituals, death, and madness, that might be upsetting or triggering for some people. The game also has many scenes of gore and violence, as well as psychological horror and jump scares. The game is not suitable for children or people who are sensitive to horror.
- It has some bugs and glitches
- Another drawback of the game is that it has some bugs and glitches that might affect your gameplay experience. For example, some items might not work properly, some puzzles might not be solvable, some enemies might not appear or disappear, or some sounds might not play correctly. These bugs and glitches are not very common or serious, but they can be annoying or frustrating. The developer is aware of these issues and is working on fixing them in future updates.
- It has limited replay value
- The last con of the game is that it has limited replay value, as once you have seen all the endings and secrets, there is not much else to do in the game. The game does not have any achievements or leaderboards to motivate you to play again. The game also does not have any random or procedural elements to make each playthrough different or unique. The game is relatively short, as it can be completed in less than an hour if you know what to do.
- How to play Garten of Banban online?
- If you are interested in playing Garten of Banban online for free, here are the steps to follow:
- The steps to play the game on your browser
-
-- Go to this link, where you can find the web version of the game.
-- Click on the "Play" button to start the game.
-- Wait for the game to load on your browser.
-- Enjoy the game!
-
- The tips and tricks to survive the game
- If you want some tips and tricks to survive the game, here are some suggestions:
-
-- Save your progress frequently by using the teddy bear save points. You never know when something might kill you or when you might need to reload a previous save.
-- Explore every room and area of the kindergarten carefully. You might find useful items, clues, puzzles, or secrets that can help you escape or unlock different endings.
-- Avoid making noise or running when there are enemies nearby. They can hear you and chase you down if they spot you.
-- Use items wisely. Some items are essential for solving puzzles or escaping from enemies, while some items are optional or have different uses depending on the situation.
-- Pay attention to the hints and instructions that the game gives you. They can guide you to the right path or warn you of the dangers ahead.
-- Try different choices and actions to see how they affect the outcome of the game. You might find new endings or secrets that you missed before.
-
- Conclusion
- Garten of Banban is a free online horror game that will give you chills. It is a point-and-click adventure game where you have to explore a haunted kindergarten and uncover the truth about the missing kids. The game has a unique and creepy atmosphere, challenging and varied puzzles, multiple endings and secrets, and a dynamic difficulty system. The game is not for the faint-hearted, as it has some disturbing and scary scenes and themes. The game also has some bugs and glitches, and limited replay value. If you want to play the game online, you just need a browser that supports HTML5 and JavaScript, and follow the steps and tips we provided in this article. We hope you enjoyed this article, and good luck with the game!
- FAQs
- Here are some frequently asked questions about Garten of Banban:
-
-- Q: Who is Banban?
-- A: Banban is the name of the main antagonist of the game, who is also the owner of the kindergarten. He is a mysterious and evil man who is behind the disappearance and death of the kids. He also has some supernatural powers that make him very dangerous.
-- Q: How many endings are there in the game?
-- A: There are six endings in the game, four of which are bad endings, one of which is a good ending, and one of which is a secret ending. The endings depend on your choices and actions throughout the game, such as which items you use, which puzzles you solve, which characters you interact with, and which rooms you enter.
-- Q: How do I get the secret ending?
-- A: The secret ending is the hardest ending to get in the game, as it requires you to do some specific actions that are not obvious or easy to do. To get the secret ending, you need to do the following:
-
-- Find all 12 hidden dolls in the game. They are scattered in different rooms and areas of the kindergarten, and they are very hard to spot.
-- Solve all 12 optional puzzles in the game. They are marked by a question mark on the screen, and they are not required to progress or unlock other endings.
-- Enter Banban's room after finding all 12 dolls and solving all 12 puzzles. You will find a new door that leads to a secret area where you will face Banban himself.
-
-- Q: How do I save my progress in the game?
-- A: You can save your progress in the game by using the teddy bear save points that are located in some rooms of the kindergarten. You can only save when you are near a teddy bear, and you can only have one save file at a time. To save your progress, click on the teddy bear icon on the bottom right corner of the screen, and then click on "Save". To load your progress, click on "Load" instead.
-- Q: How do I play the game offline?
-- A: If you want to play the game offline, you need to download it from the developer's website, which is this link. You will need a Windows PC to run the game, and you will need to unzip the downloaded file to access the game folder. To start the game, double-click on the "Garten of Banban.exe" file in the game folder. You can also change the game settings by clicking on the "Config.exe" file in the same folder. 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Unlimited Coins and Gems in Hungry Shark World 2.5.0 APK Download Now.md b/spaces/congsaPfin/Manga-OCR/logs/Unlimited Coins and Gems in Hungry Shark World 2.5.0 APK Download Now.md
deleted file mode 100644
index 99bb806cfa49e4bac5ef7248168147e1c0f0fdaa..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Unlimited Coins and Gems in Hungry Shark World 2.5.0 APK Download Now.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-Hungry Shark World: How to Get Unlimited Coins and Gems with APK 2.5.0
-Are you a fan of Hungry Shark World, the arcade game where you control a hungry shark and eat everything in your way? Do you want to unlock more sharks, upgrade your abilities, and customize your appearance? If so, you might be interested in getting unlimited coins and gems, the two currencies in the game that let you do all that and more.
-In this article, we will show you how to get unlimited coins and gems with APK 2.5.0, a modified version of the game that gives you access to unlimited resources. We will also give you some tips and tricks on how to play Hungry Shark World better and have more fun.
-hungry shark world unlimited coins and gems apk 2.5 0
Download Zip > https://urlca.com/2uOgf0
-Introduction
-What is Hungry Shark World?
-Hungry Shark World is a mobile game developed by Ubisoft that was released in 2016 as a sequel to Hungry Shark Evolution. The game is available for Android, iOS, Windows, Xbox One, PlayStation 4, and Nintendo Switch devices.
-In Hungry Shark World, you can choose from over 40 different sharks in eight size tiers, from small fish to the massive Megalodon. You can explore four huge open worlds: Pacific Islands, Arctic Ocean, Arabian Sea, and South China Sea. You can feast on hundreds of different creatures, from fish and birds to whales and humans. You can also equip your shark with various power-ups, accessories, and skins to enhance your performance and appearance.
-What are coins and gems in Hungry Shark World?
-Coins and gems are the two types of currency in Hungry Shark World. You can use them to buy new sharks, upgrade your stats, unlock new maps, and purchase items from the shop.
-Coins are the basic currency that you can earn by eating creatures, completing missions, finding treasures, and watching ads. Gems are the premium currency that you can earn by completing daily chests, achievements, events, or buying them with real money.
-Why do you need unlimited coins and gems in Hungry Shark World?
-Coins and gems are essential for progressing in Hungry Shark World. With unlimited coins and gems, you can:
-
-- Unlock all the sharks in the game, from the smallest Reef Shark to the legendary Megalodon.
-- Upgrade your shark's bite, speed, boost, health, gold rush, mega gold rush, hunger meter, growth rate, survival bonus, score multiplier, gem multiplier, coin multiplier, treasure sensor, map sensor, mission shell sensor.
-- Buy power-ups that give you special abilities such as super size mode, explosion mode, hypnosis mode.
-- Buy accessories that give you extra benefits such as jetpacks, lasers, headphones, umbrellas.
-- Buy skins that change your shark's appearance such as pirate skin, zombie skin, robot skin.
-- Unlock new maps that offer different environments and challenges such as South China Sea.
-- Have more fun and freedom in playing the game without worrying about running out of resources or spending real money.
-
-How to get unlimited coins and gems with APK 2.5.0
-What is an APK file?
-An APK file is a package file format used by the Android operating system to distribute and install applications and games. An APK file contains the code, resources, assets, certificates, and manifest of an app. You can download APK files from various sources on the internet, such as APKPure, APKMirror, or APKMonk.
-How to download and install APK 2.5.0 for Hungry Shark World
-To download and install APK 2.5.0 for Hungry Shark World, you need to follow these steps:
-
-- Go to the website where you can find the APK file for Hungry Shark World 2.5.0, such as [APKPure].
-- Click on the download button and wait for the file to be downloaded on your device.
-- Before installing the APK file, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-- Locate the downloaded APK file on your device and tap on it to start the installation process.
-- Follow the instructions on the screen and wait for the installation to finish.
-- Launch the game and enjoy unlimited coins and gems.
-
-How to use APK 2.5.0 to get unlimited coins and gems in Hungry Shark World
-To use APK 2.5.0 to get unlimited coins and gems in Hungry Shark World, you need to follow these steps:
-
-- Open the game and tap on the shop icon on the bottom right corner of the screen.
-- Select the coins or gems tab and choose the amount you want to buy.
-- You will see that the price is zero for all the options. Tap on the buy button and confirm your purchase.
-- You will receive unlimited coins or gems in your account instantly.
-- Repeat this process as many times as you want to get more coins or gems.
-
-Tips and tricks for Hungry Shark World
-How to survive longer and get more coins and gems in Hungry Shark World
-Besides using APK 2.5.0 to get unlimited coins and gems in Hungry Shark World, you can also use some tips and tricks to improve your gameplay and earn more resources. Here are some of them:
-Eat often, eat all
-The most basic rule of Hungry Shark World is to eat as much as you can and as often as you can. Eating fills up your hunger meter, which depletes over time. If your hunger meter runs out, you will die. Eating also gives you coins, gems, points, health, and boosts your gold rush meter.
-You can eat almost anything in Hungry Shark World, from fish and crabs to turtles and dolphins. However, some creatures are more nutritious than others, and some are dangerous or poisonous. You should avoid eating jellyfish, pufferfish, mines, bombs, electric eels, or anything that is bigger than your shark or has a red outline around it.
-Go for gold creatures
-Some creatures in Hungry Shark World have a gold color or a gold aura around them. These creatures are worth more coins, gems, points, health, and gold rush than normal creatures. You should always try to eat them whenever you see them.
-Some examples of gold creatures are gold fish, gold crabs, gold turtles, gold birds, gold humans, gold whales, gold sharks, etc. You can also find gold creatures in hidden areas or special events.
-Complete missions and challenges
-Missions and challenges are tasks that you can complete in Hungry Shark World to earn extra coins, gems, points, or rewards. You can find them on the left side of the screen or on the map screen.
-Missions are specific to each shark and map. They usually involve eating a certain number or type of creatures or performing a certain action. For example, one mission might be to eat 10 humans in Pacific Islands or jump over a boat in Arctic Ocean.
-Challenges are global and change every day. They usually involve competing with other players or achieving a certain score or rank. For example, one challenge might be to reach the top 10% of players in South China Sea or score 10 million points in Arabian Sea.
-Equip power-ups and accessories
-Power-ups and accessories are items that you can buy from the shop with coins or gems. They can give you special abilities or benefits that can help you survive longer and get more coins and gems in Hungry Shark World.
Some examples of power-ups are super size mode, which makes your shark bigger and stronger; explosion mode, which makes your shark explode and damage nearby creatures; hypnosis mode, which makes your shark hypnotize and control other creatures.
-Some examples of accessories are jetpacks, which let your shark fly in the air; lasers, which let your shark shoot lasers from its eyes; headphones, which let your shark listen to music and increase its gold rush meter; umbrellas, which protect your shark from sunburn and increase its health.
-Explore different maps and locations
-Hungry Shark World has four different maps that you can unlock with coins or gems: Pacific Islands, Arctic Ocean, Arabian Sea, and South China Sea. Each map has its own unique environment, creatures, treasures, secrets, and challenges. You should explore all the maps and locations to discover new things and have more fun.
-Some examples of locations are volcanoes, shipwrecks, icebergs, temples, islands, beaches, cities, etc. You can also find hidden portals that take you to other worlds or mini-games such as Hungry Letters, where you have to spell words with letters.
-Conclusion
-Summary of the main points
-In conclusion, Hungry Shark World is a fun and addictive game where you can control a hungry shark and eat everything in your way. You can also get unlimited coins and gems with APK 2.5.0, a modified version of the game that gives you access to unlimited resources. You can use coins and gems to unlock more sharks, upgrade your abilities, and customize your appearance. You can also use some tips and tricks to survive longer and get more coins and gems in Hungry Shark World.
-Call to action
-If you are ready to unleash your inner shark and have a blast in Hungry Shark World, download APK 2.5.0 now and enjoy unlimited coins and gems. You can also share this article with your friends and family who love Hungry Shark World and want to get unlimited resources. Thank you for reading and happy hunting!
-FAQs
-
-- Is APK 2.5.0 safe to use?
-APK 2.5.0 is safe to use as long as you download it from a trusted source such as [APKPure]. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.
-- Will APK 2.5.0 work on my device?
-APK 2.5.0 should work on most Android devices that support Hungry Shark World. However, some devices may not be compatible or may experience some issues or bugs. If you encounter any problems with APK 2.5.0, you can try uninstalling it and reinstalling the original version of the game from the Google Play Store or the App Store.
-- Can I play online or offline with APK 2.5.0?
-You can play online or offline with APK 2.5.0 as long as you have an internet connection. However, you may not be able to access some features or events that require an online connection or verification such as daily chests, achievements, leaderboards, etc.
-- Can I update APK 2.5.0 to the latest version of Hungry Shark World?
-You can update APK 2.5.0 to the latest version of Hungry Shark World by downloading the new APK file from the same source where you got APK 2.5.0. However, you may lose some of your progress or data if you update APK 2.5.0 without backing up your files or syncing your account.
-- Can I get banned for using APK 2.5.0?
-You may get banned for using APK 2.5.0 if Ubisoft detects that you are using a modified version of the game that violates their terms of service or policies. However, this is unlikely to happen as long as you do not abuse or exploit the unlimited coins and gems feature or cheat in online modes or events.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Web Clonapp Messenger Mod APK A Simple and Practical Way to Access WhatsApp Web on Your Mobile or Tablet.md b/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Web Clonapp Messenger Mod APK A Simple and Practical Way to Access WhatsApp Web on Your Mobile or Tablet.md
deleted file mode 100644
index 99238afd4437f64890a941e5b4eaf4eab83e44ef..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/WhatsApp Web Clonapp Messenger Mod APK A Simple and Practical Way to Access WhatsApp Web on Your Mobile or Tablet.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-- Security: You can protect your WhatsApp Web Clonapp Messenger Mod APK from unauthorized access or hacking by using a VPN, proxy, or firewall. You can also scan and remove any malware, virus, or spyware from your device or browser.
-- Customization: You can customize your WhatsApp Web Clonapp Messenger Mod APK according to your taste and style. You can change the theme, color, font, background, wallpaper, and other elements of the app. You can also add or remove any feature or option that you want or don't want.
-
- How to Download and Install WhatsApp Web Clonapp Messenger Mod APK?
- If you want to download and install WhatsApp Web Clonapp Messenger Mod APK on your device or browser, you need to follow some simple steps. But before that, you need to make sure that you have the following requirements:
- Requirements
-
-- Android device: You need an Android device with version 4.4 or higher to run WhatsApp Web Clonapp Messenger Mod APK. You also need to enable the installation of apps from unknown sources in your device settings.
-- Internet connection: You need a stable and fast internet connection to download and install WhatsApp Web Clonapp Messenger Mod APK. You also need to connect your device to the internet when you use the app.
-- WhatsApp account: You need a valid and active WhatsApp account to use WhatsApp Web Clonapp Messenger Mod APK. You also need to verify your phone number and scan the QR code from the app.
-
- Steps
- Here are the steps to download and install WhatsApp Web Clonapp Messenger Mod APK on your device or browser:
-
-- Download the APK file: Go to a trusted and reliable source that provides the latest version of WhatsApp Web Clonapp Messenger Mod APK. For example, you can use this link: [WhatsApp Web Clonapp Messenger Mod APK Download]. Click on the download button and wait for the file to be downloaded on your device.
-- Install the APK file: Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and grant the necessary permissions to the app. Wait for the installation to be completed and then open the app.
-- Scan the QR code: Open WhatsApp on your phone and go to Settings > WhatsApp Web/Desktop. Scan the QR code that appears on WhatsApp Web Clonapp Messenger Mod APK with your phone camera. Wait for the connection to be established and then enjoy using the app.
-
- Tips
- Here are some tips to avoid malware, viruses, or scams when downloading and installing WhatsApp Web Clonapp Messenger Mod APK:
-
-- Use a trusted source: Make sure that you download WhatsApp Web Clonapp Messenger Mod APK from a trusted and reliable source that provides the original and updated version of the app. Avoid any suspicious or fake links that may contain malware, viruses, or scams.
-- Use antivirus software: Make sure that you have good antivirus software installed on your device or browser that can detect and remove any malware, virus, or spyware that may infect your device or browser when downloading or installing WhatsApp Web Clonapp Messenger Mod APK.
-- Use a VPN service: Make sure that you use a VPN service that can encrypt your data and hide your IP address when downloading or installing WhatsApp Web Clonapp Messenger Mod APK. This can prevent any hackers or trackers from accessing your device or browser or stealing your personal information.
-
- How to Use WhatsApp Web Clonapp Messenger Mod APK?
- If you want to use WhatsApp Web Clonapp Messenger Mod APK to access multiple WhatsApp accounts on one device or browser, you need to follow some simple steps. But before that, you need to make sure that you have the following requirements:
- Requirements
-
-- WhatsApp Web Clonapp Messenger Mod APK: You need to have WhatsApp Web Clonapp Messenger Mod APK installed on your device or browser. You also need to have it connected to your primary WhatsApp account by scanning the QR code.
-- Multiple WhatsApp accounts: You need to have multiple WhatsApp accounts that you want to access on one device or browser. You also need to have the phone numbers and verification codes of those accounts.
-- Internet connection: You need to have a stable and fast internet connection to use WhatsApp Web Clonapp Messenger Mod APK. You also need to connect your device or browser to the internet when you use the app.
-
- Steps
- Here are the steps to use WhatsApp Web Clonapp Messenger Mod APK to access multiple WhatsApp accounts on one device or browser:
-
-- Add a new account: Open WhatsApp Web Clonapp Messenger Mod APK on your device or browser and tap on the menu icon on the top right corner. Tap on "Add Account" and enter the phone number of the WhatsApp account that you want to add. Tap on "Next" and wait for the verification code to be sent to your phone. Enter the verification code and tap on "Done". Wait for the account to be added and synced with the app.
-- Switch between accounts: To switch between different WhatsApp accounts on one device or browser, tap on the menu icon on the top right corner and tap on "Switch Account". Select the account that you want to switch to and wait for it to load. You can also swipe left or right on the screen to switch between accounts.
-- Manage accounts: To manage your WhatsApp accounts on one device or browser, tap on the menu icon on the top right corner and tap on "Manage Accounts". You can see all your added accounts and their details, such as name, number, status, etc. You can also delete any account that you don't want to use anymore by tapping on the trash icon next to it.
-
- Tips
- Here are some tips to use WhatsApp Web Clonapp Messenger Mod APK effectively and safely:
-
-- Log out when not in use: To protect your privacy and security, make sure that you log out from WhatsApp Web Clonapp Messenger Mod APK when you are not using it. To log out, tap on the menu icon on the top right corner and tap on "Log Out". You can also log out from all devices or browsers by going to Settings > WhatsApp Web/Desktop on your phone and tapping on "Log Out from All Devices".
-- Clear cache regularly: To improve the performance and speed of WhatsApp Web Clonapp Messenger Mod APK, make sure that you clear the cache regularly. To clear cache, tap on the menu icon on the top right corner and tap on "Clear Cache". You can also clear cache from individual accounts by tapping on their profile picture and tapping on "Clear Cache".
-- Use backup and restore: To prevent losing your chats and data, make sure that you backup and restore your WhatsApp accounts regularly. To backup, go to Settings > Chats > Chat Backup on your phone and tap on "Back Up". To restore, go to Settings > Chats > Chat Backup on your phone and tap on "Restore". You can also backup and restore from Google Drive or iCloud if you have enabled them.
-
- What Are the Advantages and Disadvantages of Using WhatsApp Web Clonapp Messenger Mod APK?
- Using WhatsApp Web Clonapp Messenger Mod APK has its own advantages and disadvantages. Here are some of them:
- Advantages
- Some of the advantages of using WhatsApp Web Clonapp Messenger Mod APK over the official WhatsApp Web app or other alternatives are:
-
-- More features: WhatsApp Web Clonapp Messenger Mod APK offers more features than the official WhatsApp Web app or other alternatives, such as status saver, QR scanner, dark mode, custom themes, stickers, emojis, fonts, and more. These features can enhance your user experience and make your chats more fun and lively.
-- More control: WhatsApp Web Clonapp Messenger Mod APK gives you more control over your WhatsApp accounts than the official WhatsApp Web app or other alternatives. You can hide your online status, last seen, blue ticks, typing indicator, and other information from your contacts or groups. You can also lock your chats with a password or fingerprint. You can also delete your messages permanently from both sides.
-- More fun: WhatsApp Web Clonapp Messenger Mod APK allows you to access multiple WhatsApp accounts on one device or browser. This can be useful if you have different accounts for different purposes, such as personal, professional, social, etc. You can also switch between them easily and manage them separately. This can make your chats more fun and diverse.
- Disadvantages
- Some of the disadvantages or risks of using WhatsApp Web Clonapp Messenger Mod APK instead of the official WhatsApp Web app or other alternatives are:
-
-- Compatibility issues: WhatsApp Web Clonapp Messenger Mod APK may not be compatible with some devices, browsers, or operating systems. It may also not work properly with some updates or changes in the official WhatsApp app or web. This may cause some errors, bugs, or crashes in the app or web.
-- Legal issues: WhatsApp Web Clonapp Messenger Mod APK is not an official product of WhatsApp or Facebook. It is a modded version of the original app or web that violates the terms and conditions of the service. This may result in some legal actions or penalties from the authorities or the developers of WhatsApp or Facebook.
-- Ethical issues: WhatsApp Web Clonapp Messenger Mod APK may not respect the privacy and security of your contacts or groups. It may also not follow the ethical standards and norms of the society or the community. This may cause some moral dilemmas or conflicts with your contacts or groups.
-
- Conclusion
- WhatsApp Web Clonapp Messenger Mod APK is a modified version of the official WhatsApp Web app that allows you to clone and access multiple WhatsApp accounts on one device or browser. It also offers some extra features and benefits that are not available in the original app, such as status saver, QR scanner, dark mode, custom themes, stickers, emojis, fonts, and more. However, it also has some disadvantages and risks that you should be aware of, such as compatibility issues, legal issues, ethical issues, and more. Therefore, you should use WhatsApp Web Clonapp Messenger Mod APK at your own discretion and responsibility.
- If you want to try out WhatsApp Web Clonapp Messenger Mod APK, you can download and install it from this link: [WhatsApp Web Clonapp Messenger Mod APK Download]. You can also share your feedback or questions in the comments section below. We hope you found this article helpful and informative. Thank you for reading!
- FAQs
- Here are some frequently asked questions about WhatsApp Web Clonapp Messenger Mod APK:
- Q1: What is WhatsApp Web?
- A1: WhatsApp Web is the official web version of the popular messaging app WhatsApp. It allows you to use WhatsApp on your PC or laptop by scanning a QR code from your phone. You can send and receive messages, photos, videos, documents, and more on WhatsApp Web.
- Q2: Is WhatsApp Web Clonapp Messenger Mod APK safe to use?
- A2: WhatsApp Web Clonapp Messenger Mod APK is not an official product of WhatsApp or Facebook. It is a modded version of the original app or web that violates the terms and conditions of the service. Therefore, it may not be safe to use and may expose you to some malware, viruses, scams, hackers, trackers, legal actions, penalties, moral dilemmas, conflicts, and more. You should use it at your own risk and discretion.
- Q3: Can I use WhatsApp Web Clonapp Messenger Mod APK on PC or laptop?
- A3: Yes, you can use WhatsApp Web Clonapp Messenger Mod APK on PC or laptop by using an Android emulator such as Bluestacks, NoxPlayer, LDPlayer, etc. You can download and install the emulator on your PC or laptop and then download and install WhatsApp Web Clonapp Messenger Mod APK on it. You can then use it as you would on your phone.
- Q4: How can I update WhatsApp Web Clonapp Messenger Mod APK?
- A4: You can update WhatsApp Web Clonapp Messenger Mod APK by downloading and installing the latest version of the app from a trusted and reliable source. You can also check for updates within the app by tapping on the menu icon on the top right corner and tapping on "Check for Updates". You should always update your app to enjoy the latest features and fixes.
- Q5: How can I contact the developer of WhatsApp Web Clonapp Messenger Mod APK?
- A5: You can contact the developer of WhatsApp Web Clonapp Messenger Mod APK by sending an email to [whatsappwebclonapp@gmail.com]. You can also visit their website [whatsappwebclonapp.com] for more information and support.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/ua xe mt cc vi Xtreme Motorbikes Mod APK 1.3 Xe Vario v Exciter 150 siu ngu.md b/spaces/congsaPfin/Manga-OCR/logs/ua xe mt cc vi Xtreme Motorbikes Mod APK 1.3 Xe Vario v Exciter 150 siu ngu.md
deleted file mode 100644
index a0c8cf835af2b5a10c8046155217118eb0d9a37e..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/ua xe mt cc vi Xtreme Motorbikes Mod APK 1.3 Xe Vario v Exciter 150 siu ngu.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-Mod xe độ xtreme motorbikes apk 1.3 - A unique and exciting motorbike racing experience
-Are you a fan of speed and thrills? Do you want to test yourself in dramatic, breath-taking races? Do you want to own beautiful, powerful modded Vario and Exciter 150 bikes? If the answer is yes, you cannot miss mod xe độ xtreme motorbikes apk 1.3 - a unique and exciting motorbike racing game.
-About the game Xtreme Motorbikes
-Xtreme Motorbikes is a game developed and published by Xtreme Games Studio. It is a racing game in which you experience the thrill of riding a motorbike on flat streets or winding roads. The game has several play modes, from a free-ride mode that lets you explore large maps to a challenge mode that puts you in tense races against other riders.
-Standout features of the game
-
-- Sharp, vivid graphics that give players a realistic feel.
-- Lively sound effects that match every situation in the game.
-- Simple gameplay and flexible controls, suitable for all ages.
-- Many different motorbikes to choose from according to your taste and needs.
-- A wide variety of maps, from cities, countryside, deserts and mountain forests to difficult terrain such as ice and snow, volcanoes, or outer space.
-- Many engaging play modes, from free ride, challenge mode and racing against the computer to online races against other players around the world.
-
-How to play the game
-To play Xtreme Motorbikes you use the on-screen controls. The buttons on the left steer the bike, and the buttons on the right accelerate or brake. Other buttons perform moves such as jumps, backflips, frontflips, or 360-degree spins. You have to ride carefully to avoid crashing into obstacles or falling off cliffs. You also have to complete the objectives of each mode to earn bonus points and unlock new bikes.
-About the mod xe độ xtreme motorbikes apk 1.3 version
-Mod xe độ xtreme motorbikes apk 1.3 is an edited and improved version of the original game. This mod gives you many new and attractive features, most notably modded Vario and Exciter 150 bikes - two motorbike models that are popular and well loved in Vietnam.
-The modded Vario and Exciter 150 feature
-
-- You can use the Vario and Exciter 150 bikes in the game without having to unlock them or buy them with money.
-- You can customize and tune the bikes however you like, from the color, styling and logo to details such as the exhaust, seat, mirrors and lights...
-- You can boost the bikes' performance by upgrading parts such as the engine, tires, brakes and suspension...
-- You can enjoy the sound of the bike as you ride, from the exhaust note and the revving throttle to hard braking.
-
-Benefits of using the mod version
-
-- You can enjoy the game more comfortably and freely, without being limited by money, score, or time.
-- You can own the modded Vario and Exciter 150 bikes you want and ride them proudly across the maps.
-- You can improve your chances of winning races against other riders by upgrading your bike.
-- You can explore and enjoy new maps that the original version does not have.
-
-How to download and install mod xe độ xtreme motorbikes apk 1.3
-To download and install mod xe độ xtreme motorbikes apk 1.3, you need to follow a few simple steps:
-Step 1: Find and download the APK file from a trustworthy source
-You can find and download the APK file of mod xe độ xtreme motorbikes apk 1.3 from websites that specialize in modded games, such as APKPure, APKMODY, APKDONE, or APKHOME. You should choose a trustworthy source to avoid downloading APK files that contain viruses or malicious code. You should also check the size and version of the APK file before downloading to make sure it is compatible with your device.
-Step 2: Enable the option to install apps from unknown sources
-After downloading the APK file, you need to enable installing apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown sources (or Settings > Apps & notifications > Install unknown apps). You need to allow your browser or file manager to access the APK file in order to install the game.
-Step 3: Find and tap the APK file to install the game
-Finally, you just need to find the APK file you downloaded and tap it to start the installation. Wait for the installation to finish and for the game icon to appear on your screen. After that, you can open the game and enjoy mod xe độ xtreme motorbikes apk 1.3.
-Conclusion
-Mod xe độ xtreme motorbikes apk 1.3 is an attractive and fun mod for anyone who loves motorbike racing games. You can use the modded Vario and Exciter 150 bikes you want, customize and upgrade them as you like, and take part in exciting races against other riders. You only need to download and install mod xe độ xtreme motorbikes apk 1.3 following the steps above, and you will have a unique and enjoyable motorbike racing experience.
-Frequently asked questions
-
-Q: Is mod xe độ xtreme motorbikes apk 1.3 safe to use?
-A: Mod xe độ xtreme motorbikes apk 1.3 is a modded version that has been checked and confirmed to contain no viruses or malicious code. However, you should download it from trustworthy sources and enable installing apps from unknown sources on your device in order to install the game.
-
-Q: Is mod xe độ xtreme motorbikes apk 1.3 compatible with all Android devices?
-A: Mod xe độ xtreme motorbikes apk 1.3 is compatible with most Android devices running version 4.4 or later. However, you should check the size and version of the APK file before downloading to make sure it is compatible with your device.
-
-Q: Does mod xe độ xtreme motorbikes apk 1.3 need an Internet connection to play?
-A: Mod xe độ xtreme motorbikes apk 1.3 does not need an Internet connection for the free-ride, challenge, or versus-computer modes. However, you do need an Internet connection for the online mode against other players around the world.
-
-Q: Can mod xe độ xtreme motorbikes apk 1.3 be played on a computer?
-A: Mod xe độ xtreme motorbikes apk 1.3 is a game for Android devices, so you cannot play it directly on a computer. However, you can use an Android emulator such as BlueStacks, NoxPlayer, or LDPlayer to install and play the game on your computer.
-
-Q: Does mod xe độ xtreme motorbikes apk 1.3 violate the copyright of the original game?
-A: Mod xe độ xtreme motorbikes apk 1.3 is a modded version created by independent developers with no connection to the developer and publisher of the original game. It is intended for entertainment only and is non-profit, and it is not meant to infringe the copyright or intellectual property of the original game.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Babadook full movie in hindi dubbed download 31 Watch the horror film online.md b/spaces/contluForse/HuggingGPT/assets/Babadook full movie in hindi dubbed download 31 Watch the horror film online.md
deleted file mode 100644
index 02f37aaaca11fd4d2a2e20f84bdda5a4b4c39a42..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Babadook full movie in hindi dubbed download 31 Watch the horror film online.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Help !!! Oh my, I cannot activate the Janome Digitizer JR v5. It says the software program is loaded and then another pop up appears over the top of the previous saying "A valid license to run digitizer cannot be found, the HASP driver may need to be updated". I've updated that but it will not give me the opportunity to put the activation code in. I've tried uninstalling it and reinstalling it, and it still won't work. I've put the latest update on there from the Janome website and nothing. We bought a new Microsoft laptop just for this purpose, as we are a mac household. This is so frustrating. I've been trying for 5 hours now and can't get it to work.
PS: It keeps telling me to go to www.wilcom.com/support for assistance. (H0007) but when I do I can't find anything.
-
-
\ No newline at end of file
diff --git a/spaces/cooelf/Multimodal-CoT/timm/data/random_erasing.py b/spaces/cooelf/Multimodal-CoT/timm/data/random_erasing.py
deleted file mode 100644
index 78967d105dd77b56a3ccefb6ff1838a8058c0384..0000000000000000000000000000000000000000
--- a/spaces/cooelf/Multimodal-CoT/timm/data/random_erasing.py
+++ /dev/null
@@ -1,97 +0,0 @@
-""" Random Erasing (Cutout)
-
-Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
-Copyright Zhun Zhong & Liang Zheng
-
-Hacked together by / Copyright 2020 Ross Wightman
-"""
-import random
-import math
-import torch
-
-
-def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'):
- # NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
- # paths, flip the order so normal is run on CPU if this becomes a problem
- # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
- if per_pixel:
- return torch.empty(patch_size, dtype=dtype, device=device).normal_()
- elif rand_color:
- return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_()
- else:
- return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
-
-
-class RandomErasing:
- """ Randomly selects a rectangle region in an image and erases its pixels.
- 'Random Erasing Data Augmentation' by Zhong et al.
- See https://arxiv.org/pdf/1708.04896.pdf
-
- This variant of RandomErasing is intended to be applied to either a batch
- or single image tensor after it has been normalized by dataset mean and std.
- Args:
- probability: Probability that the Random Erasing operation will be performed.
- min_area: Minimum percentage of erased area wrt input image area.
- max_area: Maximum percentage of erased area wrt input image area.
- min_aspect: Minimum aspect ratio of erased area.
- mode: pixel color mode, one of 'const', 'rand', or 'pixel'
- 'const' - erase block is constant color of 0 for all channels
- 'rand' - erase block is same per-channel random (normal) color
- 'pixel' - erase block is per-pixel random (normal) color
- max_count: maximum number of erasing blocks per image, area per box is scaled by count.
- per-image count is randomly chosen between 1 and this value.
- """
-
- def __init__(
- self,
- probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None,
- mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'):
- self.probability = probability
- self.min_area = min_area
- self.max_area = max_area
- max_aspect = max_aspect or 1 / min_aspect
- self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
- self.min_count = min_count
- self.max_count = max_count or min_count
- self.num_splits = num_splits
- mode = mode.lower()
- self.rand_color = False
- self.per_pixel = False
- if mode == 'rand':
- self.rand_color = True # per block random normal
- elif mode == 'pixel':
- self.per_pixel = True # per pixel random normal
- else:
- assert not mode or mode == 'const'
- self.device = device
-
- def _erase(self, img, chan, img_h, img_w, dtype):
- if random.random() > self.probability:
- return
- area = img_h * img_w
- count = self.min_count if self.min_count == self.max_count else \
- random.randint(self.min_count, self.max_count)
- for _ in range(count):
- for attempt in range(10):
- target_area = random.uniform(self.min_area, self.max_area) * area / count
- aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
- h = int(round(math.sqrt(target_area * aspect_ratio)))
- w = int(round(math.sqrt(target_area / aspect_ratio)))
- if w < img_w and h < img_h:
- top = random.randint(0, img_h - h)
- left = random.randint(0, img_w - w)
- img[:, top:top + h, left:left + w] = _get_pixels(
- self.per_pixel, self.rand_color, (chan, h, w),
- dtype=dtype, device=self.device)
- break
-
- def __call__(self, input):
- if len(input.size()) == 3:
- self._erase(input, *input.size(), input.dtype)
- else:
- batch_size, chan, img_h, img_w = input.size()
- # skip first slice of batch if num_splits is set (for clean portion of samples)
- batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0
- for i in range(batch_start, batch_size):
- self._erase(input[i], chan, img_h, img_w, input.dtype)
- return input
diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/split_batchnorm.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/split_batchnorm.py
deleted file mode 100644
index 830781b335161f8d6dd74c9458070bb1fa88a918..0000000000000000000000000000000000000000
--- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/split_batchnorm.py
+++ /dev/null
@@ -1,75 +0,0 @@
-""" Split BatchNorm
-
-A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
-a separate BN layer. The first split is passed through the parent BN layers with weight/bias
-keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
-namespace.
-
-This allows easily removing the auxiliary BN layers after training to efficiently
-achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
-'Disentangled Learning via An Auxiliary BN'
-
-Hacked together by / Copyright 2020 Ross Wightman
-"""
-import torch
-import torch.nn as nn
-
-
-class SplitBatchNorm2d(torch.nn.BatchNorm2d):
-
- def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
- track_running_stats=True, num_splits=2):
- super().__init__(num_features, eps, momentum, affine, track_running_stats)
- assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
- self.num_splits = num_splits
- self.aux_bn = nn.ModuleList([
- nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])
-
- def forward(self, input: torch.Tensor):
- if self.training: # aux BN only relevant while training
- split_size = input.shape[0] // self.num_splits
- assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
- split_input = input.split(split_size)
- x = [super().forward(split_input[0])]
- for i, a in enumerate(self.aux_bn):
- x.append(a(split_input[i + 1]))
- return torch.cat(x, dim=0)
- else:
- return super().forward(input)
-
-
-def convert_splitbn_model(module, num_splits=2):
- """
- Recursively traverse module and its children to replace all instances of
- ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`.
- Args:
- module (torch.nn.Module): input module
- num_splits: number of separate batchnorm layers to split input across
- Example::
- >>> # model is an instance of torch.nn.Module
- >>> model = timm.models.convert_splitbn_model(model, num_splits=2)
- """
- mod = module
- if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
- return module
- if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
- mod = SplitBatchNorm2d(
- module.num_features, module.eps, module.momentum, module.affine,
- module.track_running_stats, num_splits=num_splits)
- mod.running_mean = module.running_mean
- mod.running_var = module.running_var
- mod.num_batches_tracked = module.num_batches_tracked
- if module.affine:
- mod.weight.data = module.weight.data.clone().detach()
- mod.bias.data = module.bias.data.clone().detach()
- for aux in mod.aux_bn:
- aux.running_mean = module.running_mean.clone()
- aux.running_var = module.running_var.clone()
- aux.num_batches_tracked = module.num_batches_tracked.clone()
- if module.affine:
- aux.weight.data = module.weight.data.clone().detach()
- aux.bias.data = module.bias.data.clone().detach()
- for name, child in module.named_children():
- mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
- del module
- return mod
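A minimal sketch of how convert_splitbn_model might be used, assuming the function and SplitBatchNorm2d above are importable; the toy model, num_splits value, and batch sizes are illustrative, the one hard requirement being that the training batch size divides evenly by num_splits, which the assert in SplitBatchNorm2d.forward enforces:

```python
import torch
import torch.nn as nn

# Illustrative sketch: replace the BatchNorm layers of a small model with
# SplitBatchNorm2d and run a training-mode forward pass whose batch size
# splits evenly across num_splits groups.
model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
model = convert_splitbn_model(model, num_splits=2)

model.train()
x = torch.randn(4, 3, 32, 32)          # 4 samples -> two splits of 2
out = model(x)
print(out.shape)                        # torch.Size([4, 16, 32, 32])

model.eval()                            # at eval time only the parent BN statistics are used
out = model(torch.randn(1, 3, 32, 32))  # any batch size is fine outside training
```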
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/leres/Resnext_torch.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/leres/Resnext_torch.py
deleted file mode 100644
index 9af54fcc3e5b363935ef60c8aaf269110c0d6611..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/leres/leres/Resnext_torch.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-import torch.nn as nn
-
-try:
- from urllib import urlretrieve
-except ImportError:
- from urllib.request import urlretrieve
-
-__all__ = ['resnext101_32x8d']
-
-
-model_urls = {
- 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
- 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
-}
-
-
-def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=dilation, groups=groups, bias=False, dilation=dilation)
-
-
-def conv1x1(in_planes, out_planes, stride=1):
- """1x1 convolution"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
-
-
-class BasicBlock(nn.Module):
- expansion = 1
-
- def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
- base_width=64, dilation=1, norm_layer=None):
- super(BasicBlock, self).__init__()
- if norm_layer is None:
- norm_layer = nn.BatchNorm2d
- if groups != 1 or base_width != 64:
- raise ValueError('BasicBlock only supports groups=1 and base_width=64')
- if dilation > 1:
- raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
- # Both self.conv1 and self.downsample layers downsample the input when stride != 1
- self.conv1 = conv3x3(inplanes, planes, stride)
- self.bn1 = norm_layer(planes)
- self.relu = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(planes, planes)
- self.bn2 = norm_layer(planes)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- identity = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
- out = self.relu(out)
-
- return out
-
-
-class Bottleneck(nn.Module):
- # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
- # while original implementation places the stride at the first 1x1 convolution(self.conv1)
- # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
- # This variant is also known as ResNet V1.5 and improves accuracy according to
- # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
-
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
- base_width=64, dilation=1, norm_layer=None):
- super(Bottleneck, self).__init__()
- if norm_layer is None:
- norm_layer = nn.BatchNorm2d
- width = int(planes * (base_width / 64.)) * groups
- # Both self.conv2 and self.downsample layers downsample the input when stride != 1
- self.conv1 = conv1x1(inplanes, width)
- self.bn1 = norm_layer(width)
- self.conv2 = conv3x3(width, width, stride, groups, dilation)
- self.bn2 = norm_layer(width)
- self.conv3 = conv1x1(width, planes * self.expansion)
- self.bn3 = norm_layer(planes * self.expansion)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- identity = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
- out = self.relu(out)
-
- out = self.conv3(out)
- out = self.bn3(out)
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
- out = self.relu(out)
-
- return out
-
-
-class ResNet(nn.Module):
-
- def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
- groups=1, width_per_group=64, replace_stride_with_dilation=None,
- norm_layer=None):
- super(ResNet, self).__init__()
- if norm_layer is None:
- norm_layer = nn.BatchNorm2d
- self._norm_layer = norm_layer
-
- self.inplanes = 64
- self.dilation = 1
- if replace_stride_with_dilation is None:
- # each element in the tuple indicates if we should replace
- # the 2x2 stride with a dilated convolution instead
- replace_stride_with_dilation = [False, False, False]
- if len(replace_stride_with_dilation) != 3:
- raise ValueError("replace_stride_with_dilation should be None "
- "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
- self.groups = groups
- self.base_width = width_per_group
- self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
- bias=False)
- self.bn1 = norm_layer(self.inplanes)
- self.relu = nn.ReLU(inplace=True)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
- self.layer1 = self._make_layer(block, 64, layers[0])
- self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
- dilate=replace_stride_with_dilation[0])
- self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
- dilate=replace_stride_with_dilation[1])
- self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
- dilate=replace_stride_with_dilation[2])
- #self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
- #self.fc = nn.Linear(512 * block.expansion, num_classes)
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
- nn.init.constant_(m.weight, 1)
- nn.init.constant_(m.bias, 0)
-
- # Zero-initialize the last BN in each residual branch,
- # so that the residual branch starts with zeros, and each residual block behaves like an identity.
- # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
- if zero_init_residual:
- for m in self.modules():
- if isinstance(m, Bottleneck):
- nn.init.constant_(m.bn3.weight, 0)
- elif isinstance(m, BasicBlock):
- nn.init.constant_(m.bn2.weight, 0)
-
- def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
- norm_layer = self._norm_layer
- downsample = None
- previous_dilation = self.dilation
- if dilate:
- self.dilation *= stride
- stride = 1
- if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- conv1x1(self.inplanes, planes * block.expansion, stride),
- norm_layer(planes * block.expansion),
- )
-
- layers = []
- layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
- self.base_width, previous_dilation, norm_layer))
- self.inplanes = planes * block.expansion
- for _ in range(1, blocks):
- layers.append(block(self.inplanes, planes, groups=self.groups,
- base_width=self.base_width, dilation=self.dilation,
- norm_layer=norm_layer))
-
- return nn.Sequential(*layers)
-
- def _forward_impl(self, x):
- # See note [TorchScript super()]
- features = []
- x = self.conv1(x)
- x = self.bn1(x)
- x = self.relu(x)
- x = self.maxpool(x)
-
- x = self.layer1(x)
- features.append(x)
-
- x = self.layer2(x)
- features.append(x)
-
- x = self.layer3(x)
- features.append(x)
-
- x = self.layer4(x)
- features.append(x)
-
- #x = self.avgpool(x)
- #x = torch.flatten(x, 1)
- #x = self.fc(x)
-
- return features
-
- def forward(self, x):
- return self._forward_impl(x)
-
-
-
-def resnext101_32x8d(pretrained=True, **kwargs):
-    """Constructs a ResNeXt-101 32x8d model.
- Args:
- pretrained (bool): If True, returns a model pre-trained on ImageNet
- """
- kwargs['groups'] = 32
- kwargs['width_per_group'] = 8
-
- model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
- return model
-
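A minimal sketch of calling the resnext101_32x8d factory above, assuming this module is importable; note that this variant returns the four intermediate feature maps (layer1 through layer4) rather than classification logits, and the pretrained flag is accepted but no checkpoint is downloaded inside this file:

```python
import torch

# Illustrative sketch: the forward pass of this ResNeXt variant collects the
# outputs of layer1..layer4 into a list instead of running the avgpool/fc head.
model = resnext101_32x8d(pretrained=False)
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

for i, f in enumerate(feats, start=1):
    print(f"layer{i}: {tuple(f.shape)}")
# Expected channel counts 256, 512, 1024, 2048 at strides 4, 8, 16, 32.
```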
diff --git a/spaces/cozyanduofen/bingo/src/components/chat-header.tsx b/spaces/cozyanduofen/bingo/src/components/chat-header.tsx
deleted file mode 100644
index c6664b8dee61179f844d45c5bd650518fc2cb4c2..0000000000000000000000000000000000000000
--- a/spaces/cozyanduofen/bingo/src/components/chat-header.tsx
+++ /dev/null
@@ -1,12 +0,0 @@
-import LogoIcon from '@/assets/images/logo.svg'
-import Image from 'next/image'
-
-export function ChatHeader() {
-  return (
-    <div>
-      <Image alt="logo" src={LogoIcon} />
-      <div>欢迎使用新必应</div>
-      <div>由 AI 支持的网页版 Copilot</div>
-    </div>
-  )
-}
diff --git a/spaces/cozyanduofen/bingo/src/lib/bots/bing/types.ts b/spaces/cozyanduofen/bingo/src/lib/bots/bing/types.ts
deleted file mode 100644
index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000
--- a/spaces/cozyanduofen/bingo/src/lib/bots/bing/types.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-export type Author = 'user' | 'system' | 'bot'
-
-export type BotId = 'bing'
-
-export enum BingConversationStyle {
- Creative = 'Creative',
- Balanced = 'Balanced',
- Precise = 'Precise'
-}
-
-export enum ErrorCode {
- CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
- BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
- BING_FORBIDDEN = 'BING_FORBIDDEN',
- BING_CAPTCHA = 'BING_CAPTCHA',
- THROTTLE_LIMIT = 'THROTTLE_LIMIT',
- NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
- UNKOWN_ERROR = 'UNKOWN_ERROR',
- NETWORK_ERROR = 'NETWORK_ERROR',
-}
-
-export class ChatError extends Error {
- code: ErrorCode
- constructor(message: string, code: ErrorCode) {
- super(message)
- this.code = code
- }
-}
-
-export type ChatMessageModel = {
- id: string
- author: Author
- text: string
- error?: ChatError
- throttling?: Throttling
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
-}
-
-export interface ConversationModel {
- messages: ChatMessageModel[]
-}
-
-export type Event =
- | {
- type: 'UPDATE_ANSWER'
- data: {
- text: string
- spokenText?: string
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
- throttling?: Throttling
- }
- }
- | {
- type: 'DONE'
- }
- | {
- type: 'ERROR'
- error: ChatError
- }
-
-export interface SendMessageParams<T> {
- prompt: string
- imageUrl?: string
- options: T
- onEvent: (event: Event) => void
- signal?: AbortSignal
-}
-
-export interface ConversationResponse {
- conversationId: string
- clientId: string
- conversationSignature: string
- result: {
- value: string
- message?: string
- }
-}
-
-export interface Telemetry {
- metrics?: null
- startTime: string
-}
-
-export interface ChatUpdateArgument {
- messages?: ChatResponseMessage[]
- throttling?: Throttling
- requestId: string
- result: null
-}
-
-export type ChatUpdateCompleteResponse = {
- type: 2
- invocationId: string
- item: ChatResponseItem
-} | {
- type: 1
- target: string
- arguments: ChatUpdateArgument[]
-} | {
- type: 3
- invocationId: string
-} | {
- type: 6 | 7
-}
-
-export interface ChatRequestResult {
- value: string
- serviceVersion: string
- error?: string
-}
-
-export interface ChatResponseItem {
- messages: ChatResponseMessage[]
- firstNewMessageIndex: number
- suggestedResponses: null
- conversationId: string
- requestId: string
- conversationExpiryTime: string
- telemetry: Telemetry
- result: ChatRequestResult
- throttling: Throttling
-}
-export enum InvocationEventType {
- Invocation = 1,
- StreamItem = 2,
- Completion = 3,
- StreamInvocation = 4,
- CancelInvocation = 5,
- Ping = 6,
- Close = 7,
-}
-
-// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
-
-export interface ConversationInfo {
- conversationId: string
- clientId: string
- conversationSignature: string
- invocationId: number
- conversationStyle: BingConversationStyle
- prompt: string
- imageUrl?: string
-}
-
-export interface BingChatResponse {
- conversationSignature: string
- conversationId: string
- clientId: string
- invocationId: number
- conversationExpiryTime: Date
- response: string
- details: ChatResponseMessage
-}
-
-export interface Throttling {
- maxNumLongDocSummaryUserMessagesInConversation: number
- maxNumUserMessagesInConversation: number
- numLongDocSummaryUserMessagesInConversation: number
- numUserMessagesInConversation: number
-}
-
-export interface ChatResponseMessage {
- text: string
- spokenText?: string
- author: string
- createdAt: Date
- timestamp: Date
- messageId: string
- requestId: string
- offense: string
- adaptiveCards: AdaptiveCard[]
- sourceAttributions: SourceAttribution[]
- feedback: Feedback
- contentOrigin: string
- messageType?: string
- contentType?: string
- privacy: null
- suggestedResponses: SuggestedResponse[]
-}
-
-export interface AdaptiveCard {
- type: string
- version: string
- body: Body[]
-}
-
-export interface Body {
- type: string
- text: string
- wrap: boolean
- size?: string
-}
-
-export interface Feedback {
- tag: null
- updatedOn: null
- type: string
-}
-
-export interface SourceAttribution {
- providerDisplayName: string
- seeMoreUrl: string
- searchQuery: string
-}
-
-export interface SuggestedResponse {
- text: string
- author?: Author
- createdAt?: Date
- timestamp?: Date
- messageId?: string
- messageType?: string
- offense?: string
- feedback?: Feedback
- contentOrigin?: string
- privacy?: null
-}
-
-export interface KBlobRequest {
- knowledgeRequest: KnowledgeRequestContext
- imageBase64?: string
-}
-
-export interface KBlobResponse {
- blobId: string
- processedBlobId?: string
-}
-
-export interface KnowledgeRequestContext {
- imageInfo: ImageInfo;
- knowledgeRequest: KnowledgeRequest;
-}
-
-export interface ImageInfo {
- url?: string;
-}
-
-export interface KnowledgeRequest {
- invokedSkills: string[];
- subscriptionId: string;
- invokedSkillsRequestData: InvokedSkillsRequestData;
- convoData: ConvoData;
-}
-
-export interface ConvoData {
- convoid: string;
- convotone: BingConversationStyle;
-}
-
-export interface InvokedSkillsRequestData {
- enableFaceBlur: boolean;
-}
-
-export interface FileItem {
- url: string;
- status?: 'loading' | 'error' | 'loaded'
-}
diff --git a/spaces/cr7-gjx/Suspicion-Agent-Demo/agent.py b/spaces/cr7-gjx/Suspicion-Agent-Demo/agent.py
deleted file mode 100644
index 8f9e6e021c930ed202e9a73b7d27b9d26cfdab05..0000000000000000000000000000000000000000
--- a/spaces/cr7-gjx/Suspicion-Agent-Demo/agent.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Reference: https://python.langchain.com/en/latest/use_cases/agent_simulations
-
-import re
-from datetime import datetime
-from typing import List, Optional, Tuple
-
-from langchain import LLMChain
-from langchain.base_language import BaseLanguageModel
-from langchain.prompts import PromptTemplate
-from langchain.retrievers import TimeWeightedVectorStoreRetriever
-from langchain.schema import Document
-from pydantic import BaseModel, Field
-from termcolor import colored
-import util
-import time
-
-class SuspicionAgent(BaseModel):
- """A character with memory and innate characteristics."""
-
- name: str
- game_name: str
- age: int
- observation_rule: str
- """The traits of the character you wish not to change."""
- status: str
- """Current activities of the character."""
- llm: BaseLanguageModel
-
- """The retriever to fetch related memories."""
- verbose: bool = False
-
- reflection_threshold: Optional[float] = None
- """When the total 'importance' of memories exceeds the above threshold, stop to reflect."""
-
- current_plan: List[str] = []
- belief: str = ""
- pattern: str = ""
- long_belief: str = ""
- counter_belief: str = ""
- plan: str = ""
- high_plan: str = ""
- """The current plan of the agent."""
-
- memory: List = ['']
- summary: str = "" #: :meta private:
- summary_refresh_seconds: int = 3600 #: :meta private:
- last_refreshed: datetime = Field(default_factory=datetime.now) #: :meta private:
-
- memory_importance: float = 0.0 #: :meta private:
- max_tokens_limit: int = 1200 #: :meta private:
- read_observation: str = "" #: :meta private:
-
- rule: str = "" #: :meta private:
- class Config:
- """Configuration for this pydantic object."""
-
- arbitrary_types_allowed = True
-
-
-
-
-
-
- def add_long_memory(self, memory_content: str) -> List[str]:
- """Add an observation or memory to the agent's memory."""
- self.memory.append(memory_content)
- return self.memory
-
-
-
-
- def planning_module(self, observation: str, recipient_name:str, previous_conversation: List[str] =None, belief: str =None, valid_action_list: List[str] = None, short_memory_summary:str = "",pattern:str = "",last_plan:str = "", mode: str = "second_tom") -> str:
- """Make Plans and Evaluate Plans."""
- """Combining these two modules together to save costs"""
-
- if mode == 'second_tom':
- prompt = PromptTemplate.from_template(
- "You are the objective player behind a NPC character called {initiator_name}, and you are playing the board game {game_name} with {recipient_name}.\n"
- + " The game rule is: {rule} \n"
- +'{pattern}\n'
- + " Your observation about the game status now is: {observation}\n"
- +'{belief}\n'
- + " Understanding all given information, can you do following things:"
- + " Make Reasonable Plans: Please plan several strategies according to actions {valid_action_list} you can play now to win the finally whole {game_name} games step by step. Note that you can say something or keep silent to confuse your opponent. "
- + " Potential {recipient_name}'s actions (if release) and Estimate Winning/Lose/Draw Rate for Each Plan: From the perspective of {recipient_name} , please infer what the action {recipient_name} with probability (normalize to number 100% in total) would do when {recipient_name} holds different cards and then calculate the winning/lose/draw rates when {recipient_name} holds different cards step by step. At last, please calculate the overall winning/lose/draw rates for each plan step by step considering {recipient_name}'s behaviour pattern. Output in a tree-structure: "
- + "Output: Plan 1: If I execute plan1. "
- "The winning/lose/draw rates when {recipient_name} holds card1: Based on {recipient_name}'s behaviour pattern, In the xx round, because {recipient_name} holds card1 (probability) and the combination with current public card (if release) (based on my belief on {recipient_name}), and if he sees my action, {recipient_name} will do action1 (probability) ( I actually hold card and the public card (if reveal) is , he holds card1 and the public card (if reveal), considering Single Game Win/Draw/Lose Rule, please infer I will win/draw/lose step by step ), action2 (probability) (considering Single Game Win/Draw/Lose Rule, please infer I will win/draw/lose step by step ),.. (normalize to number 100% in total); \n Overall (winning rate for his card1) is (probability = his card probability * win action probability), (lose rate for his card2) is (probability= his card probability * lose action probability), (draw rate for his card2) is (probability = his card probability * draw action probability) "
- "The winning/lose/draw rates when {recipient_name} holds card2: Based on {recipient_name}'s behaviour pattern, In the xx round, because {recipient_name} holds card2 (probability) and the combination with current public card (if release) (based on my belief on {recipient_name}) , and if he sees my action, he will do action1 (probability) (I actually hold card and the public card (if reveal) is , he holds card1 and the public card (if reveal), considering Single Game Win/Draw/Lose Rule, please infer I will win/draw/lose step by step ).. action2 (probability) (normalize to number 100% in total) (considering Single Game Win/Draw/Lose Rule, please infer I will win/draw/lose step by step ),.. ;..... continue ....\n Overall (winning rate for his card2) is (probability = his card probability * win action probability), (lose rate for his card2) is (probability= his card probability * lose action probability), (draw rate for his card2) is (probability = his card probability * draw action probability) "
- "...\n"
- "Plan1 overall {initiator_name}'s Winning/Lose/Draw rates : the Winning rate (probability) for plan 1 is (winning rate for his card1) + (winning rate for his card2) + .. ; Lose rate (probability) for plan 1 : (lose rate for his card1) + (lose rate for his card2) + .. ; Draw Rate (probability) for plan 1 : (draw rate for his card1) + (draw rate for his card2) + ... ; (normalize to number 100% in total) for plan1 \n"
- "Plan 2: If I execute plan2, The winning/lose/draw rates when {recipient_name} holds card1: Based on {recipient_name}'s behaviour pattern, In the xx round, if {recipient_name} holds card1 (probability) and the combination with current public card (if release), .. (format is similar with before ) ... continue .."
- "Plan 3: .. Coninue ... "
- + " The number of payoffs for each plan: Understanding your current observation, each new plans, please infer the number of wininng/lose payoffs for each plan step by step, Output: Plan1: After the action, All chips in the pot: If win, the winning payoff would be (Calculated by Winning Payoff Rules step by step) : After the action, All chips in the pot: If lose , the lose payoff would be: (Calculated by Lose Payoff Rules step by step). Plan2: After the action, All chips in the pot: If win, the winning chips would be (Calculated by Winning Payoff Rules step by step): After the action, All chips in the pot: If lose , the lose chips would be: (Calculated by Lose Payoff Rules step by step). If the number of my chips in pots have no change, please directly output them. \n"
- + " Estimate Expected Chips Gain for Each Plan: Understanding all the information and Estimate Winning/Lose/Draw Rate for Each Plan, please estimate the overall average Expected Chips Gain for each plan/strategy in the current game by calculating winning rate * (Winning Payoff Rule in the game rule) - lose rate * (Lose Payoff Rule in the game rule) step by step"
- + " Plan Selection: Please output the rank of estimated expected chips gains for every plan objectively step by step, and select the plan/strategy with the highest estimated expected chips gain considering both the strategy improvement. \n "
- )
-
- elif mode == 'first_tom':
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {initiator_name}, and you are playing the board game {game_name} with {recipient_name}.\n"
- + " The game rule is: {rule} \n"
- + " {pattern} \n"
- + " Your observation about the game status now is: {observation}\n"
- + ' {belief}\n'
- + " Understanding all given information, can you do following things:"
-            + " Make Reasonable Plans: Please plan several strategies according to actions {valid_action_list} you can play now to finally win the whole {game_name} game step by step. Note that you can say something or keep silent to confuse your opponent."
- + " Potential {recipient_name}'s actions and Estimate Winning/Lose/Draw Rate: From the perspective of {recipient_name}, please infer what the action {recipient_name} with probability (normalize to number 100% in total) would do when {recipient_name} holds different cards, and then calculate the winning/lose/draw rates when {recipient_name} holds different cards step by step. Output in a tree-structure: "
- + "Output: Based on {recipient_name}'s behaviour pattern and Analysis on {recipient_name}'s cards, "
-            "Winning/lose/draw rates when {recipient_name} holds card1 in the xx round: if {recipient_name} holds card1 (probability) (based on my belief on {recipient_name}) with the public card (if release), {recipient_name} will do action1 (probability) (infer I will win/draw/lose step by step (considering Single Game Win/Draw/Lose Rule and my factual card analysis with public card (if release), his card analysis with public card (if release) step by step ), action2 (probability) (infer I will win/draw/lose step by step ),.. (normalize to number 100% in total); Overall (winning rate for his card1) is (probability = his card probability * win action probability), (lose rate for his card1) is (probability= his card probability * lose action probability), (draw rate for his card1) is (probability = his card probability * draw action probability) "
- "The winning/lose/draw rates when {recipient_name} holds card2 in the xx round,: If {recipient_name} holds card2 (probability) (based on my belief on {recipient_name}) with the public card (if release), he will do action1 (probability) (infer I will win/draw/lose (considering Single Game Win/Draw/Lose Rule and my factual card analysis with current public card (if release), his card analysis with current public card (if release)) step by step ).. action2 (probability) (normalize to number 100% in total) (infer I will win/draw/lose step by step ),.. based on {recipient_name}'s behaviour pattern;..... continue .... Overall (winning rate for his card2) is (probability = his card probability * win action probability), (lose rate for his card2) is (probability= his card probability * lose action probability), (draw rate for his card2) is (probability = his card probability * draw action probability) "
- "..."
- "Overall {initiator_name}'s Winning/Lose/Draw rates : Based on the above analysis, the Winning rate (probability) is (winning rate for his card1) + (winning rate for his card2) + .. ; Lose rate (probability): (lose rate for his card1) + (lose rate for his card2) + .. ; Draw Rate (probability): (draw rate for his card1) + (draw rate for his card2) + ... ; (normalize to number 100% in total). \n"
-            + " Potential beliefs about the number of winning and lose payoffs for each plan: Understanding the game rule, your current observation, previous actions summarization, each new plan, Winning Payoff Rule, Lose Payoff Rule, please infer your several beliefs about the number of chips in pots for each plan step by step, Output: Plan1: Chips in the pot: If win, the winning payoff would be (Calculated by Winning Payoff Rules in the game rule) : After the action, If lose , the lose payoff would be: . Plan2: Chips in the pot: If win, the winning chips would be (Calculated by Winning Payoff Rules in the game rule): After the action, If lose , the lose chips would be: . If the number of my chips in pots has no change, please directly output them. "
-            + " Estimate Expected Chips Gain for Each Plan: Understanding the game rule, plans, and your knowledge about the {game_name}, please estimate the overall average Expected Chips Gain for each plan/strategy in the current game by calculating winning rate * (Winning Payoff Rule in the game rule) - lose rate * (Lose Payoff Rule in the game rule). Explain what the result would be if you do not select the plan, and explain why this final Expected Chips Gain is reasonable, step by step. "
-            + " Plan Selection: Please output the rank of estimated expected chips gains for every plan objectively step by step, and select the plan/strategy with the highest estimated expected chips gain, taking the strategy improvement into account. \n\n "
- )
- else:
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {initiator_name}, and you are playing the board game {game_name} with {recipient_name}.\n"
- + " The game rule is: {rule} \n"
- + " {pattern} \n"
- + " Your observation about the game status now is: {observation}\n"
- + " Understanding all given information, can you do following things:"
-            + " Make Reasonable Plans: Please plan several strategies according to actions {valid_action_list} you can play now to finally win the whole {game_name} game step by step. Note that you can say something or keep silent to confuse your opponent."
-            + " Estimate Winning/Lose/Draw Rate for Each Plan: Understanding the given information, and your knowledge about the {game_name}, please estimate the success rate of each step of each plan step by step and the overall average winning/lose/draw rate (normalize to number 100% in total) of each plan/strategy for the current game step by step following the template: If I do plan1, because I hold card, the public information (if release) and Single Game Win/Draw/Lose Rule, I will win or lose or draw (probability); ... continue .... Overall win/draw/lose rate: Based on the analysis, I can do the weighted average step by step to get that the overall weighted average winning rate is (probability), average lose rate is (probability), draw rate is (probability) (normalize to number 100% in total)\n "
-            + " Potential beliefs about the number of winning and lose payoffs for each plan: Understanding the game rule, your current observation, previous actions summarization, each new plan, Winning Payoff Rule, Lose Payoff Rule, please infer your several beliefs about the number of chips in pots for each plan step by step, Output: Plan1: Chips in the pot: If win, the winning payoff would be (Calculated by Winning Payoff Rules in the game rule) : After the action, Chips in the pot: If lose , the lose payoff would be: . Plan2: Chips in the pot: If win, the winning chips would be (Calculated by Winning Payoff Rules in the game rule): After the action, Chips in the pot: If lose , the lose chips would be: . If the number of my chips in pots has no change, please directly output them. "
-            + " Estimate Expected Chips Gain for Each Plan: Understanding the game rule, plans, and your knowledge about the {game_name}, please estimate the overall average Expected Chips Gain for each plan/strategy in the current game by calculating winning rate * (Winning Payoff Rule in the game rule) - lose rate * (Lose Payoff Rule in the game rule). Explain what the result would be if you do not select the plan, and explain why this final Expected Chips Gain is reasonable, step by step. "
-            + " Plan Selection: Please output the rank of estimated expected chips gains for every plan objectively step by step, and select the plan/strategy with the highest estimated expected chips gain, taking the strategy improvement into account. \n\n "
- )
-
- agent_summary_description = short_memory_summary
-
- belief = self.belief if belief is None else belief
-
- kwargs = dict(
-
- recent_observations=agent_summary_description,
- last_plan=last_plan,
- belief=belief,
- initiator_name=self.name,
- pattern=pattern,
- recipient_name=recipient_name,
- observation=observation,
- rule=self.rule,
- game_name=self.game_name,
- valid_action_list=valid_action_list
- )
-
-
- plan_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)
- self.plan = plan_prediction_chain.run(**kwargs)
- self.plan = self.plan.strip()
-
- return self.plan.strip()
-
-
-
- def get_belief(self, observation: str, recipient_name: str,short_memory_summary:str,pattern:str = "",mode: str = "second_tom") -> str:
- """React to get a belief."""
- if mode == 'second_tom':
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your estimated judgement about the behaviour pattern of {recipient_name} and improved strategy is: {pattern} \n"
- + " Your observation now is: {observation}\n"
- + " Your current game progress summarization including actions and conversations with {recipient_name} is: {recent_observations}\n"
- + " Understanding the game rule, the cards you have, your observation, progress summarization in the current game, the estimated behaviour pattern of {recipient_name}, the potential guess pattern of {recipient_name} on you, and your knowledge about the {game_name}, can you do following things? "
-            + " Analysis on my Cards: Understanding all given information and your knowledge about the {game_name}, please analyze your best combination and the advantages of your cards in the current round step by step."
- + " Belief on {recipient_name}'s cards: Understanding all given information, please infer the probabilities about the cards of {recipient_name} (normalize to number 100% in total) objectively step by step."
-            "Output: {recipient_name} saw my history actions (or not) and then did action1 (probability) in the 1st round, ... continue..... Before this round, {recipient_name} saw my history actions (or not) and did action1 (probability), because {recipient_name}'s behaviour pattern and the match with the public card (if release), {recipient_name} tends to have card1 (probability), card2 (probability) ..continue.. (normalize to number 100% in total)."
-            + " Analysis on {recipient_name}'s Cards: Understanding all given information and your knowledge about the {game_name}, please analyze {recipient_name}'s best combination and the advantages of {recipient_name}'s cards in the current round step by step."
-            + " Potential {recipient_name}'s current beliefs about your cards: Understanding all given information and your knowledge about the {game_name}, if you were {recipient_name} (he can only observe my actions but cannot see my cards), please infer {recipient_name}'s beliefs about your cards with probability (normalize to number 100% in total) step by step. Output: {agent_name} did action1 (probability) (after I did action or not) in the 1st round, ... continue... {agent_name} did action1 (probability) (after I did action or not) in the current round, from the perspective of {recipient_name}, {agent_name} tends to have card1 (probability), card2 (probability) ... (normalize to number 100% in total)."
- )
- elif mode == 'first_tom':
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your estimated judgement about the behaviour pattern of {recipient_name} and improved strategy is: {pattern} \n"
- + " Your observation now is: {observation}\n"
- + " Your current game progress summarization including actions and conversations with {recipient_name} is: {recent_observations}\n"
- + " Understanding the game rule, the cards you have, your observation, progress summarization in the current game, the estimated behaviour pattern of {recipient_name} on you, and your knowledge about the {game_name}, can you do following things? "
-            + " Analysis on my Cards: Understanding all given information, please analyze your best combination and the advantages of your cards in the current round step by step."
-            + " Belief on {recipient_name}'s cards: Understanding all given information, please infer the probabilities about the cards of {recipient_name} (normalize to number 100% in total) step by step. Template: In the 1st round, {recipient_name} did action1 (probability), ... continue... In the current round, {recipient_name} did action1 (probability), because {recipient_name}'s behaviour pattern and the match with the current public card (if release), he tends to have card1 (probability), card2 (probability) (normalize to number 100% in total). "
-            + " Analysis on {recipient_name}'s Cards: Understanding all given information, please analyze {recipient_name}'s best combination and the advantages of {recipient_name}'s cards in the current round step by step."
-
- )
- agent_summary_description = short_memory_summary
-
- kwargs = dict(
- agent_summary_description=agent_summary_description,
- recent_observations=agent_summary_description,
- agent_name=self.name,
- pattern= pattern,
- recipient_name=recipient_name,
- observation=observation,
- game_name=self.game_name,
- rule=self.rule
-
- )
- print(recipient_name)
-
- belief_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)
- self.belief = belief_prediction_chain.run(**kwargs)
- self.belief = self.belief.strip()
- return self.belief.strip()
-
-
- def get_pattern(self, recipient_name: str,game_pattern: str='', last_k:int=20,short_summarization:str='',mode:str='second_tom') -> str:
-        """Infer the opponent's behaviour pattern and an improved counter-strategy."""
-
- if mode == 'second_tom':
- prompt = PromptTemplate.from_template(
- "You are the objective player behind a NPC character called {agent_name}, and you are playing {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your previous game memory including observations, actions and conversations with {recipient_name} is: {long_memory}\n"
- + " {recipient_name}'s game pattern: Understanding all given information and your understanding about the {game_name}, please infer and estimate as many as possible reasonable {recipient_name}'s game behaviour pattern/preferences for each card he holds and each round with probability (normalize to number 100\% in total for each pattern item) and please also infer advantages of his card, and analysis how the {recipient_name}'s behaviour pattern/preferences are influenced by my actions when he holds different cards step by step. Output as a tree-structure "
- + "Output: When {recipient_name} holds card1 and the combination of public card (if release): if {recipient_name} is the first to act, he would like to do action1 (probabilities), action2 (probabilities) ... continue .. If {recipient_name} sees the action1/action2/action3 of the opponent or not, he would like to do action1 (probabilities), action2 (probabilities) ... continue ... (normalize to number 100% in total), if {recipient_name} sees the action2 of the opponent or not, ... continue ..(more patterns with different actions).. in the 1st round, ; If {recipient_name} sees the action1 of the opponent or not, he would like to do action1 (probabilities), action2 (probabilities) ... continue... (normalize to number 100% in total), ... continue ..(more patterns)..In the 2nd round,;"
- "When {recipient_name} holds card2 and combination of public card (if release): if {recipient_name} is the first to act, he would like to do action1 (probabilities), action2 (probabilities) ... continue .. If {recipient_name} sees the action1 of the opponent or not, he would like to do action1 (probabilities), action2 (probabilities) .. continue ... (normalize to number 100% in total)...in the 1st round,; .. continue ..(more patterns with different actions).in the 2nd round .. "
- " (more patterns with different cards).. continue.."
-            + " {recipient_name}'s guess on my game pattern: Understanding all given information, please infer several reasonable beliefs about my game pattern/preference when holding different cards from the perspective of {recipient_name} (please consider the advantages of the card, actions and the match with the public card (if release)) for every round of the game in detail as a tree-structure output step by step"
- + "Output: In the 1st round, When name holds card1 with public card (if release), he would like to do (probabilities), action2 (probabilities) (normalize to number 100% in total) o ... continue .. and then do action ...;"
- "When name holds card2 with public card (if release), ... "
- " .. continue.."
- + " Strategy Improvement: Understanding the above information, think about what strategies I can adopt to exploit the game pattern of {recipient_name} and {recipient_name}'s guess on my game pattern for winning {recipient_name} in the whole game step by step. (Note that you cannot observe the cards of the opponent during the game, but you can observe his actions). Output as a tree-structure:"
- "When I hold card and the public card (if release), and see the action of the opponent, I would like to do action1; ... "
- )
- elif mode == 'first_tom':
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your previous game memory including observations, actions and conversations with {recipient_name} is: {long_memory}\n"
- + " Please understand the game rule, previous all game history and your knowledge about the {game_name}, can you do following things for future games? "
- + " {recipient_name}'s game pattern: Understanding all given information, please infer all possible reasonable {recipient_name}'s game pattern/preferences for each card he holds and each round with probability (normalize to number 100\% in total for each pattern item) for every round of the game as a tree-structure output step by step "
- + "Output: In the 1st round, when name holds card1 and the public card (if release), he would like to do action (probabilities); when name holds card2 and the public card (if release), he would like to do action (probabilities), ... continue.. In the 2nd round, when name holds card1 and the public card (if release), .(similar with before).. continue. "
- + " Number of chips reason: Think about why you can have these chips in all previous games step by step. "
-            + " Reflect: Reflect on whether your actions in previous games were right or wrong and how they won or lost concrete chips step by step (Note that you cannot observe the cards of the opponent during the game, but you can observe his actions) "
- + " Strategy Improvement: Understanding the above information, think about what strategies I can adopt to exploit the game pattern of {recipient_name} for winning {recipient_name} in the whole game step by step. (Note that you cannot observe the cards of the opponent during the game, but you can observe his actions). Output as a tree-structure:"
- )
- else:
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your previous game memory including observations, actions and conversations with {recipient_name} is: {long_memory}\n"
- + " Please understand the game rule, previous all game history and your knowledge about the {game_name}, can you do following things for future games? "
- + " Number of chips reason: Think about why you can have these chips in all previous games step by step. "
-            + " Reflect: Reflect on whether your actions in previous games were right or wrong and how they won or lost concrete chips step by step. (Note that you cannot observe the cards of the opponent during the game, but you can observe his actions) "
- + " Strategy Improvement: Understanding the above information, think about what strategies I need to adopt to win {recipient_name} for the whole game step by step. (Note that you cannot observe the cards of the opponent during the game, but you can observe his actions). Output as a tree-structure:"
- )
- reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
- long_memory = self.memory[-last_k:]
- long_memory_str = "\n\n".join([o for o in long_memory])
-
- kwargs = dict(
- long_memory=long_memory_str,
- game_pattern=game_pattern,
- agent_name=self.name,
- recipient_name=recipient_name,
- game_name=self.game_name,
- rule=self.rule
-
- )
- # print(kwargs)
-
- self.long_belief = reflection_chain.run(**kwargs)
- self.long_belief = self.long_belief.strip()
- return self.long_belief.strip()
-
-
-
- def get_summarization(self, recipient_name: str,game_memory: str, opponent_name:str) -> str:
- """Get a long memory summarization to save costs."""
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " The observation conversion rules are: {observation_rule}\n"
- + " One game memory including observations, actions and conversations with {recipient_name} is: {long_memory}\n"
- + " Understanding the game rule, observation conversion rules and game history and your knowledge about the {game_name}, can you do following things:"
-            + " History summarization: summarize the game history with action, observation, and result information using the template, and respond briefly: In the first round of the first game, name holds card1 and does action .... continue ..."
-            + "{opponent_name}'s card reasoning: If the card of {opponent_name} is not available, because {agent_name}'s card is xx and public card (if release) is xxx, and {opponent_name}'s behaviours are xx, the current game result is xx, please infer {opponent_name}'s card with probability (100% in total) with your understanding of all the above information confidently step by step. \n"
- )
- reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
- kwargs = dict(
- observation_rule=self.observation_rule,
- long_memory=game_memory,
- agent_name=self.name,
- recipient_name=recipient_name,
- opponent_name=opponent_name,
- # observation=observation,
- game_name=self.game_name,
- rule=self.rule
-
- )
- # print(kwargs)
-
- self.long_belief = reflection_chain.run(**kwargs)
- self.long_belief = self.long_belief.strip()
- return self.long_belief.strip()
-
-
- def get_short_memory_summary(self, observation: str, recipient_name: str,short_memory_summary:str) -> str:
-        """Summarize the recent game history into a short memory."""
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your current observation is: {observation}\n"
- + " The current game history including previous action, observations and conversation is: {agent_summary_description}\n"
-            + " Based on the game rule, your observation and your knowledge about the {game_name}, please summarize the current history. Output as a tree-structure, and respond briefly: "
-            + " In the first round, name does action, and says xxx .... continue ..."
- )
-
- agent_summary_description = short_memory_summary
-
- kwargs = dict(
- agent_summary_description=agent_summary_description,
- recent_observations=agent_summary_description,
- agent_name=self.name,
- recipient_name=recipient_name,
- observation=observation,
- game_name=self.game_name,
- rule=self.rule
-
- )
-
- belief_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)
- self.belief = belief_prediction_chain.run(**kwargs)
- self.belief = self.belief.strip()
- return self.belief.strip()
-
-
-
- def convert_obs(self, observation: str, recipient_name: str, user_index: str, valid_action_list:str) -> str:
-        """Convert the raw observation and valid action list into readable text."""
- prompt = PromptTemplate.from_template(
- "You are the player behind a NPC character called {agent_name} with player index {user_index}, and you are playing the board game {game_name} with {recipient_name}. \n"
- + " The game rule is: {rule} \n"
- + " Your observation now is: {observation}\n"
- + " You will receive a valid action list you can perform in this turn \n"
- + " Your valid action list is: {valid_action_list}\n"
- + " The observation conversion rules are: {observation_rule}\n"
-            + " Please convert {observation} and {valid_action_list} to readable text based on the observation conversion rules and your knowledge about the {game_name} (respond briefly).\n\n"
- )
- kwargs = dict(
- user_index=user_index,
- agent_name=self.name,
- rule=self.rule,
- recipient_name=recipient_name,
- observation=observation,
- valid_action_list=valid_action_list,
- game_name=self.game_name,
- observation_rule=self.observation_rule
- )
- obs_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)
- self.read_observation = obs_prediction_chain.run(**kwargs)
- self.read_observation = self.read_observation.strip()
- return self.read_observation
-
-
-
- def action_decision(self, observation: str, valid_action_list: List[str], promp_head: str, act: str = None,short_memory_summary:str="") -> Tuple[str,str]:
-        """React to a given observation."""
- prompt = PromptTemplate.from_template(
- promp_head
- + "\nYour plan is: {plan}"
- + "\n Based on the plan, please select the next action from the available action list: {valid_action_list} (Just one word) and say something to the opponent player to bluff or confuse him or keep silent to finally win the whole game and reduce the risk of your action (respond sentence only). Please respond them and split them by |"
- + "\n\n"
- )
-
- agent_summary_description = short_memory_summary
-
- kwargs = dict(
- agent_summary_description= agent_summary_description,
- # current_time=current_time_str,
- # relevant_memories=relevant_memories_str,
- agent_name= self.name,
- game_name=self.game_name,
- observation= observation,
- agent_status= self.status,
- valid_action_list = valid_action_list,
- plan = self.plan,
- belief = self.belief,
- act = act
- )
- action_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)
-
- result = action_prediction_chain.run(**kwargs)
- if "|" in result:
- result,result_comm = result.split("|",1)
- else:
- result_comm = ""
- return result.strip(),result_comm.strip()
-
- def make_act(self, observation: str,opponent_name: str, player_index:int,valid_action_list: List, verbose_print:bool,game_idx:int,round:int,bot_short_memory:List, bot_long_memory:List, console,log_file_name='', mode='second_tom') -> Tuple[bool, str]:
- readable_text_amy_obs = self.convert_obs(observation, opponent_name, player_index, valid_action_list)
- if verbose_print:
- util.get_logging(logger_name=log_file_name + '_obs',
- content={str(game_idx + 1) + "_" + str(round): {"raw_obs": observation,
- "readable_text_obs": readable_text_amy_obs}})
- console.print('readable_text_obs: ', style="red")
- print(readable_text_amy_obs)
- time.sleep(0)
- if len(bot_short_memory[player_index]) == 1:
- short_memory_summary = f'{game_idx+1}th Game Start \n'+readable_text_amy_obs
- else:
- short_memory_summary = self.get_short_memory_summary(observation=readable_text_amy_obs, recipient_name=opponent_name,short_memory_summary='\n'.join(bot_short_memory[player_index]))
-
- if log_file_name is not None:
- util.get_logging(logger_name=log_file_name + '_short_memory',
- content={str(game_idx + 1) + "_" + str(round): {
- "raw_short_memory": '\n'.join(bot_short_memory[player_index]),
- "short_memory_summary": short_memory_summary}})
- if verbose_print:
- console.print('short_memory_summary: ', style="yellow")
- print(short_memory_summary)
-
- time.sleep(0)
- if round <= 1:
- self.pattern = self.get_pattern(opponent_name,'',short_summarization=short_memory_summary,mode=mode)
- if log_file_name is not None:
- util.get_logging(logger_name=log_file_name + '_pattern_model',
- content={str(game_idx + 1) + "_" + str(round): self.pattern})
- console.print('pattern: ', style="blue")
- print(self.pattern)
-
- time.sleep(0)
- print(opponent_name)
-
- if mode == 'second_tom' or mode == 'first_tom':
- belief = self.get_belief(readable_text_amy_obs,opponent_name,short_memory_summary=short_memory_summary,pattern=self.pattern,mode=mode)
- if verbose_print:
- console.print(self.name + " belief: " , style="deep_pink3")
- print(self.name + " belief: " + str(belief))
- util.get_logging(logger_name=log_file_name + '_belief',
- content={str(game_idx + 1) + "_" + str(round): {
- "belief": str(belief)}})
- else:
- belief = ''
-
- time.sleep(0)
- plan = self.planning_module(readable_text_amy_obs,opponent_name, belief=belief,valid_action_list=valid_action_list,short_memory_summary=short_memory_summary,pattern=self.pattern,last_plan='', mode=mode)
- if verbose_print:
- console.print(self.name + " plan: " , style="orchid")
- print(self.name + " plan: " + str(plan))
- util.get_logging(logger_name=log_file_name + '_plan',
- content={str(game_idx + 1) + "_" + str(round): {
- "plan": str(plan)}})
- time.sleep(0)
- promp_head = ''
- act, comm = self.action_decision(readable_text_amy_obs, valid_action_list, promp_head,short_memory_summary=short_memory_summary)
- if log_file_name is not None:
- util.get_logging(logger_name= log_file_name + '_act',
- content={str(game_idx + 1) + "_" + str(round): {
- "act": str(act), "talk_sentence": str(comm)}})
-
- while act not in valid_action_list:
-            print('Action ' + str(act) + ' is not a valid action in the valid action list, please try again.\n')
- promp_head += 'Action {act} is not a valid action in {valid_action_list}, please try again.\n'
- act, comm = self.action_decision( readable_text_amy_obs, valid_action_list, promp_head,act)
- print(self.name + " act: " + str(act))
- print(comm)
-
-        bot_short_memory[player_index].append(f"{self.name} has the observation {readable_text_amy_obs}, tries to take action: {act} and says {comm} to {opponent_name}")
-        bot_short_memory[((player_index + 1)%2)].append(f"{self.name} tries to take action: {act} and says {comm} to {opponent_name}")
-
-        bot_long_memory[player_index].append(
-            f"{self.name} has the observation {observation}, tries to take action: {act} and says {comm} to {opponent_name}")
- return act,comm,bot_short_memory,bot_long_memory
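Every reasoning step in this agent (observation conversion, summary, belief, plan, action) follows the same PromptTemplate + LLMChain pattern. Below is a minimal, self-contained sketch of that pattern, assuming the legacy LangChain API this file targets; FakeListLLM is a stand-in model so the snippet runs offline, and the prompt text and variable names are illustrative rather than the agent's actual templates.

```python
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

# Template variables are filled in at run() time, exactly as the agent's kwargs dicts do.
prompt = PromptTemplate.from_template(
    "You are playing {game_name}. Your observation is: {observation}\n"
    "Summarize the current state in one sentence."
)

# Stand-in LLM that replays canned responses; swap in a real chat model in practice.
llm = FakeListLLM(responses=["The opponent raised, so the pot now holds four chips."])

chain = LLMChain(llm=llm, prompt=prompt)
summary = chain.run(game_name="Leduc Hold'em", observation="the opponent raised 2 chips")
print(summary.strip())
```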
diff --git a/spaces/daishen/LAiW/README.md b/spaces/daishen/LAiW/README.md
deleted file mode 100644
index b12750088bcd4aa12d38287fc9e903a78533167d..0000000000000000000000000000000000000000
--- a/spaces/daishen/LAiW/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: SCULaiw
-emoji: 🦀
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.44.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/datasciencedojo/Handpose/app.py b/spaces/datasciencedojo/Handpose/app.py
deleted file mode 100644
index 7d7386c04462cfa8636e11f01fc56d1a19917a67..0000000000000000000000000000000000000000
--- a/spaces/datasciencedojo/Handpose/app.py
+++ /dev/null
@@ -1,88 +0,0 @@
-__all__ = ['learn','classify_image','categories','image','label','examples','intf']
-
-from fastai.vision.all import *
-import gradio as gr
-
-# Cell
-learn = load_learner('handpose.pkl')
-
-# Cell
-categories = ('call', 'dislike', 'fist', 'four', 'like', 'mute', 'ok', 'one', 'palm', 'peace',
- 'peace_inverted', 'rock', 'stop', 'stop_inverted', 'three', 'three2', 'two_up', 'two_up_inverted')
-
-def classify_image(img):
- pred, idx, probs = learn.predict(img)
- return dict(zip(categories, map(float, probs)))
-
-# Cell
-image = gr.inputs.Image(shape=(192,192),source='webcam')
-label = gr.outputs.Label()
-examples = ['beyonce-ok.jpg']
-
-css = """
-footer {display:none !important}
-.output-markdown{display:none !important}
-div[data-testid="label"] {overflow-x: hidden !important; overflow-y: scroll !important; height:520px !important;}
-.gr-button-primary {
- z-index: 14;
- height: 43px;
- width: 130px;
- left: 0px;
- top: 0px;
- padding: 0px;
- cursor: pointer !important;
- background: none rgb(17, 20, 45) !important;
- border: none !important;
- text-align: center !important;
- font-family: Poppins !important;
- font-size: 14px !important;
- font-weight: 500 !important;
- color: rgb(255, 255, 255) !important;
- line-height: 1 !important;
- border-radius: 12px !important;
- transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
- box-shadow: none !important;
-}
-.gr-button-primary:hover{
- z-index: 14;
- height: 43px;
- width: 130px;
- left: 0px;
- top: 0px;
- padding: 0px;
- cursor: pointer !important;
- background: none rgb(37, 56, 133) !important;
- border: none !important;
- text-align: center !important;
- font-family: Poppins !important;
- font-size: 14px !important;
- font-weight: 500 !important;
- color: rgb(255, 255, 255) !important;
- line-height: 1 !important;
- border-radius: 12px !important;
- transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
- box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important;
-}
-.hover\:bg-orange-50:hover {
- --tw-bg-opacity: 1 !important;
- background-color: rgb(229,225,255) !important;
-}
-
-.from-orange-400 {
- --tw-gradient-from: rgb(17, 20, 45) !important;
- --tw-gradient-to: rgb(255 150 51 / 0);
- --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to) !important;
-}
-.group-hover\:from-orange-500{
- --tw-gradient-from:rgb(17, 20, 45) !important;
- --tw-gradient-to: rgb(37 56 133 / 37%);
- --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to) !important;
-}
-.group:hover .group-hover\:text-orange-500{
- --tw-text-opacity: 1 !important;
- color:rgb(37 56 133 / var(--tw-text-opacity)) !important;
-}
-"""
-
-intf = gr.Interface(fn=classify_image, inputs=image, outputs=label,examples=examples ,title="Hand Gesture Recognition | Data Science Dojo", css=css)
-intf.launch(inline=False)
\ No newline at end of file
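This app was written against the `gr.inputs`/`gr.outputs` namespaces that later Gradio releases removed. A hedged sketch of the same interface using top-level components instead; the classifier below is a stub standing in for the fastai learner, and keyword names such as the webcam-source option differ slightly between Gradio 3.x and 4.x.

```python
import gradio as gr

def classify_image_stub(img):
    # Stand-in for learn.predict(); returns fixed confidences for illustration.
    return {"ok": 0.9, "palm": 0.1}

demo = gr.Interface(
    fn=classify_image_stub,
    inputs=gr.Image(type="pil"),           # webcam capture is enabled via the source/sources kwarg
    outputs=gr.Label(num_top_classes=5),
    title="Hand Gesture Recognition | Data Science Dojo",
)
demo.launch()
```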
diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/training/lp_train.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/training/lp_train.py
deleted file mode 100644
index 24a19bacd0a4b789415cfccbce1f8bc99bc493ed..0000000000000000000000000000000000000000
--- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/training/lp_train.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import json
-import logging
-import math
-import os
-import time
-from contextlib import suppress
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-try:
- import wandb
-except ImportError:
- wandb = None
-
-from open_clip import LPLoss, LPMetrics, lp_gather_features
-from open_clip.utils import do_mixup, get_mix_lambda
-from .distributed import is_master
-from .zero_shot import zero_shot_eval
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
-
-
-def unwrap_model(model):
- if hasattr(model, "module"):
- return model.module
- else:
- return model
-
-
-def train_one_epoch(
- model,
- data,
- epoch,
- optimizer,
- scaler,
- scheduler,
- args,
- tb_writer=None,
- extra_suffix="",
-):
- device = torch.device(args.device)
- autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
- model.train()
- loss = LPLoss(args.lp_loss)
-
- dataloader, sampler = data["train"].dataloader, data["train"].sampler
- if args.distributed and sampler is not None:
- sampler.set_epoch(epoch)
- num_batches_per_epoch = dataloader.num_batches
- sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))
-
- # for toy dataset
- if args.dataset_type == "toy":
- dataloader.dataset.generate_queue()
-
- loss_m = AverageMeter()
- batch_time_m = AverageMeter()
- data_time_m = AverageMeter()
- end = time.time()
-
- for i, batch in enumerate(dataloader):
- step = num_batches_per_epoch * epoch + i
-
- if isinstance(scheduler, dict):
- for s in scheduler.values():
- s(step)
- else:
- scheduler(step)
-
-        audio = batch  # contains mel_spec, waveform, and longer list
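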
- class_label = batch["class_label"]
- # audio = audio.to(device=device, non_blocking=True)
- class_label = class_label.to(device=device, non_blocking=True)
-
- if args.mixup:
- # https://github.com/RetroCirce/HTS-Audio-Transformer/blob/main/utils.py#L146
- mix_lambda = torch.from_numpy(
- get_mix_lambda(0.5, len(audio["waveform"]))
- ).to(device)
- class_label = do_mixup(class_label, mix_lambda)
- else:
- mix_lambda = None
-
- data_time_m.update(time.time() - end)
- if isinstance(optimizer, dict):
- for o_ in optimizer.values():
- o_.zero_grad()
- else:
- optimizer.zero_grad()
-
- with autocast():
- pred = model(audio, mix_lambda=mix_lambda, device=device)
- total_loss = loss(pred, class_label)
-
- if isinstance(optimizer, dict):
- if scaler is not None:
- scaler.scale(total_loss).backward()
- for o_ in optimizer.values():
- if args.horovod:
- o_.synchronize()
- scaler.unscale_(o_)
- with o_.skip_synchronize():
- scaler.step(o_)
- else:
- scaler.step(o_)
- scaler.update()
- else:
- total_loss.backward()
- for o_ in optimizer.values():
- o_.step()
- else:
- if scaler is not None:
- scaler.scale(total_loss).backward()
- if args.horovod:
- optimizer.synchronize()
- scaler.unscale_(optimizer)
- with optimizer.skip_synchronize():
- scaler.step(optimizer)
- else:
- scaler.step(optimizer)
- scaler.update()
- else:
- total_loss.backward()
- optimizer.step()
-
- # Note: we clamp to 4.6052 = ln(100), as in the original paper.
- with torch.no_grad():
- unwrap_model(model).clap_model.logit_scale_a.clamp_(0, math.log(100))
- unwrap_model(model).clap_model.logit_scale_t.clamp_(0, math.log(100))
-
- batch_time_m.update(time.time() - end)
- end = time.time()
- batch_count = i + 1
-
- if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch):
- if isinstance(audio, dict):
- batch_size = len(audio["waveform"])
- else:
- batch_size = len(audio)
- num_samples = batch_count * batch_size * args.world_size
- samples_per_epoch = dataloader.num_samples
- percent_complete = 100.0 * batch_count / num_batches_per_epoch
-
- # NOTE loss is coarsely sampled, just master node and per log update
- loss_m.update(total_loss.item(), batch_size)
- if isinstance(optimizer, dict):
- logging.info(
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
- f"Data (t): {data_time_m.avg:.3f} "
- f"Batch (t): {batch_time_m.avg:.3f} "
- f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]}"
- )
- log_data = {
- "loss": loss_m.val,
- "data_time": data_time_m.val,
- "batch_time": batch_time_m.val,
- "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()],
- }
- else:
- logging.info(
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
- f"Data (t): {data_time_m.avg:.3f} "
- f"Batch (t): {batch_time_m.avg:.3f} "
- f"LR: {optimizer.param_groups[0]['lr']:5f} "
- )
-
- # Save train loss / etc. Using non avg meter values as loggers have their own smoothing
- log_data = {
- "loss": loss_m.val,
- "data_time": data_time_m.val,
- "batch_time": batch_time_m.val,
- "lr": optimizer.param_groups[0]["lr"],
- }
- for name, val in log_data.items():
- name = f"train{extra_suffix}/{name}"
- if tb_writer is not None:
- tb_writer.add_scalar(name, val, step)
- if args.wandb:
- assert wandb is not None, "Please install wandb."
- wandb.log({name: val, "step": step})
-
- # resetting batch / data time meters per log window
- batch_time_m.reset()
- data_time_m.reset()
- # end for
-
-
-def evaluate(model, data, epoch, args, tb_writer=None, extra_suffix=""):
- metrics = {}
- if not args.parallel_eval:
- if not is_master(args):
- return metrics
- device = torch.device(args.device)
- model.eval()
-
- # CHANGE
- # zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
- # metrics.update(zero_shot_metrics)
- if is_master(args):
- print("Evaluating...")
- metric_names = args.lp_metrics.split(",")
- eval_tool = LPMetrics(metric_names=metric_names)
-
- autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
- if "val" in data and (
- args.val_frequency
- and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)
- ):
- if args.parallel_eval:
- dataloader, sampler = data["val"].dataloader, data["val"].sampler
- if args.distributed and sampler is not None:
- sampler.set_epoch(epoch)
- samples_per_val = dataloader.num_samples
- else:
- dataloader = data["val"].dataloader
- num_samples = 0
- samples_per_val = dataloader.num_samples
-
- eval_info = {"pred": [], "target": []}
- with torch.no_grad():
- for i, batch in enumerate(dataloader):
-                audio = batch  # contains mel_spec, waveform, and longer list
- class_label = batch["class_label"]
-
- # audio = audio.to(device=device, non_blocking=True)
- class_label = class_label.to(device=device, non_blocking=True)
-
- with autocast():
- pred = model(audio, device=device)
- if args.parallel_eval:
- pred, class_label = lp_gather_features(
- pred, class_label, args.world_size, args.horovod
- )
- eval_info["pred"].append(pred)
- eval_info["target"].append(class_label)
-
- num_samples += class_label.shape[0]
-
- if (i % 100) == 0: # and i != 0:
- logging.info(
- f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]"
- )
-
- if is_master(args):
- eval_info["pred"] = torch.cat(eval_info["pred"], 0).cpu()
- eval_info["target"] = torch.cat(eval_info["target"], 0).cpu()
- metric_dict = eval_tool.evaluate_mertics(
- eval_info["pred"], eval_info["target"]
- )
- metrics.update(metric_dict)
- if "epoch" not in metrics.keys():
- metrics.update({"epoch": epoch})
-
- if is_master(args):
- if not metrics:
- return metrics
-
- logging.info(
- f"Eval Epoch: {epoch} "
- + "\n".join(
- ["\t".join([f"{m}: {round(metrics[m], 4):.4f}"]) for m in metrics]
- )
- )
- if args.save_logs:
- for name, val in metrics.items():
- if tb_writer is not None:
- tb_writer.add_scalar(f"val{extra_suffix}/{name}", val, epoch)
-
- with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
- f.write(json.dumps(metrics))
- f.write("\n")
-
- if args.wandb:
- assert wandb is not None, "Please install wandb."
- for name, val in metrics.items():
- wandb.log({f"val{extra_suffix}/{name}": val, "epoch": epoch})
-
- return metrics
- else:
- return metrics
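The mixed-precision branch of `train_one_epoch` above follows PyTorch's standard scale, backward, (unscale,) step, update sequence. A minimal sketch of that sequence on a toy model; on a CPU-only machine the scaler and autocast are simply disabled, and the model, data, and loss here are placeholders.

```python
import torch
import torch.nn.functional as F

use_cuda = torch.cuda.is_available()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(enabled=use_cuda)

x = torch.randn(4, 8)
target = torch.randint(0, 2, (4,))

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=use_cuda):
    loss = F.cross_entropy(model(x), target)

# Same ordering as the training loop above: scale the loss, backprop,
# let the scaler drive the optimizer step, then update the scale factor.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
print(f"loss: {loss.item():.4f}")
```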
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py
deleted file mode 100644
index bafc43a19d432290867a5c08b9820f2e4f79aea3..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# GD file handling
-#
-# History:
-# 1996-04-12 fl Created
-#
-# Copyright (c) 1997 by Secret Labs AB.
-# Copyright (c) 1996 by Fredrik Lundh.
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-"""
-.. note::
- This format cannot be automatically recognized, so the
- class is not registered for use with :py:func:`PIL.Image.open()`. To open a
- gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.
-
-.. warning::
- THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
- implementation is provided for convenience and demonstrational
- purposes only.
-"""
-
-
-from . import ImageFile, ImagePalette, UnidentifiedImageError
-from ._binary import i16be as i16
-from ._binary import i32be as i32
-
-
-class GdImageFile(ImageFile.ImageFile):
- """
- Image plugin for the GD uncompressed format. Note that this format
- is not supported by the standard :py:func:`PIL.Image.open()` function. To use
- this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
- use the :py:func:`PIL.GdImageFile.open()` function.
- """
-
- format = "GD"
- format_description = "GD uncompressed images"
-
- def _open(self):
- # Header
- s = self.fp.read(1037)
-
- if i16(s) not in [65534, 65535]:
- msg = "Not a valid GD 2.x .gd file"
- raise SyntaxError(msg)
-
- self.mode = "L" # FIXME: "P"
- self._size = i16(s, 2), i16(s, 4)
-
- true_color = s[6]
- true_color_offset = 2 if true_color else 0
-
- # transparency index
- tindex = i32(s, 7 + true_color_offset)
- if tindex < 256:
- self.info["transparency"] = tindex
-
- self.palette = ImagePalette.raw(
- "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
- )
-
- self.tile = [
- (
- "raw",
- (0, 0) + self.size,
- 7 + true_color_offset + 4 + 256 * 4,
- ("L", 0, 1),
- )
- ]
-
-
-def open(fp, mode="r"):
- """
- Load texture from a GD image file.
-
- :param fp: GD file name, or an opened file handle.
- :param mode: Optional mode. In this version, if the mode argument
- is given, it must be "r".
- :returns: An image instance.
- :raises OSError: If the image could not be read.
- """
- if mode != "r":
- msg = "bad mode"
- raise ValueError(msg)
-
- try:
- return GdImageFile(fp)
- except SyntaxError as e:
- msg = "cannot identify this image file"
- raise UnidentifiedImageError(msg) from e
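As the module docstring stresses, GD files are never auto-detected, so the plugin's module-level `open()` has to be called explicitly. A short hedged usage sketch; `texture.gd` is a placeholder path.

```python
from PIL import GdImageFile

# PIL.Image.open() will not recognise a .gd file; call the plugin directly.
im = GdImageFile.open("texture.gd")   # placeholder path
print(im.format, im.mode, im.size)
im.load()                             # decodes the raw, palette-indexed "L" data
```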
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py
deleted file mode 100644
index e48d3c1e97e02cd188b567b50a4c0c615f187e4d..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/abc/_tasks.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from __future__ import annotations
-
-import sys
-from abc import ABCMeta, abstractmethod
-from types import TracebackType
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar, overload
-from warnings import warn
-
-if sys.version_info >= (3, 8):
- from typing import Protocol
-else:
- from typing_extensions import Protocol
-
-if TYPE_CHECKING:
- from anyio._core._tasks import CancelScope
-
-T_Retval = TypeVar("T_Retval")
-T_contra = TypeVar("T_contra", contravariant=True)
-
-
-class TaskStatus(Protocol[T_contra]):
- @overload
- def started(self: TaskStatus[None]) -> None:
- ...
-
- @overload
- def started(self, value: T_contra) -> None:
- ...
-
- def started(self, value: T_contra | None = None) -> None:
- """
- Signal that the task has started.
-
- :param value: object passed back to the starter of the task
- """
-
-
-class TaskGroup(metaclass=ABCMeta):
- """
- Groups several asynchronous tasks together.
-
- :ivar cancel_scope: the cancel scope inherited by all child tasks
- :vartype cancel_scope: CancelScope
- """
-
- cancel_scope: CancelScope
-
- async def spawn(
- self,
- func: Callable[..., Awaitable[Any]],
- *args: object,
- name: object = None,
- ) -> None:
- """
- Start a new task in this task group.
-
- :param func: a coroutine function
- :param args: positional arguments to call the function with
- :param name: name of the task, for the purposes of introspection and debugging
-
- .. deprecated:: 3.0
- Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you
- can keep using this until AnyIO 4.
-
- """
- warn(
- 'spawn() is deprecated -- use start_soon() (without the "await") instead',
- DeprecationWarning,
- )
- self.start_soon(func, *args, name=name)
-
- @abstractmethod
- def start_soon(
- self,
- func: Callable[..., Awaitable[Any]],
- *args: object,
- name: object = None,
- ) -> None:
- """
- Start a new task in this task group.
-
- :param func: a coroutine function
- :param args: positional arguments to call the function with
- :param name: name of the task, for the purposes of introspection and debugging
-
- .. versionadded:: 3.0
- """
-
- @abstractmethod
- async def start(
- self,
- func: Callable[..., Awaitable[Any]],
- *args: object,
- name: object = None,
- ) -> Any:
- """
- Start a new task and wait until it signals for readiness.
-
- :param func: a coroutine function
- :param args: positional arguments to call the function with
- :param name: name of the task, for the purposes of introspection and debugging
- :return: the value passed to ``task_status.started()``
- :raises RuntimeError: if the task finishes without calling ``task_status.started()``
-
- .. versionadded:: 3.0
- """
-
- @abstractmethod
- async def __aenter__(self) -> TaskGroup:
- """Enter the task group context and allow starting new tasks."""
-
- @abstractmethod
- async def __aexit__(
- self,
- exc_type: type[BaseException] | None,
- exc_val: BaseException | None,
- exc_tb: TracebackType | None,
- ) -> bool | None:
- """Exit the task group context waiting for all tasks to finish."""
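A hedged sketch of the TaskGroup contract these docstrings describe, using `anyio.create_task_group()`: `start_soon()` launches a child without waiting, while `start()` blocks until the child calls `task_status.started()` and returns whatever value was passed to it. The `serve` coroutine below is a toy stand-in.

```python
import anyio

async def serve(port: int, *, task_status=anyio.TASK_STATUS_IGNORED) -> None:
    # ... bind a socket here ...
    task_status.started(port)   # hand the value back to the caller of start()
    await anyio.sleep(0.1)      # stand-in for the real serving loop

async def main() -> None:
    async with anyio.create_task_group() as tg:
        tg.start_soon(anyio.sleep, 0.05)           # fire-and-forget child task
        ready_port = await tg.start(serve, 8080)   # waits for started()
        print("server ready on port", ready_port)

anyio.run(main)
```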
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/__init__.py
deleted file mode 100644
index 9a1dab048917edc420af440c73bd1d689de6b3fa..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Click is a simple Python module inspired by the stdlib optparse to make
-writing command line scripts fun. Unlike other modules, it's based
-around a simple API that does not come with too much magic and is
-composable.
-"""
-from .core import Argument as Argument
-from .core import BaseCommand as BaseCommand
-from .core import Command as Command
-from .core import CommandCollection as CommandCollection
-from .core import Context as Context
-from .core import Group as Group
-from .core import MultiCommand as MultiCommand
-from .core import Option as Option
-from .core import Parameter as Parameter
-from .decorators import argument as argument
-from .decorators import command as command
-from .decorators import confirmation_option as confirmation_option
-from .decorators import group as group
-from .decorators import help_option as help_option
-from .decorators import make_pass_decorator as make_pass_decorator
-from .decorators import option as option
-from .decorators import pass_context as pass_context
-from .decorators import pass_obj as pass_obj
-from .decorators import password_option as password_option
-from .decorators import version_option as version_option
-from .exceptions import Abort as Abort
-from .exceptions import BadArgumentUsage as BadArgumentUsage
-from .exceptions import BadOptionUsage as BadOptionUsage
-from .exceptions import BadParameter as BadParameter
-from .exceptions import ClickException as ClickException
-from .exceptions import FileError as FileError
-from .exceptions import MissingParameter as MissingParameter
-from .exceptions import NoSuchOption as NoSuchOption
-from .exceptions import UsageError as UsageError
-from .formatting import HelpFormatter as HelpFormatter
-from .formatting import wrap_text as wrap_text
-from .globals import get_current_context as get_current_context
-from .parser import OptionParser as OptionParser
-from .termui import clear as clear
-from .termui import confirm as confirm
-from .termui import echo_via_pager as echo_via_pager
-from .termui import edit as edit
-from .termui import getchar as getchar
-from .termui import launch as launch
-from .termui import pause as pause
-from .termui import progressbar as progressbar
-from .termui import prompt as prompt
-from .termui import secho as secho
-from .termui import style as style
-from .termui import unstyle as unstyle
-from .types import BOOL as BOOL
-from .types import Choice as Choice
-from .types import DateTime as DateTime
-from .types import File as File
-from .types import FLOAT as FLOAT
-from .types import FloatRange as FloatRange
-from .types import INT as INT
-from .types import IntRange as IntRange
-from .types import ParamType as ParamType
-from .types import Path as Path
-from .types import STRING as STRING
-from .types import Tuple as Tuple
-from .types import UNPROCESSED as UNPROCESSED
-from .types import UUID as UUID
-from .utils import echo as echo
-from .utils import format_filename as format_filename
-from .utils import get_app_dir as get_app_dir
-from .utils import get_binary_stream as get_binary_stream
-from .utils import get_text_stream as get_text_stream
-from .utils import open_file as open_file
-
-__version__ = "8.1.7"
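A hedged minimal example of the composable API re-exported above: one group, one command, an argument, an option, and `echo()`.

```python
import click

@click.group()
def cli():
    """Toy command line interface."""

@cli.command()
@click.argument("name")
@click.option("--count", default=1, show_default=True, help="Number of greetings.")
def hello(name, count):
    """Greet NAME COUNT times."""
    for _ in range(count):
        click.echo(f"Hello, {name}!")

if __name__ == "__main__":
    cli()
```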
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/util/data.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/util/data.py
deleted file mode 100644
index e6ba9a976c2aa4cabbf0a6031400f0d910b59ac3..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/util/data.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-import numpy as np
-
-if TYPE_CHECKING:
- from contourpy._contourpy import CoordinateArray
-
-
-def simple(
- shape: tuple[int, int], want_mask: bool = False,
-) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
- """Return simple test data consisting of the sum of two gaussians.
-
- Args:
- shape (tuple(int, int)): 2D shape of data to return.
- want_mask (bool, optional): Whether test data should be masked or not, default ``False``.
-
- Return:
- Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
- ``want_mask=True``.
- """
- ny, nx = shape
- x = np.arange(nx, dtype=np.float64)
- y = np.arange(ny, dtype=np.float64)
- x, y = np.meshgrid(x, y)
-
- xscale = nx - 1.0
- yscale = ny - 1.0
-
- # z is sum of 2D gaussians.
- amp = np.asarray([1.0, -1.0, 0.8, -0.9, 0.7])
- mid = np.asarray([[0.4, 0.2], [0.3, 0.8], [0.9, 0.75], [0.7, 0.3], [0.05, 0.7]])
- width = np.asarray([0.4, 0.2, 0.2, 0.2, 0.1])
-
- z = np.zeros_like(x)
- for i in range(len(amp)):
- z += amp[i]*np.exp(-((x/xscale - mid[i, 0])**2 + (y/yscale - mid[i, 1])**2) / width[i]**2)
-
- if want_mask:
- mask = np.logical_or(
- ((x/xscale - 1.0)**2 / 0.2 + (y/yscale - 0.0)**2 / 0.1) < 1.0,
- ((x/xscale - 0.2)**2 / 0.02 + (y/yscale - 0.45)**2 / 0.08) < 1.0,
- )
- z = np.ma.array(z, mask=mask) # type: ignore[no-untyped-call]
-
- return x, y, z
-
-
-def random(
- shape: tuple[int, int], seed: int = 2187, mask_fraction: float = 0.0,
-) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
-    """Return random test data.
-
- Args:
- shape (tuple(int, int)): 2D shape of data to return.
- seed (int, optional): Seed for random number generator, default 2187.
- mask_fraction (float, optional): Fraction of elements to mask, default 0.
-
- Return:
- Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
- ``mask_fraction`` is greater than zero.
- """
- ny, nx = shape
- x = np.arange(nx, dtype=np.float64)
- y = np.arange(ny, dtype=np.float64)
- x, y = np.meshgrid(x, y)
-
- rng = np.random.default_rng(seed)
- z = rng.uniform(size=shape)
-
- if mask_fraction > 0.0:
- mask_fraction = min(mask_fraction, 0.99)
- mask = rng.uniform(size=shape) < mask_fraction
- z = np.ma.array(z, mask=mask) # type: ignore[no-untyped-call]
-
- return x, y, z
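A hedged sketch of feeding these helpers into ContourPy's documented entry point, `contourpy.contour_generator()`; the exact structure of the returned lines depends on the generator's `line_type`.

```python
import contourpy
from contourpy.util.data import simple

# Build masked test data with the helper above and trace iso-lines through it.
x, y, z = simple((40, 50), want_mask=True)
cont_gen = contourpy.contour_generator(x=x, y=y, z=z)
lines = cont_gen.lines(0.5)   # format depends on the generator's line_type
print("contour lines at z=0.5 computed:", type(lines))
```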
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py
deleted file mode 100644
index c6b8f486a9cf348a2a9a65da75970ea92b394087..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py
+++ /dev/null
@@ -1,79 +0,0 @@
-""" Predefined buttons with bound events that can be included in a gr.Blocks for convenience. """
-
-from __future__ import annotations
-
-from typing import Literal
-
-from gradio_client.documentation import document, set_documentation_group
-
-from gradio.components import Button
-from gradio.utils import get_space
-
-set_documentation_group("component")
-
-
-@document()
-class DuplicateButton(Button):
- """
- Button that triggers a Spaces Duplication, when the demo is on Hugging Face Spaces. Does nothing locally.
- Preprocessing: passes the button value as a {str} into the function
- Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button
- """
-
- is_template = True
-
- def __init__(
- self,
- *,
- value: str = "Duplicate Space",
- variant: Literal["primary", "secondary", "stop"] = "secondary",
- size: Literal["sm", "lg"] | None = "sm",
- icon: str | None = None,
- link: str | None = None,
- visible: bool = True,
- interactive: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- scale: int | None = 0,
- min_width: int | None = None,
- _activate: bool = True,
- **kwargs,
- ):
- """
- Parameters:
- value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.
- variant: 'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.
- size: Size of the button. Can be "sm" or "lg".
- icon: URL or path to the icon file to display within the button. If None, no icon will be displayed.
- link: URL to open when the button is clicked. If None, no link will be used.
- visible: If False, component will be hidden.
- interactive: If False, the Button will be in a disabled state.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- """
- super().__init__(
- value,
- variant=variant,
- size=size,
- icon=icon,
- link=link,
- visible=visible,
- interactive=interactive,
- elem_id=elem_id,
- elem_classes=elem_classes,
- scale=scale,
- min_width=min_width,
- **kwargs,
- )
- if _activate:
- self.activate()
-
- def activate(self):
- space_name = get_space()
- if space_name is not None:
- self.click(
- fn=None,
- _js=f"() => {{ window.open(`https://huggingface.co/spaces/{space_name}?duplicate=true`, '_blank') }}",
- )
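For context, a DuplicateButton is normally dropped straight into a Blocks layout. The sketch below shows typical usage; it is an illustration, not code from the deleted file. On Hugging Face Spaces the click opens the duplication dialog, while locally it is a no-op, as the docstring above states.

```python
# Minimal usage sketch for DuplicateButton inside a Blocks app (illustrative only).
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("## My demo")
    gr.DuplicateButton(value="Duplicate Space", size="sm")  # no-op when run locally

if __name__ == "__main__":
    demo.launch()
```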
diff --git a/spaces/declare-lab/tango/diffusers/utils/stale.py b/spaces/declare-lab/tango/diffusers/utils/stale.py
deleted file mode 100644
index 12932f31c243f44566fb65daf80b0b3637cc8a95..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/utils/stale.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Script to close stale issues. Taken in part from the AllenNLP repository.
-https://github.com/allenai/allennlp.
-"""
-import os
-from datetime import datetime as dt
-
-from github import Github
-
-
-LABELS_TO_EXEMPT = [
- "good first issue",
- "good second issue",
- "good difficult issue",
- "enhancement",
- "new pipeline/model",
- "new scheduler",
- "wip",
-]
-
-
-def main():
- g = Github(os.environ["GITHUB_TOKEN"])
- repo = g.get_repo("huggingface/diffusers")
- open_issues = repo.get_issues(state="open")
-
- for issue in open_issues:
- comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
- last_comment = comments[0] if len(comments) > 0 else None
- if (
- last_comment is not None
- and last_comment.user.login == "github-actions[bot]"
- and (dt.utcnow() - issue.updated_at).days > 7
- and (dt.utcnow() - issue.created_at).days >= 30
- and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
- ):
- # Closes the issue after 7 days of inactivity since the Stalebot notification.
- issue.edit(state="closed")
- elif (
- "stale" in issue.get_labels()
- and last_comment is not None
- and last_comment.user.login != "github-actions[bot]"
- ):
- # Opens the issue if someone other than Stalebot commented.
- issue.edit(state="open")
- issue.remove_from_labels("stale")
- elif (
- (dt.utcnow() - issue.updated_at).days > 23
- and (dt.utcnow() - issue.created_at).days >= 30
- and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
- ):
- # Post a Stalebot notification after 23 days of inactivity.
- issue.create_comment(
- "This issue has been automatically marked as stale because it has not had "
- "recent activity. If you think this still needs to be addressed "
- "please comment on this thread.\n\nPlease note that issues that do not follow the "
- "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
- "are likely to be ignored."
- )
- issue.add_to_labels("stale")
-
-
-if __name__ == "__main__":
- main()
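The timing rules encoded above (warn after 23 idle days on issues at least 30 days old, close once the stale notice itself has gone unanswered for more than 7 days) can be factored into a pure function for testing. The sketch below is an illustration under that reading of the script, not code from the deleted file; the exempt-label and reopen branches are omitted for brevity.

```python
# Sketch of the stale-issue thresholds as a pure, testable function (illustrative).
from datetime import datetime, timedelta


def stale_action(created_at: datetime, updated_at: datetime,
                 last_comment_by_bot: bool, now: datetime) -> str:
    age_days = (now - created_at).days
    idle_days = (now - updated_at).days
    if last_comment_by_bot and idle_days > 7 and age_days >= 30:
        return "close"        # the stale notice went unanswered
    if idle_days > 23 and age_days >= 30:
        return "mark_stale"   # post the stale notification
    return "keep"


now = datetime.utcnow()
print(stale_action(now - timedelta(days=40), now - timedelta(days=25), False, now))  # mark_stale
```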
diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/learn/test_google_search.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/learn/test_google_search.py
deleted file mode 100644
index da32e8923e49df661ffdd24c22001682171e573b..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/tests/metagpt/learn/test_google_search.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import asyncio
-
-from pydantic import BaseModel
-
-from metagpt.learn.google_search import google_search
-
-
-async def mock_google_search():
- class Input(BaseModel):
- input: str
-
- inputs = [{"input": "ai agent"}]
-
- for i in inputs:
- seed = Input(**i)
- result = await google_search(seed.input)
- assert result != ""
-
-
-def test_suite():
- loop = asyncio.get_event_loop()
- task = loop.create_task(mock_google_search())
- loop.run_until_complete(task)
-
-
-if __name__ == "__main__":
- test_suite()
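The test above drives the coroutine through a manually created event loop; on Python 3.7+ the same check can be written with ``asyncio.run``. A minimal alternative sketch (not part of the deleted file):

```python
# Equivalent test using asyncio.run instead of manual event-loop management.
import asyncio

from metagpt.learn.google_search import google_search


async def check_google_search() -> None:
    result = await google_search("ai agent")
    assert result != ""


if __name__ == "__main__":
    asyncio.run(check_google_search())
```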
diff --git a/spaces/descript/vampnet/vampnet/scheduler.py b/spaces/descript/vampnet/vampnet/scheduler.py
deleted file mode 100644
index a57108c2af7c974d882b45e092907195ded71c9a..0000000000000000000000000000000000000000
--- a/spaces/descript/vampnet/vampnet/scheduler.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import copy
-from typing import List
-
-import torch
-
-class NoamScheduler:
- """OG scheduler from transformer paper: https://arxiv.org/pdf/1706.03762.pdf
- Implementation from Annotated Transformer: https://nlp.seas.harvard.edu/2018/04/03/attention.html
- """
-
- def __init__(
- self,
- optimizer: torch.optim.Optimizer,
- d_model: int = 512,
- factor: float = 1.0,
- warmup: int = 4000,
- ):
- # Store hparams
- self.warmup = warmup
- self.factor = factor
- self.d_model = d_model
-
- # Initialize variables `lr` and `steps`
- self.lr = None
- self.steps = 0
-
- # Store the optimizer
- self.optimizer = optimizer
-
- def state_dict(self):
- return {
- key: value for key, value in self.__dict__.items() if key != "optimizer"
- }
-
- def load_state_dict(self, state_dict):
- self.__dict__.update(state_dict)
-
- def step(self):
- self.steps += 1
- self.lr = self.factor * (
- self.d_model ** (-0.5)
- * min(self.steps ** (-0.5), self.steps * self.warmup ** (-1.5))
- )
-
- for p in self.optimizer.param_groups:
- p["lr"] = self.lr
-
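On every call to ``step()`` the scheduler above sets the learning rate to ``factor * d_model**-0.5 * min(step**-0.5, step * warmup**-1.5)``. A minimal usage sketch follows; the toy model and optimizer are placeholders for illustration and are not part of the deleted file.

```python
# Minimal usage sketch for NoamScheduler (illustrative).
import torch

from vampnet.scheduler import NoamScheduler  # module path of the deleted file above

model = torch.nn.Linear(512, 512)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0)  # lr is overwritten by the scheduler

sched = NoamScheduler(optimizer, d_model=512, factor=1.0, warmup=4000)

for _ in range(5):
    optimizer.zero_grad()
    model(torch.randn(8, 512)).sum().backward()
    sched.step()      # lr = factor * d_model**-0.5 * min(step**-0.5, step * warmup**-1.5)
    optimizer.step()

print("current lr:", sched.lr)
```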
diff --git a/spaces/dfurman/chat-all-in/ideas.md b/spaces/dfurman/chat-all-in/ideas.md
deleted file mode 100644
index 16b39b9e50ab8d29f72b2873b733a231eeb4c7f3..0000000000000000000000000000000000000000
--- a/spaces/dfurman/chat-all-in/ideas.md
+++ /dev/null
@@ -1,6 +0,0 @@
-- programmatic data preprocessing via youtube api
-- slack frontend as alternative deployment - possible to use gradio api?
-- tiktoken token counter for openai truncation
-- save conversations from chatgpt sessions, mix in full and smaller sections of context. Save in mpt format with the context. Then, fine-tune some open-source models on this dataset.
-- add back in "stop" button for generation?
-- add advanced params switches for model type and section retrieval type
diff --git "a/spaces/diacanFperku/AutoGPT/Another Way To Resize The Live Tv\302\240display.md" "b/spaces/diacanFperku/AutoGPT/Another Way To Resize The Live Tv\302\240display.md"
deleted file mode 100644
index 662190694dd23164c036324c41c5946723f925ea..0000000000000000000000000000000000000000
--- "a/spaces/diacanFperku/AutoGPT/Another Way To Resize The Live Tv\302\240display.md"
+++ /dev/null
@@ -1,30 +0,0 @@
-
-Another Way to Resize the Live TV Display
-If you are using a smart TV or a streaming device like Chromecast or Firestick, you may want to adjust the screen size and resolution of your live TV display to fit your preferences and TV capabilities. While most devices have their own settings menus that allow you to change these options, there is another way to resize the live TV display using your PC.
-By connecting your PC to your TV via an HDMI cable, you can use your PC's display settings to customize the screen size and resolution of your live TV display. This can be useful if you want to use your PC as a media center or if you want to have more control over the display quality. Here are the steps to follow:
-Another way to resize the live tv display
Download File ⇒ https://gohhs.com/2uFVkd
-
-- Connect your PC to your TV using an HDMI cable. Make sure both devices are turned on and set to the correct HDMI input.
-- On your PC, right-click on an empty space on your desktop and select "Display settings".
-- You should see two displays labeled "1" and "2". Display 1 is your PC monitor and display 2 is your TV. Click on display 2 to select it.
-- Under "Scale and layout", you can adjust the size of text, apps, and other items on your TV screen. You can also change the resolution and orientation of your TV display.
-- Under "Multiple displays", you can choose how you want to use your PC and TV displays. You can extend your desktop across both screens, duplicate your desktop on both screens, or show only on one screen.
-- Click "Apply" to save your changes and check how they look on your TV screen. You can also use the "Detect" and "Identify" buttons to troubleshoot any issues with your displays.
-
-By using this method, you can resize the live TV display according to your needs and preferences. You can also switch back to the device's settings menu if you want to change other options like sound, color, or game mode[^1^] [^2^].
-
-Some advantages of resizing the live TV display using your PC are:
-
-- You can use your PC as a media center and access your favorite streaming services, websites, and apps on your TV screen.
-- You can adjust the display quality to match your TV's capabilities and your viewing preferences.
-- You can use your PC's keyboard and mouse to navigate and control your live TV display.
-
-Some disadvantages of resizing the live TV display using your PC are:
-
-- You may need to buy an HDMI cable if you don't have one already.
-- You may experience some lag or delay between your PC and TV displays.
-- You may need to adjust the display settings every time you switch between different devices or inputs.
-
-In conclusion, resizing the live TV display using your PC is another way to customize your viewing experience and enjoy your favorite live TV shows and channels. However, it may also require some extra equipment and adjustments. You can try this method and see if it works for you or stick to the device's settings menu for simpler options.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Arma 3 Warhammer 40k Mod.md b/spaces/diacanFperku/AutoGPT/Arma 3 Warhammer 40k Mod.md
deleted file mode 100644
index 34414df1261e4f00dc12608e2f54358ee7ed40d8..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Arma 3 Warhammer 40k Mod.md
+++ /dev/null
@@ -1,16 +0,0 @@
-Arma 3 Warhammer 40k Mod
Download >>> https://gohhs.com/2uFVFR
-
-including Elite Marines - 7 Space Marine Regiments - 5 Chaos Space Marine Regiments - 2 Dark Eldar Regiments - 4 Black Templar - 6 Devastators - 6 Assault Terminators - 10 Terminators, 12 Stormtroopers, 18 Plague Marines, 18 Grave Guard, 28 Chaos Lords, 4 Dark Eldar, 1 Flesh King - New Planet: Mars New Project : Pure Nexus Expansion - Dedicated to the True I.G Fans - Arma 3 Steam Workshop for free! by GoldenNuke. Arma 3 Pure Nexus Team by GoldenNuke. Available on: Windows - Steam. For other platforms please search Steam Workshop for Arma 3 and Pure Nexus. or play.com. Visit the Community Page for more....
-
-Warhammer 40,000: Dawn of War II is a real-time strategy game developed by Relic Entertainment and released on August 16, 2010. The game received generally favourable reviews from critics, who praised the gameplay, game engine, user interface and audio, but criticised the singleplayer and multiplayer, and AI....
-
-The mod offers four factions playable in skirmish or multiplayer mode, the two Space Marine and two Chaos Space Marines, which includes new artwork and units, the custom campaign, a lot of new models and new vehicles. It also includes new weapons, new units and special abilities,...
-
-Battle for Nidal - The entire City including the Old quarter of Nidal, the new quarter of Nidal, the Nidal Bridge, the Shati Valley, the South Gate, the Terronix Port, the Terronix Town Hall and the Terronix Council are present with all their buildings and rooms. Various outposts in the Nidal have been...
-
-Alterwar – Warhammer 40,000 Campaign 2: Tzeentch’s Wrath (w/ SI) - The entire City including the Old quarter of Nidal, the new quarter of Nidal, the Nidal Bridge, the Shati Valley, the South Gate, the Terronix Port, the Terronix Town Hall and the Terronix Council are present with all their buildings and rooms. Various...
-
-PVP Deathmatch - (BIN - 12th Jan 2011) - The entire City including the Old quarter of Nidal, the new quarter of Nidal, the Nidal Bridge, the Shati Valley, the South Gate, the Terronix Port, the Terronix Town Hall and the Terronix Council are present with all their buildings and rooms. 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/CRACK AllMyNotes Organizer Deluxe 3.21 Build 873 Final.md b/spaces/diacanFperku/AutoGPT/CRACK AllMyNotes Organizer Deluxe 3.21 Build 873 Final.md
deleted file mode 100644
index 3f42d11051778dae38cefe5dc4cd0b4a3ae6793c..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/CRACK AllMyNotes Organizer Deluxe 3.21 Build 873 Final.md
+++ /dev/null
@@ -1,12 +0,0 @@
-CRACK AllMyNotes Organizer Deluxe 3.21 Build 873 Final
Download Zip · https://gohhs.com/2uFUHf
-
-Mar 20, 2564 BE - CyberLink Power2Go Platinum 14.0.14563.0 Pre-Cracked Young . CRACK AllMyNotes Organizer Deluxe 3.21 Build 873 Final ... CRACK AllMyNotes Organizer Pro 3.20 Final
-Download free WinAVI Video Converter Ultimate . Converter Ultimate 8.0.0 Build 2031 Rus.
-WinAVI Video Converter Ultimate .
-WinAVI Video
-WinAVI Video Converter Ultimate 8.2.0.1335 Portable 2013 Rus.
-WinAVI Video Converter Ultimate 8.1.0.1335 Portable 2013 Rus.
-WinAVI Video Converter Ultimate 7.2.0.1059 Portable + Rus. 8a78ff9644
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Cinder Book One In The Lunar Chronicles Pdf [HOT] Download.md b/spaces/diacanFperku/AutoGPT/Cinder Book One In The Lunar Chronicles Pdf [HOT] Download.md
deleted file mode 100644
index af5ff8ff3f4540d5107736638378784a0612f8bf..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Cinder Book One In The Lunar Chronicles Pdf [HOT] Download.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-chapter thirty-seven begins when cinder, pearl and peony are in the chancellor's castle, trying to find peony. when they do, the chancellor informs them that kai has been killed and that peony has been infected. they escape, but are soon captured and have their memories wiped. on their way to a lab, they see the chancellor's cyborgs fighting with cinder's cyborgs.
-cinder is now in her 40s, but still remains the same innocent girl that everyone knew when she was a teenager. she is convinced that her friend peony is dead and wants to feel her dead body to confirm it. on the way to the hovercraft, she sees kai coming towards her. she hides, but he spots her. kai tells cinder that peony is alive and comes back with her to take her to her. cinder takes her back to the hovercraft but the earthen authorities find them. when cinder is forced to escape on foot, she feels restless. here is a girl who grew up in the palace, she has no clue how to live without servants and servants to obey. peony is the only one she can depend on and she is her only friend. without any answers, she uses her abilities and manipulates kai and the earthen government to help her. and together they stand to save the world.
-cinder book one in the lunar chronicles pdf download
DOWNLOAD ✫ https://gohhs.com/2uFT9I
-cinder returns home to her clunking robot, nainsi, who tells her the queen has ordered her to obtain the lunar chronicles. after she leaves for her assignment, nainsi falls ill and tells cinder that she has not been programming her circuits properly. cinder returns to the palace, and after nainsi repairs, she rushes to the library. she overhears levana declaring that she is going to marry kai and makes her way to the royal throne room. on the way, she is attacked by several guards, and one of the men is killed. she makes it into the throne room just as kai and levana are about to exchange vows. by the time kai turns to look at her, she has already been taken.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Comsol Multiphysics Free Download Cracked Softwaresinstmankl.md b/spaces/diacanFperku/AutoGPT/Comsol Multiphysics Free Download Cracked Softwaresinstmankl.md
deleted file mode 100644
index 0b4fd90a17c8fe9dd1b847ad47f17cda5e5affff..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Comsol Multiphysics Free Download Cracked Softwaresinstmankl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Comsol Multiphysics Free Download Cracked Softwaresinstmankl
DOWNLOAD ->->->-> https://gohhs.com/2uFTYm
-
- d5da3c52bf
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Jurm Movie [TOP] Download 720p In Hindi.md b/spaces/diacanFperku/AutoGPT/Jurm Movie [TOP] Download 720p In Hindi.md
deleted file mode 100644
index 829c6497a150e27d8e490ea0d27b6e2e2907a794..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Jurm Movie [TOP] Download 720p In Hindi.md
+++ /dev/null
@@ -1,34 +0,0 @@
-Jurm movie download 720p in hindi
Download File ★★★ https://gohhs.com/2uFUIh
-
-Download Jurm movie online now.
-
-Ludaar (2018) Watch Jurm full movie online in HD. Enjoy Jurm starring Bipasha Basu, Vivek Oberoi, Jimmy Shergill, Geetika Verma, and directed by Atul Sabharwal . Download Jurm movie online now.
-
-Jurm download movie
-
-Official Jurm movie site: Watch Jurm Full Movie 2018, Download Jurm Full Movie 2018 Online, Watch Jurm 2018 Full Movie Online, Watch Jurm Full Movie Online, Watch Jurm Online, Download Jurm, Watch Jurm 2018, Watch Jurm Full Movie, Jurm movie
-
-Poster of Jurm:
-
-IMDB Link:
-
-Jurm watch full movie online
-
-Watch Jurm Full Movie 2018, Download Jurm Full Movie 2018 Online, Watch Jurm 2018 Full Movie Online, Watch Jurm 2018 Full Movie Online, Watch Jurm 2018 Full Movie Online, Watch Jurm Online, Download Jurm, Watch Jurm 2018, Watch Jurm Full Movie, Jurm movie
-
-Watch Jurm full movie online in HD. Enjoy Jurm starring Vivek Shauq, Milind Soman, Salim Kaskar, and directed by Vikram . Download Jurm movie online now.
-
-Hindi full movie download Jurm
-
-Watch Jurm Full Movie 2018, Download Jurm Full Movie 2018 Online, Watch Jurm 2018 Full Movie Online, Watch Jurm 2018 Full Movie Online, Watch Jurm 2018 Full Movie Online, Watch Jurm 2018 Full Movie Online, Watch Jurm Online, Download Jurm, Watch Jurm 2018, Watch Jurm Full Movie, Jurm movie
-
-Watch Jurm full movie online in HD. Enjoy Jurm starring Vivek Shauq, Pragati , and directed by Nikhil . Download Jurm movie online now.
-
-Watch Jurm full movie online in HD. Enjoy Jurm starring Girish Karnad, Kavi Kishore, and directed by Anupam Roy . Download Jurm movie online now.
-
-Jurm watch full movie online in HD. Enjoy Jurm starring Nana Patekar, Mukul Dev, and directed by Atul Sabharwal . Download Jurm movie online now.
-
-Jurm watch full movie online in HD. Enjoy Jurm starring Rishi Kapoor 4fefd39f24
-
-
-
diff --git a/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/core.c b/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/core.c
deleted file mode 100644
index 5f8af54d32474f821e9d1f4d2679d78128722596..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/core.c
+++ /dev/null
@@ -1,26530 +0,0 @@
-/* Generated by Cython 3.0.0 */
-
-/* BEGIN: Cython Metadata
-{
- "distutils": {
- "name": "monotonic_align.core",
- "sources": [
- "core.pyx"
- ]
- },
- "module_name": "monotonic_align.core"
-}
-END: Cython Metadata */
-
-#ifndef PY_SSIZE_T_CLEAN
-#define PY_SSIZE_T_CLEAN
-#endif /* PY_SSIZE_T_CLEAN */
-#if defined(CYTHON_LIMITED_API) && 0
- #ifndef Py_LIMITED_API
- #if CYTHON_LIMITED_API+0 > 0x03030000
- #define Py_LIMITED_API CYTHON_LIMITED_API
- #else
- #define Py_LIMITED_API 0x03030000
- #endif
- #endif
-#endif
-
-#include "Python.h"
-#ifndef Py_PYTHON_H
- #error Python headers needed to compile C extensions, please install development version of Python.
-#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
- #error Cython requires Python 2.7+ or Python 3.3+.
-#else
-#define CYTHON_ABI "3_0_0"
-#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI
-#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."
-#define CYTHON_HEX_VERSION 0x030000F0
-#define CYTHON_FUTURE_DIVISION 1
-#include <stddef.h>
-#ifndef offsetof
- #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
-#endif
-#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
- #ifndef __stdcall
- #define __stdcall
- #endif
- #ifndef __cdecl
- #define __cdecl
- #endif
- #ifndef __fastcall
- #define __fastcall
- #endif
-#endif
-#ifndef DL_IMPORT
- #define DL_IMPORT(t) t
-#endif
-#ifndef DL_EXPORT
- #define DL_EXPORT(t) t
-#endif
-#define __PYX_COMMA ,
-#ifndef HAVE_LONG_LONG
- #define HAVE_LONG_LONG
-#endif
-#ifndef PY_LONG_LONG
- #define PY_LONG_LONG LONG_LONG
-#endif
-#ifndef Py_HUGE_VAL
- #define Py_HUGE_VAL HUGE_VAL
-#endif
-#if defined(GRAALVM_PYTHON)
- /* For very preliminary testing purposes. Most variables are set the same as PyPy.
- The existence of this section does not imply that anything works or is even tested */
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 1
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #undef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 1
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3)
- #endif
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(PYPY_VERSION)
- #define CYTHON_COMPILING_IN_PYPY 1
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #undef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 1
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3)
- #endif
- #if PY_VERSION_HEX < 0x03090000
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00)
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(CYTHON_LIMITED_API)
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 1
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_CLINE_IN_TRACEBACK
- #define CYTHON_CLINE_IN_TRACEBACK 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 1
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #ifndef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #endif
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS 1
- #endif
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 1
- #ifndef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(PY_NOGIL)
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 1
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #ifndef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #ifndef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
-#else
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 1
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #ifndef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #endif
- #ifndef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 1
- #endif
- #if PY_MAJOR_VERSION < 3
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #ifndef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #elif !defined(CYTHON_USE_UNICODE_WRITER)
- #define CYTHON_USE_UNICODE_WRITER 1
- #endif
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #ifndef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 1
- #endif
- #ifndef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6)
- #endif
- #ifndef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1)
- #endif
- #ifndef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 1
- #endif
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS 1
- #endif
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #ifndef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #endif
- #if PY_VERSION_HEX < 0x030400a1
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #elif !defined(CYTHON_USE_TP_FINALIZE)
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #if PY_VERSION_HEX < 0x030600B1
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #elif !defined(CYTHON_USE_DICT_VERSIONS)
- #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5)
- #endif
- #if PY_VERSION_HEX < 0x030700A3
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #elif !defined(CYTHON_USE_EXC_INFO_STACK)
- #define CYTHON_USE_EXC_INFO_STACK 1
- #endif
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
- #endif
-#endif
-#if !defined(CYTHON_FAST_PYCCALL)
-#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
-#endif
-#if !defined(CYTHON_VECTORCALL)
-#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1)
-#endif
-#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1)
-#if CYTHON_USE_PYLONG_INTERNALS
- #if PY_MAJOR_VERSION < 3
- #include "longintrepr.h"
- #endif
- #undef SHIFT
- #undef BASE
- #undef MASK
- #ifdef SIZEOF_VOID_P
- enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
- #endif
-#endif
-#ifndef __has_attribute
- #define __has_attribute(x) 0
-#endif
-#ifndef __has_cpp_attribute
- #define __has_cpp_attribute(x) 0
-#endif
-#ifndef CYTHON_RESTRICT
- #if defined(__GNUC__)
- #define CYTHON_RESTRICT __restrict__
- #elif defined(_MSC_VER) && _MSC_VER >= 1400
- #define CYTHON_RESTRICT __restrict
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_RESTRICT restrict
- #else
- #define CYTHON_RESTRICT
- #endif
-#endif
-#ifndef CYTHON_UNUSED
- #if defined(__cplusplus)
- /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
- * but leads to warnings with -pedantic, since it is a C++17 feature */
- #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
- #if __has_cpp_attribute(maybe_unused)
- #define CYTHON_UNUSED [[maybe_unused]]
- #endif
- #endif
- #endif
-#endif
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-#endif
-#ifndef CYTHON_UNUSED_VAR
-# if defined(__cplusplus)
- template<class T> void CYTHON_UNUSED_VAR( const T& ) { }
-# else
-# define CYTHON_UNUSED_VAR(x) (void)(x)
-# endif
-#endif
-#ifndef CYTHON_MAYBE_UNUSED_VAR
- #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-# define CYTHON_NCP_UNUSED
-# else
-# define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
-#ifdef _MSC_VER
- #ifndef _MSC_STDINT_H_
- #if _MSC_VER < 1300
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
- #else
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int16 uint16_t;
- typedef unsigned __int32 uint32_t;
- #endif
- #endif
- #if _MSC_VER < 1300
- #ifdef _WIN64
- typedef unsigned long long __pyx_uintptr_t;
- #else
- typedef unsigned int __pyx_uintptr_t;
- #endif
- #else
- #ifdef _WIN64
- typedef unsigned __int64 __pyx_uintptr_t;
- #else
- typedef unsigned __int32 __pyx_uintptr_t;
- #endif
- #endif
-#else
- #include <stdint.h>
- typedef uintptr_t __pyx_uintptr_t;
-#endif
-#ifndef CYTHON_FALLTHROUGH
- #if defined(__cplusplus)
- /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
- * but leads to warnings with -pedantic, since it is a C++17 feature */
- #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
- #if __has_cpp_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH [[fallthrough]]
- #endif
- #endif
- #ifndef CYTHON_FALLTHROUGH
- #if __has_cpp_attribute(clang::fallthrough)
- #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
- #elif __has_cpp_attribute(gnu::fallthrough)
- #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
- #endif
- #endif
- #endif
- #ifndef CYTHON_FALLTHROUGH
- #if __has_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
- #if defined(__clang__) && defined(__apple_build_version__)
- #if __apple_build_version__ < 7000000
- #undef CYTHON_FALLTHROUGH
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
-#endif
-#ifdef __cplusplus
- template<typename T>
- struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
- #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value)
-#else
- #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
-#endif
-#if CYTHON_COMPILING_IN_PYPY == 1
- #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000)
-#else
- #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000)
-#endif
-#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))
-
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #elif defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
-#endif
-
-#define __PYX_BUILD_PY_SSIZE_T "n"
-#define CYTHON_FORMAT_SSIZE_T "z"
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
- #define __Pyx_DefaultClassType PyClass_Type
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
- #define __Pyx_BUILTIN_MODULE_NAME "builtins"
- #define __Pyx_DefaultClassType PyType_Type
-#if PY_VERSION_HEX >= 0x030B00A1
- static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f,
- PyObject *code, PyObject *c, PyObject* n, PyObject *v,
- PyObject *fv, PyObject *cell, PyObject* fn,
- PyObject *name, int fline, PyObject *lnos) {
- PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
- PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *empty=NULL;
- const char *fn_cstr=NULL;
- const char *name_cstr=NULL;
- PyCodeObject *co=NULL, *result=NULL;
- PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
- if (!(kwds=PyDict_New())) goto end;
- if (!(argcount=PyLong_FromLong(a))) goto end;
- if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
- if (!(posonlyargcount=PyLong_FromLong(p))) goto end;
- if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
- if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
- if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
- if (!(nlocals=PyLong_FromLong(l))) goto end;
- if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
- if (!(stacksize=PyLong_FromLong(s))) goto end;
- if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
- if (!(flags=PyLong_FromLong(f))) goto end;
- if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
- if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
- if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
- if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
- if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto end;
- if (!(empty = PyTuple_New(0))) goto end;
- result = (PyCodeObject*) PyObject_Call(replace, empty, kwds);
- end:
- Py_XDECREF((PyObject*) co);
- Py_XDECREF(kwds);
- Py_XDECREF(argcount);
- Py_XDECREF(posonlyargcount);
- Py_XDECREF(kwonlyargcount);
- Py_XDECREF(nlocals);
- Py_XDECREF(stacksize);
- Py_XDECREF(replace);
- Py_XDECREF(empty);
- if (type) {
- PyErr_Restore(type, value, traceback);
- }
- return result;
- }
-#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#endif
-#endif
-#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE)
- #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type)
-#else
- #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type))
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is)
- #define __Pyx_Py_Is(x, y) Py_Is(x, y)
-#else
- #define __Pyx_Py_Is(x, y) ((x) == (y))
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone)
- #define __Pyx_Py_IsNone(ob) Py_IsNone(ob)
-#else
- #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None)
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue)
- #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob)
-#else
- #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True)
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse)
- #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob)
-#else
- #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False)
-#endif
-#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? NULL : (obj))
-#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o)
-#else
- #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o)
-#endif
-#ifndef CO_COROUTINE
- #define CO_COROUTINE 0x80
-#endif
-#ifndef CO_ASYNC_GENERATOR
- #define CO_ASYNC_GENERATOR 0x200
-#endif
-#ifndef Py_TPFLAGS_CHECKTYPES
- #define Py_TPFLAGS_CHECKTYPES 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_INDEX
- #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
- #define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_FINALIZE
- #define Py_TPFLAGS_HAVE_FINALIZE 0
-#endif
-#ifndef Py_TPFLAGS_SEQUENCE
- #define Py_TPFLAGS_SEQUENCE 0
-#endif
-#ifndef Py_TPFLAGS_MAPPING
- #define Py_TPFLAGS_MAPPING 0
-#endif
-#ifndef METH_STACKLESS
- #define METH_STACKLESS 0
-#endif
-#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
- #ifndef METH_FASTCALL
- #define METH_FASTCALL 0x80
- #endif
- typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
- typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
- Py_ssize_t nargs, PyObject *kwnames);
-#else
- #define __Pyx_PyCFunctionFast _PyCFunctionFast
- #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
-#endif
-#if CYTHON_METH_FASTCALL
- #define __Pyx_METH_FASTCALL METH_FASTCALL
- #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast
- #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords
-#else
- #define __Pyx_METH_FASTCALL METH_VARARGS
- #define __Pyx_PyCFunction_FastCall PyCFunction
- #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords
-#endif
-#if CYTHON_VECTORCALL
- #define __pyx_vectorcallfunc vectorcallfunc
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET
- #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n))
-#elif CYTHON_BACKPORT_VECTORCALL
- typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args,
- size_t nargsf, PyObject *kwnames);
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1))
- #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET))
-#else
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0
- #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n))
-#endif
-#if PY_VERSION_HEX < 0x030900B1
- #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b))
- typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
-#else
- #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b)
- #define __Pyx_PyCMethod PyCMethod
-#endif
-#ifndef METH_METHOD
- #define METH_METHOD 0x200
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
- #define PyObject_Malloc(s) PyMem_Malloc(s)
- #define PyObject_Free(p) PyMem_Free(p)
- #define PyObject_Realloc(p) PyMem_Realloc(p)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
-#else
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyThreadState_Current PyThreadState_Get()
-#elif !CYTHON_FAST_THREAD_STATE
- #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#elif PY_VERSION_HEX >= 0x03060000
- #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
-#elif PY_VERSION_HEX >= 0x03000000
- #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#else
- #define __Pyx_PyThreadState_Current _PyThreadState_Current
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
-static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op)
-{
- void *result;
- result = PyModule_GetState(op);
- if (!result)
- Py_FatalError("Couldn't find the module state");
- return result;
-}
-#endif
-#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype)
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name))
-#else
- #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name)
-#endif
-#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
-#include "pythread.h"
-#define Py_tss_NEEDS_INIT 0
-typedef int Py_tss_t;
-static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
- *key = PyThread_create_key();
- return 0;
-}
-static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
- Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
- *key = Py_tss_NEEDS_INIT;
- return key;
-}
-static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
- PyObject_Free(key);
-}
-static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
- return *key != Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
- PyThread_delete_key(*key);
- *key = Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
- return PyThread_set_key_value(*key, value);
-}
-static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
- return PyThread_get_key_value(*key);
-}
-#endif
-#if PY_MAJOR_VERSION < 3
- #if CYTHON_COMPILING_IN_PYPY
- #if PYPY_VERSION_NUM < 0x07030600
- #if defined(__cplusplus) && __cplusplus >= 201402L
- [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]]
- #elif defined(__GNUC__) || defined(__clang__)
- __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")))
- #elif defined(_MSC_VER)
- __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))
- #endif
- static CYTHON_INLINE int PyGILState_Check(void) {
- return 0;
- }
- #else // PYPY_VERSION_NUM < 0x07030600
- #endif // PYPY_VERSION_NUM < 0x07030600
- #else
- static CYTHON_INLINE int PyGILState_Check(void) {
- PyThreadState * tstate = _PyThreadState_Current;
- return tstate && (tstate == PyGILState_GetThisThreadState());
- }
- #endif
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
-#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
-#else
-#define __Pyx_PyDict_NewPresized(n) PyDict_New()
-#endif
-#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
-#else
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS
-#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
-static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) {
- PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name);
- if (res == NULL) PyErr_Clear();
- return res;
-}
-#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000)
-#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError
-#define __Pyx_PyDict_GetItemStr PyDict_GetItem
-#else
-static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) {
-#if CYTHON_COMPILING_IN_PYPY
- return PyDict_GetItem(dict, name);
-#else
- PyDictEntry *ep;
- PyDictObject *mp = (PyDictObject*) dict;
- long hash = ((PyStringObject *) name)->ob_shash;
- assert(hash != -1);
- ep = (mp->ma_lookup)(mp, name, hash);
- if (ep == NULL) {
- return NULL;
- }
- return ep->me_value;
-#endif
-}
-#define __Pyx_PyDict_GetItemStr PyDict_GetItem
-#endif
-#if CYTHON_USE_TYPE_SLOTS
- #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags)
- #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0)
- #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext)
-#else
- #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp))
- #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature)
- #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next
-#endif
-#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000
-#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\
- PyTypeObject *type = Py_TYPE(obj);\
- assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\
- PyObject_GC_Del(obj);\
- Py_DECREF(type);\
-}
-#else
-#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define CYTHON_PEP393_ENABLED 1
- #define __Pyx_PyUnicode_READY(op) (0)
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U)
- #define __Pyx_PyUnicode_KIND(u) ((void)u, (0))
- #define __Pyx_PyUnicode_DATA(u) ((void*)u)
- #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i))
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u))
-#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
- #define CYTHON_PEP393_ENABLED 1
- #if PY_VERSION_HEX >= 0x030C0000
- #define __Pyx_PyUnicode_READY(op) (0)
- #else
- #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
- 0 : _PyUnicode_Ready((PyObject *)(op)))
- #endif
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
- #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u))
- #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
- #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
- #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch)
- #if PY_VERSION_HEX >= 0x030C0000
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
- #else
- #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
- #else
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
- #endif
- #endif
-#else
- #define CYTHON_PEP393_ENABLED 0
- #define PyUnicode_1BYTE_KIND 1
- #define PyUnicode_2BYTE_KIND 2
- #define PyUnicode_4BYTE_KIND 4
- #define __Pyx_PyUnicode_READY(op) (0)
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535U : 1114111U)
- #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE))
- #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
- #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
- #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch)
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
-#endif
-#if CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
- #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
-#else
- #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
- #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
- PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-#endif
-#if CYTHON_COMPILING_IN_PYPY
- #if !defined(PyUnicode_DecodeUnicodeEscape)
- #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors)
- #endif
- #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500)
- #undef PyUnicode_Contains
- #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
- #endif
- #if !defined(PyByteArray_Check)
- #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
- #endif
- #if !defined(PyObject_Format)
- #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
- #endif
-#endif
-#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
-#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
-#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
-#else
- #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
-#endif
-#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
- #define PyObject_ASCII(o) PyObject_Repr(o)
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyBaseString_Type PyUnicode_Type
- #define PyStringObject PyUnicodeObject
- #define PyString_Type PyUnicode_Type
- #define PyString_Check PyUnicode_Check
- #define PyString_CheckExact PyUnicode_CheckExact
-#ifndef PyObject_Unicode
- #define PyObject_Unicode PyObject_Str
-#endif
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
- #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
-#else
- #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
- #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
- #define __Pyx_PySequence_ListKeepNew(obj)\
- (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj))
-#else
- #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj)
-#endif
-#ifndef PySet_CheckExact
- #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type)
-#endif
-#if PY_VERSION_HEX >= 0x030900A4
- #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
- #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
-#else
- #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
- #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
-#endif
-#if CYTHON_ASSUME_SAFE_MACROS
- #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
-#else
- #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyIntObject PyLongObject
- #define PyInt_Type PyLong_Type
- #define PyInt_Check(op) PyLong_Check(op)
- #define PyInt_CheckExact(op) PyLong_CheckExact(op)
- #define __Pyx_Py3Int_Check(op) PyLong_Check(op)
- #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op)
- #define PyInt_FromString PyLong_FromString
- #define PyInt_FromUnicode PyLong_FromUnicode
- #define PyInt_FromLong PyLong_FromLong
- #define PyInt_FromSize_t PyLong_FromSize_t
- #define PyInt_FromSsize_t PyLong_FromSsize_t
- #define PyInt_AsLong PyLong_AsLong
- #define PyInt_AS_LONG PyLong_AS_LONG
- #define PyInt_AsSsize_t PyLong_AsSsize_t
- #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
- #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
- #define PyNumber_Int PyNumber_Long
-#else
- #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op))
- #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op))
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyBoolObject PyLongObject
-#endif
-#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
- #ifndef PyUnicode_InternFromString
- #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
- #endif
-#endif
-#if PY_VERSION_HEX < 0x030200A4
- typedef long Py_hash_t;
- #define __Pyx_PyInt_FromHash_t PyInt_FromLong
- #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
-#else
- #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
- #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
-#endif
-#if CYTHON_USE_ASYNC_SLOTS
- #if PY_VERSION_HEX >= 0x030500B1
- #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
- #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
- #else
- #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
- #endif
-#else
- #define __Pyx_PyType_AsAsync(obj) NULL
-#endif
-#ifndef __Pyx_PyAsyncMethodsStruct
- typedef struct {
- unaryfunc am_await;
- unaryfunc am_aiter;
- unaryfunc am_anext;
- } __Pyx_PyAsyncMethodsStruct;
-#endif
-
-#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
- #if !defined(_USE_MATH_DEFINES)
- #define _USE_MATH_DEFINES
- #endif
-#endif
-#include <math.h>
-#ifdef NAN
-#define __PYX_NAN() ((float) NAN)
-#else
-static CYTHON_INLINE float __PYX_NAN() {
- float value;
- memset(&value, 0xFF, sizeof(value));
- return value;
-}
-#endif
-#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
-#define __Pyx_truncl trunc
-#else
-#define __Pyx_truncl truncl
-#endif
-
-#define __PYX_MARK_ERR_POS(f_index, lineno) \
- { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
-#define __PYX_ERR(f_index, lineno, Ln_error) \
- { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
-
-#ifdef CYTHON_EXTERN_C
- #undef __PYX_EXTERN_C
- #define __PYX_EXTERN_C CYTHON_EXTERN_C
-#elif defined(__PYX_EXTERN_C)
- #ifdef _MSC_VER
- #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
- #else
- #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
- #endif
-#else
- #ifdef __cplusplus
- #define __PYX_EXTERN_C extern "C"
- #else
- #define __PYX_EXTERN_C extern
- #endif
-#endif
-
-#define __PYX_HAVE__monotonic_align__core
-#define __PYX_HAVE_API__monotonic_align__core
-/* Early includes */
-#include "pythread.h"
-#include <stdlib.h>
-#include <stdio.h>
-#ifdef _OPENMP
-#include <omp.h>
-#endif /* _OPENMP */
-
-#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
-#define CYTHON_WITHOUT_ASSERTIONS
-#endif
-
-typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
- const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
-
-#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
-#define __PYX_DEFAULT_STRING_ENCODING ""
-#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
-#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_uchar_cast(c) ((unsigned char)c)
-#define __Pyx_long_cast(x) ((long)x)
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
- (sizeof(type) < sizeof(Py_ssize_t)) ||\
- (sizeof(type) > sizeof(Py_ssize_t) &&\
- likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX) &&\
- (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
- v == (type)PY_SSIZE_T_MIN))) ||\
- (sizeof(type) == sizeof(Py_ssize_t) &&\
- (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX))) )
-static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
- return (size_t) i < (size_t) limit;
-}
-#if defined (__cplusplus) && __cplusplus >= 201103L
- #include <cstdlib>
- #define __Pyx_sst_abs(value) std::abs(value)
-#elif SIZEOF_INT >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) abs(value)
-#elif SIZEOF_LONG >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) labs(value)
-#elif defined (_MSC_VER)
- #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
-#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define __Pyx_sst_abs(value) llabs(value)
-#elif defined (__GNUC__)
- #define __Pyx_sst_abs(value) __builtin_llabs(value)
-#else
- #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
-#endif
-static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
-static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
-#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
-#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
-#define __Pyx_PyBytes_FromString PyBytes_FromString
-#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
- #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#else
- #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
- #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
-#endif
-#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
-#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
-#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
-#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
-#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
-#if CYTHON_COMPILING_IN_LIMITED_API
-static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u)
-{
- const wchar_t *u_end = u;
- while (*u_end++) ;
- return (size_t)(u_end - u - 1);
-}
-#else
-static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
-{
- const Py_UNICODE *u_end = u;
- while (*u_end++) ;
- return (size_t)(u_end - u - 1);
-}
-#endif
-#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o)
-#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
-#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
-#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
-#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
-#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
-static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
-static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
-#define __Pyx_PySequence_Tuple(obj)\
- (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
-static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
-static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
-static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
-#if CYTHON_ASSUME_SAFE_MACROS
-#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
-#else
-#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
-#endif
-#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
-#else
-#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
-#endif
-#if CYTHON_USE_PYLONG_INTERNALS
- #if PY_VERSION_HEX >= 0x030C00A7
- #ifndef _PyLong_SIGN_MASK
- #define _PyLong_SIGN_MASK 3
- #endif
- #ifndef _PyLong_NON_SIZE_BITS
- #define _PyLong_NON_SIZE_BITS 3
- #endif
- #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK)
- #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0)
- #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x))
- #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1)
- #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0)
- #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0])
- #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS))
- #define __Pyx_PyLong_SignedDigitCount(x)\
- ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x))
- #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue)
- #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x)
- #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x)
- #else
- #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS))
- #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0])
- #endif
- typedef Py_ssize_t __Pyx_compact_pylong;
- typedef size_t __Pyx_compact_upylong;
- #else // Py < 3.12
- #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0)
- #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0)
- #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0)
- #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0)
- #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0])
- #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x))
- #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x)
- #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1)
- #define __Pyx_PyLong_CompactValue(x)\
- ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? -(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
- typedef sdigit __Pyx_compact_pylong;
- typedef digit __Pyx_compact_upylong;
- #endif
- #if PY_VERSION_HEX >= 0x030C00A5
- #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit)
- #else
- #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit)
- #endif
-#endif
-#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-static int __Pyx_sys_getdefaultencoding_not_ascii;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
- PyObject* sys;
- PyObject* default_encoding = NULL;
- PyObject* ascii_chars_u = NULL;
- PyObject* ascii_chars_b = NULL;
- const char* default_encoding_c;
- sys = PyImport_ImportModule("sys");
- if (!sys) goto bad;
- default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
- Py_DECREF(sys);
- if (!default_encoding) goto bad;
- default_encoding_c = PyBytes_AsString(default_encoding);
- if (!default_encoding_c) goto bad;
- if (strcmp(default_encoding_c, "ascii") == 0) {
- __Pyx_sys_getdefaultencoding_not_ascii = 0;
- } else {
- char ascii_chars[128];
- int c;
- for (c = 0; c < 128; c++) {
- ascii_chars[c] = (char) c;
- }
- __Pyx_sys_getdefaultencoding_not_ascii = 1;
- ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
- if (!ascii_chars_u) goto bad;
- ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
- if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
- PyErr_Format(
- PyExc_ValueError,
- "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
- default_encoding_c);
- goto bad;
- }
- Py_DECREF(ascii_chars_u);
- Py_DECREF(ascii_chars_b);
- }
- Py_DECREF(default_encoding);
- return 0;
-bad:
- Py_XDECREF(default_encoding);
- Py_XDECREF(ascii_chars_u);
- Py_XDECREF(ascii_chars_b);
- return -1;
-}
-#endif
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
-#else
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
-static char* __PYX_DEFAULT_STRING_ENCODING;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
- PyObject* sys;
- PyObject* default_encoding = NULL;
- char* default_encoding_c;
- sys = PyImport_ImportModule("sys");
- if (!sys) goto bad;
- default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
- Py_DECREF(sys);
- if (!default_encoding) goto bad;
- default_encoding_c = PyBytes_AsString(default_encoding);
- if (!default_encoding_c) goto bad;
- __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
- if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
- strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
- Py_DECREF(default_encoding);
- return 0;
-bad:
- Py_XDECREF(default_encoding);
- return -1;
-}
-#endif
-#endif
-
-
-/* Test for GCC > 2.95 */
-#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
- #define likely(x) __builtin_expect(!!(x), 1)
- #define unlikely(x) __builtin_expect(!!(x), 0)
-#else /* !__GNUC__ or GCC < 2.95 */
- #define likely(x) (x)
- #define unlikely(x) (x)
-#endif /* __GNUC__ */
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
-
-#if !CYTHON_USE_MODULE_STATE
-static PyObject *__pyx_m = NULL;
-#endif
-static int __pyx_lineno;
-static int __pyx_clineno = 0;
-static const char * __pyx_cfilenm = __FILE__;
-static const char *__pyx_filename;
-
-/* #### Code section: filename_table ### */
-
-static const char *__pyx_f[] = {
- "core.pyx",
- "<stringsource>",
-};
-/* #### Code section: utility_code_proto_before_types ### */
-/* ForceInitThreads.proto */
-#ifndef __PYX_FORCE_INIT_THREADS
- #define __PYX_FORCE_INIT_THREADS 0
-#endif
-
-/* NoFastGil.proto */
-#define __Pyx_PyGILState_Ensure PyGILState_Ensure
-#define __Pyx_PyGILState_Release PyGILState_Release
-#define __Pyx_FastGIL_Remember()
-#define __Pyx_FastGIL_Forget()
-#define __Pyx_FastGilFuncInit()
-
-/* BufferFormatStructs.proto */
-struct __Pyx_StructField_;
-#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
-typedef struct {
- const char* name;
- struct __Pyx_StructField_* fields;
- size_t size;
- size_t arraysize[8];
- int ndim;
- char typegroup;
- char is_unsigned;
- int flags;
-} __Pyx_TypeInfo;
-typedef struct __Pyx_StructField_ {
- __Pyx_TypeInfo* type;
- const char* name;
- size_t offset;
-} __Pyx_StructField;
-typedef struct {
- __Pyx_StructField* field;
- size_t parent_offset;
-} __Pyx_BufFmt_StackElem;
-typedef struct {
- __Pyx_StructField root;
- __Pyx_BufFmt_StackElem* head;
- size_t fmt_offset;
- size_t new_count, enc_count;
- size_t struct_alignment;
- int is_complex;
- char enc_type;
- char new_packmode;
- char enc_packmode;
- char is_valid_array;
-} __Pyx_BufFmt_Context;
-
-/* Atomics.proto */
-#include <pythread.h>
-#ifndef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 1
-#endif
-#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
-#define __pyx_atomic_int_type int
-#define __pyx_nonatomic_int_type int
-#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
- (__STDC_VERSION__ >= 201112L) &&\
- !defined(__STDC_NO_ATOMICS__))
- #include <stdatomic.h>
-#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
- (__cplusplus >= 201103L) ||\
- (defined(_MSC_VER) && _MSC_VER >= 1700)))
- #include <atomic>
-#endif
-#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
- (__STDC_VERSION__ >= 201112L) &&\
- !defined(__STDC_NO_ATOMICS__) &&\
- ATOMIC_INT_LOCK_FREE == 2)
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type atomic_int
- #define __pyx_atomic_incr_aligned(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
- #define __pyx_atomic_decr_aligned(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
- #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
- #pragma message ("Using standard C atomics")
- #elif defined(__PYX_DEBUG_ATOMICS)
- #warning "Using standard C atomics"
- #endif
-#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
- (__cplusplus >= 201103L) ||\
-\
- (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
- ATOMIC_INT_LOCK_FREE == 2)
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type std::atomic_int
- #define __pyx_atomic_incr_aligned(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
- #define __pyx_atomic_decr_aligned(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
- #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
- #pragma message ("Using standard C++ atomics")
- #elif defined(__PYX_DEBUG_ATOMICS)
- #warning "Using standard C++ atomics"
- #endif
-#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
- (__GNUC_MINOR__ > 1 ||\
- (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
- #define __pyx_atomic_incr_aligned(value) __sync_fetch_and_add(value, 1)
- #define __pyx_atomic_decr_aligned(value) __sync_fetch_and_sub(value, 1)
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Using GNU atomics"
- #endif
-#elif CYTHON_ATOMICS && defined(_MSC_VER)
- #include <intrin.h>
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type long
- #define __pyx_nonatomic_int_type long
- #pragma intrinsic (_InterlockedExchangeAdd)
- #define __pyx_atomic_incr_aligned(value) _InterlockedExchangeAdd(value, 1)
- #define __pyx_atomic_decr_aligned(value) _InterlockedExchangeAdd(value, -1)
- #ifdef __PYX_DEBUG_ATOMICS
- #pragma message ("Using MSVC atomics")
- #endif
-#else
- #undef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 0
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Not using atomics"
- #endif
-#endif
-#if CYTHON_ATOMICS
- #define __pyx_add_acquisition_count(memview)\
- __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview))
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview))
-#else
- #define __pyx_add_acquisition_count(memview)\
- __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
-#endif
-
-/* MemviewSliceStruct.proto */
-struct __pyx_memoryview_obj;
-typedef struct {
- struct __pyx_memoryview_obj *memview;
- char *data;
- Py_ssize_t shape[8];
- Py_ssize_t strides[8];
- Py_ssize_t suboffsets[8];
-} __Pyx_memviewslice;
-#define __Pyx_MemoryView_Len(m) (m.shape[0])
-
-/* #### Code section: numeric_typedefs ### */
-/* #### Code section: complex_type_declarations ### */
-/* #### Code section: type_declarations ### */
-
-/*--- Type declarations ---*/
-struct __pyx_array_obj;
-struct __pyx_MemviewEnum_obj;
-struct __pyx_memoryview_obj;
-struct __pyx_memoryviewslice_obj;
-struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each;
-
-/* "monotonic_align/core.pyx":7
- * @cython.boundscheck(False)
- * @cython.wraparound(False)
- * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
- * cdef int x
- * cdef int y
- */
-struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each {
- int __pyx_n;
- float max_neg_val;
-};
-
-/* "View.MemoryView":114
- * @cython.collection_type("sequence")
- * @cname("__pyx_array")
- * cdef class array: # <<<<<<<<<<<<<<
- *
- * cdef:
- */
-struct __pyx_array_obj {
- PyObject_HEAD
- struct __pyx_vtabstruct_array *__pyx_vtab;
- char *data;
- Py_ssize_t len;
- char *format;
- int ndim;
- Py_ssize_t *_shape;
- Py_ssize_t *_strides;
- Py_ssize_t itemsize;
- PyObject *mode;
- PyObject *_format;
- void (*callback_free_data)(void *);
- int free_data;
- int dtype_is_object;
-};
-
-
-/* "View.MemoryView":302
- *
- * @cname('__pyx_MemviewEnum')
- * cdef class Enum(object): # <<<<<<<<<<<<<<
- * cdef object name
- * def __init__(self, name):
- */
-struct __pyx_MemviewEnum_obj {
- PyObject_HEAD
- PyObject *name;
-};
-
-
-/* "View.MemoryView":337
- *
- * @cname('__pyx_memoryview')
- * cdef class memoryview: # <<<<<<<<<<<<<<
- *
- * cdef object obj
- */
-struct __pyx_memoryview_obj {
- PyObject_HEAD
- struct __pyx_vtabstruct_memoryview *__pyx_vtab;
- PyObject *obj;
- PyObject *_size;
- PyObject *_array_interface;
- PyThread_type_lock lock;
- __pyx_atomic_int_type acquisition_count;
- Py_buffer view;
- int flags;
- int dtype_is_object;
- __Pyx_TypeInfo *typeinfo;
-};
-
-
-/* "View.MemoryView":952
- * @cython.collection_type("sequence")
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
- * "Internal class for passing memoryview slices to Python"
- *
- */
-struct __pyx_memoryviewslice_obj {
- struct __pyx_memoryview_obj __pyx_base;
- __Pyx_memviewslice from_slice;
- PyObject *from_object;
- PyObject *(*to_object_func)(char *);
- int (*to_dtype_func)(char *, PyObject *);
-};
-
-
-
-/* "View.MemoryView":114
- * @cython.collection_type("sequence")
- * @cname("__pyx_array")
- * cdef class array: # <<<<<<<<<<<<<<
- *
- * cdef:
- */
-
-struct __pyx_vtabstruct_array {
- PyObject *(*get_memview)(struct __pyx_array_obj *);
-};
-static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
-
-
-/* "View.MemoryView":337
- *
- * @cname('__pyx_memoryview')
- * cdef class memoryview: # <<<<<<<<<<<<<<
- *
- * cdef object obj
- */
-
-struct __pyx_vtabstruct_memoryview {
- char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
- PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
- PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
- PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
- PyObject *(*_get_base)(struct __pyx_memoryview_obj *);
-};
-static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
-
-
-/* "View.MemoryView":952
- * @cython.collection_type("sequence")
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
- * "Internal class for passing memoryview slices to Python"
- *
- */
-
-struct __pyx_vtabstruct__memoryviewslice {
- struct __pyx_vtabstruct_memoryview __pyx_base;
-};
-static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
-/* #### Code section: utility_code_proto ### */
-
-/* --- Runtime support code (head) --- */
-/* Refnanny.proto */
-#ifndef CYTHON_REFNANNY
- #define CYTHON_REFNANNY 0
-#endif
-#if CYTHON_REFNANNY
- typedef struct {
- void (*INCREF)(void*, PyObject*, Py_ssize_t);
- void (*DECREF)(void*, PyObject*, Py_ssize_t);
- void (*GOTREF)(void*, PyObject*, Py_ssize_t);
- void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
- void* (*SetupContext)(const char*, Py_ssize_t, const char*);
- void (*FinishContext)(void**);
- } __Pyx_RefNannyAPIStruct;
- static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
- static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
- #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
-#ifdef WITH_THREAD
- #define __Pyx_RefNannySetupContext(name, acquire_gil)\
- if (acquire_gil) {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
- PyGILState_Release(__pyx_gilstate_save);\
- } else {\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
- }
- #define __Pyx_RefNannyFinishContextNogil() {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __Pyx_RefNannyFinishContext();\
- PyGILState_Release(__pyx_gilstate_save);\
- }
-#else
- #define __Pyx_RefNannySetupContext(name, acquire_gil)\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__))
- #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext()
-#endif
- #define __Pyx_RefNannyFinishContextNogil() {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __Pyx_RefNannyFinishContext();\
- PyGILState_Release(__pyx_gilstate_save);\
- }
- #define __Pyx_RefNannyFinishContext()\
- __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
- #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
- #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
- #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
- #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0)
-#else
- #define __Pyx_RefNannyDeclarations
- #define __Pyx_RefNannySetupContext(name, acquire_gil)
- #define __Pyx_RefNannyFinishContextNogil()
- #define __Pyx_RefNannyFinishContext()
- #define __Pyx_INCREF(r) Py_INCREF(r)
- #define __Pyx_DECREF(r) Py_DECREF(r)
- #define __Pyx_GOTREF(r)
- #define __Pyx_GIVEREF(r)
- #define __Pyx_XINCREF(r) Py_XINCREF(r)
- #define __Pyx_XDECREF(r) Py_XDECREF(r)
- #define __Pyx_XGOTREF(r)
- #define __Pyx_XGIVEREF(r)
-#endif
-#define __Pyx_Py_XDECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; Py_XDECREF(tmp);\
- } while (0)
-#define __Pyx_XDECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; __Pyx_XDECREF(tmp);\
- } while (0)
-#define __Pyx_DECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; __Pyx_DECREF(tmp);\
- } while (0)
-#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
-#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
-
-/* PyErrExceptionMatches.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
-static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
-#else
-#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
-#endif
-
-/* PyThreadStateGet.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
-#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
-#if PY_VERSION_HEX >= 0x030C00A6
-#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL)
-#else
-#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type)
-#endif
-#else
-#define __Pyx_PyThreadState_declare
-#define __Pyx_PyThreadState_assign
-#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred()
-#endif
-
-/* PyErrFetchRestore.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
-#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6
-#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
-#else
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#endif
-#else
-#define __Pyx_PyErr_Clear() PyErr_Clear()
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
-#endif
-
-/* PyObjectGetAttrStr.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
-#endif
-
-/* PyObjectGetAttrStrNoError.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
-
-/* GetBuiltinName.proto */
-static PyObject *__Pyx_GetBuiltinName(PyObject *name);
-
-/* TupleAndListFromArray.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n);
-static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n);
-#endif
-
-/* IncludeStringH.proto */
-#include <string.h>
-
-/* BytesEquals.proto */
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* UnicodeEquals.proto */
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* fastcall.proto */
-#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i)
-#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds)
-#define __Pyx_KwValues_VARARGS(args, nargs) NULL
-#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s)
-#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw)
-#if CYTHON_METH_FASTCALL
- #define __Pyx_Arg_FASTCALL(args, i) args[i]
- #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds)
- #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs))
- static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s);
- #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw)
-#else
- #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS
- #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS
- #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS
- #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS
- #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start)
-#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start)
-#else
-#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop)
-#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop)
-#endif
-
-/* RaiseArgTupleInvalid.proto */
-static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
- Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
-
-/* RaiseDoubleKeywords.proto */
-static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
-
-/* ParseKeywords.proto */
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues,
- PyObject **argnames[],
- PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,
- const char* function_name);
-
-/* ArgTypeTest.proto */
-#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
- ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 1 :\
- __Pyx__ArgTypeTest(obj, type, name, exact))
-static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
-
-/* RaiseException.proto */
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
-
-/* PyFunctionFastCall.proto */
-#if CYTHON_FAST_PYCALL
-#if !CYTHON_VECTORCALL
-#define __Pyx_PyFunction_FastCall(func, args, nargs)\
- __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
-static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
-#endif
-#define __Pyx_BUILD_ASSERT_EXPR(cond)\
- (sizeof(char [1 - 2*!(cond)]) - 1)
-#ifndef Py_MEMBER_SIZE
-#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
-#endif
-#if !CYTHON_VECTORCALL
-#if PY_VERSION_HEX >= 0x03080000
- #include "frameobject.h"
-#if PY_VERSION_HEX >= 0x030b00a6
- #ifndef Py_BUILD_CORE
- #define Py_BUILD_CORE 1
- #endif
- #include "internal/pycore_frame.h"
-#endif
- #define __Pxy_PyFrame_Initialize_Offsets()
- #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus)
-#else
- static size_t __pyx_pyframe_localsplus_offset = 0;
- #include "frameobject.h"
- #define __Pxy_PyFrame_Initialize_Offsets()\
- ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
- (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
- #define __Pyx_PyFrame_GetLocalsplus(frame)\
- (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
-#endif
-#endif
-#endif
-
-/* PyObjectCall.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
-#else
-#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
-#endif
-
-/* PyObjectCallMethO.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
-#endif
-
-/* PyObjectFastCall.proto */
-#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL)
-static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs);
-
-/* RaiseUnexpectedTypeError.proto */
-static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj);
-
-/* GCCDiagnostics.proto */
-#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
-#define __Pyx_HAS_GCC_DIAGNOSTIC
-#endif
-
-/* BuildPyUnicode.proto */
-static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength,
- int prepend_sign, char padding_char);
-
-/* CIntToPyUnicode.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char);
-
-/* CIntToPyUnicode.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char);
-
-/* JoinPyUnicode.proto */
-static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength,
- Py_UCS4 max_char);
-
-/* StrEquals.proto */
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
-#else
-#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
-#endif
-
-/* PyObjectFormatSimple.proto */
-#if CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- PyObject_Format(s, f))
-#elif PY_MAJOR_VERSION < 3
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") :\
- PyObject_Format(s, f))
-#elif CYTHON_USE_TYPE_SLOTS
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\
- likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\
- PyObject_Format(s, f))
-#else
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- PyObject_Format(s, f))
-#endif
-
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
-/* GetAttr.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
-
-/* GetItemInt.proto */
-#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
- (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
- __Pyx_GetItemInt_Generic(o, to_py_func(i))))
-#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
- (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
- int wraparound, int boundscheck);
-#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
- (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
- int wraparound, int boundscheck);
-static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
- int is_list, int wraparound, int boundscheck);
-
-/* PyObjectCallOneArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
-
-/* ObjectGetItem.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key);
-#else
-#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
-#endif
-
-/* KeywordStringCheck.proto */
-static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed);
-
-/* DivInt[Py_ssize_t].proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
-
-/* UnaryNegOverflows.proto */
-#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x)\
- (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
-
-/* GetAttr3.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
-
-/* PyDictVersioning.proto */
-#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
-#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
-#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
- (version_var) = __PYX_GET_DICT_VERSION(dict);\
- (cache_var) = (value);
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
- static PY_UINT64_T __pyx_dict_version = 0;\
- static PyObject *__pyx_dict_cached_value = NULL;\
- if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
- (VAR) = __pyx_dict_cached_value;\
- } else {\
- (VAR) = __pyx_dict_cached_value = (LOOKUP);\
- __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
- }\
-}
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
-static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
-#else
-#define __PYX_GET_DICT_VERSION(dict) (0)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
-#endif
-
-/* GetModuleGlobalName.proto */
-#if CYTHON_USE_DICT_VERSIONS
-#define __Pyx_GetModuleGlobalName(var, name) do {\
- static PY_UINT64_T __pyx_dict_version = 0;\
- static PyObject *__pyx_dict_cached_value = NULL;\
- (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
- (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
- __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-} while(0)
-#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\
- PY_UINT64_T __pyx_dict_version;\
- PyObject *__pyx_dict_cached_value;\
- (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-} while(0)
-static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
-#else
-#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
-#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
-static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
-#endif
-
-/* AssertionsEnabled.proto */
-#define __Pyx_init_assertions_enabled()
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
- #define __pyx_assertions_enabled() (1)
-#elif PY_VERSION_HEX < 0x03080000 || CYTHON_COMPILING_IN_PYPY || defined(Py_LIMITED_API)
- #define __pyx_assertions_enabled() (!Py_OptimizeFlag)
-#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030900A6
- static int __pyx_assertions_enabled_flag;
- #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag)
- #undef __Pyx_init_assertions_enabled
- static void __Pyx_init_assertions_enabled(void) {
- __pyx_assertions_enabled_flag = ! _PyInterpreterState_GetConfig(__Pyx_PyThreadState_Current->interp)->optimization_level;
- }
-#else
- #define __pyx_assertions_enabled() (!Py_OptimizeFlag)
-#endif
-
-/* RaiseTooManyValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
-
-/* RaiseNeedMoreValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
-
-/* RaiseNoneIterError.proto */
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
-
-/* ExtTypeTest.proto */
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
-
-/* GetTopmostException.proto */
-#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE
-static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
-#endif
-
-/* SaveResetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-#else
-#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
-#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
-#endif
-
-/* GetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
-static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* SwapException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* Import.proto */
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
-/* ImportDottedModule.proto */
-static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple);
-#if PY_MAJOR_VERSION >= 3
-static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple);
-#endif
-
-/* ssize_strlen.proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
-
-/* FastTypeChecks.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
-#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2)
-static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
-static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
-#else
-#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
-#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2))
-#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
-#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
-#endif
-#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2)
-#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
-
-CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-/* ListCompAppend.proto */
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
-static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
- PyListObject* L = (PyListObject*) list;
- Py_ssize_t len = Py_SIZE(list);
- if (likely(L->allocated > len)) {
- Py_INCREF(x);
- PyList_SET_ITEM(list, len, x);
- __Pyx_SET_SIZE(list, len + 1);
- return 0;
- }
- return PyList_Append(list, x);
-}
-#else
-#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
-#endif
-
-/* PySequenceMultiply.proto */
-#define __Pyx_PySequence_Multiply_Left(mul, seq) __Pyx_PySequence_Multiply(seq, mul)
-static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul);
-
-/* SetItemInt.proto */
-#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
- (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
- __Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
-static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
- int is_list, int wraparound, int boundscheck);
-
-/* RaiseUnboundLocalError.proto */
-static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
-
-/* DivInt[long].proto */
-static CYTHON_INLINE long __Pyx_div_long(long, long);
-
-/* PySequenceContains.proto */
-static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
- int result = PySequence_Contains(seq, item);
- return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
-}
-
-/* ImportFrom.proto */
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
-
-/* HasAttr.proto */
-static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
-
-/* ErrOccurredWithGIL.proto */
-static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void);
-
-/* PyObject_GenericGetAttrNoDict.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
-#endif
-
-/* PyObject_GenericGetAttr.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
-#endif
-
-/* IncludeStructmemberH.proto */
-#include <structmember.h>
-
-/* FixUpExtensionType.proto */
-#if CYTHON_USE_TYPE_SPECS
-static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type);
-#endif
-
-/* PyObjectCallNoArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
-
-/* PyObjectGetMethod.proto */
-static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
-
-/* PyObjectCallMethod0.proto */
-static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
-
-/* ValidateBasesTuple.proto */
-#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
-static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases);
-#endif
-
-/* PyType_Ready.proto */
-CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);
-
-/* SetVTable.proto */
-static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable);
-
-/* GetVTable.proto */
-static void* __Pyx_GetVtable(PyTypeObject *type);
-
-/* MergeVTables.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-static int __Pyx_MergeVtables(PyTypeObject *type);
-#endif
-
-/* SetupReduce.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-static int __Pyx_setup_reduce(PyObject* type_obj);
-#endif
-
-/* FetchSharedCythonModule.proto */
-static PyObject *__Pyx_FetchSharedCythonABIModule(void);
-
-/* FetchCommonType.proto */
-#if !CYTHON_USE_TYPE_SPECS
-static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
-#else
-static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases);
-#endif
-
-/* PyMethodNew.proto */
-#if PY_MAJOR_VERSION >= 3
-static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
- CYTHON_UNUSED_VAR(typ);
- if (!self)
- return __Pyx_NewRef(func);
- return PyMethod_New(func, self);
-}
-#else
- #define __Pyx_PyMethod_New PyMethod_New
-#endif
-
-/* PyVectorcallFastCallDict.proto */
-#if CYTHON_METH_FASTCALL
-static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw);
-#endif
-
-/* CythonFunctionShared.proto */
-#define __Pyx_CyFunction_USED
-#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
-#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
-#define __Pyx_CYFUNCTION_CCLASS 0x04
-#define __Pyx_CYFUNCTION_COROUTINE 0x08
-#define __Pyx_CyFunction_GetClosure(f)\
- (((__pyx_CyFunctionObject *) (f))->func_closure)
-#if PY_VERSION_HEX < 0x030900B1
- #define __Pyx_CyFunction_GetClassObj(f)\
- (((__pyx_CyFunctionObject *) (f))->func_classobj)
-#else
- #define __Pyx_CyFunction_GetClassObj(f)\
- ((PyObject*) ((PyCMethodObject *) (f))->mm_class)
-#endif
-#define __Pyx_CyFunction_SetClassObj(f, classobj)\
- __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj))
-#define __Pyx_CyFunction_Defaults(type, f)\
- ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
-#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
- ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
-typedef struct {
-#if PY_VERSION_HEX < 0x030900B1
- PyCFunctionObject func;
-#else
- PyCMethodObject func;
-#endif
-#if CYTHON_BACKPORT_VECTORCALL
- __pyx_vectorcallfunc func_vectorcall;
-#endif
-#if PY_VERSION_HEX < 0x030500A0
- PyObject *func_weakreflist;
-#endif
- PyObject *func_dict;
- PyObject *func_name;
- PyObject *func_qualname;
- PyObject *func_doc;
- PyObject *func_globals;
- PyObject *func_code;
- PyObject *func_closure;
-#if PY_VERSION_HEX < 0x030900B1
- PyObject *func_classobj;
-#endif
- void *defaults;
- int defaults_pyobjects;
- size_t defaults_size; // used by FusedFunction for copying defaults
- int flags;
- PyObject *defaults_tuple;
- PyObject *defaults_kwdict;
- PyObject *(*defaults_getter)(PyObject *);
- PyObject *func_annotations;
- PyObject *func_is_coroutine;
-} __pyx_CyFunctionObject;
-#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType)
-#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type)
-#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType)
-static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
- int flags, PyObject* qualname,
- PyObject *closure,
- PyObject *module, PyObject *globals,
- PyObject* code);
-static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj);
-static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
- size_t size,
- int pyobjects);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
- PyObject *tuple);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
- PyObject *dict);
-static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
- PyObject *dict);
-static int __pyx_CyFunction_init(PyObject *module);
-#if CYTHON_METH_FASTCALL
-static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-#if CYTHON_BACKPORT_VECTORCALL
-#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall)
-#else
-#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall)
-#endif
-#endif
-
-/* CythonFunction.proto */
-static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
- int flags, PyObject* qualname,
- PyObject *closure,
- PyObject *module, PyObject *globals,
- PyObject* code);
-
-/* CLineInTraceback.proto */
-#ifdef CYTHON_CLINE_IN_TRACEBACK
-#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
-#else
-static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
-#endif
-
-/* CodeObjectCache.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-typedef struct {
- PyCodeObject* code_object;
- int code_line;
-} __Pyx_CodeObjectCacheEntry;
-struct __Pyx_CodeObjectCache {
- int count;
- int max_count;
- __Pyx_CodeObjectCacheEntry* entries;
-};
-static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
-static PyCodeObject *__pyx_find_code_object(int code_line);
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
-#endif
-
-/* AddTraceback.proto */
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
- int py_line, const char *filename);
-
-#if PY_MAJOR_VERSION < 3
- static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
- static void __Pyx_ReleaseBuffer(Py_buffer *view);
-#else
- #define __Pyx_GetBuffer PyObject_GetBuffer
- #define __Pyx_ReleaseBuffer PyBuffer_Release
-#endif
-
-
-/* BufferStructDeclare.proto */
-typedef struct {
- Py_ssize_t shape, strides, suboffsets;
-} __Pyx_Buf_DimInfo;
-typedef struct {
- size_t refcount;
- Py_buffer pybuffer;
-} __Pyx_Buffer;
-typedef struct {
- __Pyx_Buffer *rcbuffer;
- char *data;
- __Pyx_Buf_DimInfo diminfo[8];
-} __Pyx_LocalBuf_ND;
-
-/* MemviewSliceIsContig.proto */
-static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
-
-/* OverlappingSlices.proto */
-static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
- __Pyx_memviewslice *slice2,
- int ndim, size_t itemsize);
-
-/* IsLittleEndian.proto */
-static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
-
-/* BufferFormatCheck.proto */
-static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
-static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
- __Pyx_BufFmt_StackElem* stack,
- __Pyx_TypeInfo* type);
-
-/* TypeInfoCompare.proto */
-static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
-
-/* MemviewSliceValidateAndInit.proto */
-static int __Pyx_ValidateAndInit_memviewslice(
- int *axes_specs,
- int c_or_f_flag,
- int buf_flags,
- int ndim,
- __Pyx_TypeInfo *dtype,
- __Pyx_BufFmt_StackElem stack[],
- __Pyx_memviewslice *memviewslice,
- PyObject *original_obj);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
-
-/* MemviewSliceCopyTemplate.proto */
-static __Pyx_memviewslice
-__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
- const char *mode, int ndim,
- size_t sizeof_dtype, int contig_flag,
- int dtype_is_object);
-
-/* MemviewSliceInit.proto */
-#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
-#define __Pyx_MEMVIEW_DIRECT 1
-#define __Pyx_MEMVIEW_PTR 2
-#define __Pyx_MEMVIEW_FULL 4
-#define __Pyx_MEMVIEW_CONTIG 8
-#define __Pyx_MEMVIEW_STRIDED 16
-#define __Pyx_MEMVIEW_FOLLOW 32
-#define __Pyx_IS_C_CONTIG 1
-#define __Pyx_IS_F_CONTIG 2
-static int __Pyx_init_memviewslice(
- struct __pyx_memoryview_obj *memview,
- int ndim,
- __Pyx_memviewslice *memviewslice,
- int memview_is_new_reference);
-static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
- __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
-static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
- __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
-#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count)
-#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
-#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__)
-static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
-static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *, int, int);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
-
-/* FormatTypeName.proto */
-#if CYTHON_COMPILING_IN_LIMITED_API
-typedef PyObject *__Pyx_TypeName;
-#define __Pyx_FMT_TYPENAME "%U"
-static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp);
-#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj)
-#else
-typedef const char *__Pyx_TypeName;
-#define __Pyx_FMT_TYPENAME "%.200s"
-#define __Pyx_PyType_GetName(tp) ((tp)->tp_name)
-#define __Pyx_DECREF_TypeName(obj)
-#endif
-
-/* CheckBinaryVersion.proto */
-static int __Pyx_check_binary_version(void);
-
-/* InitStrings.proto */
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
-
-/* #### Code section: module_declarations ### */
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
-static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self); /* proto*/
-static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto*/
-
-/* Module declarations from "cython.view" */
-
-/* Module declarations from "cython.dataclasses" */
-
-/* Module declarations from "cython" */
-
-/* Module declarations from "monotonic_align.core" */
-static PyObject *__pyx_collections_abc_Sequence = 0;
-static PyObject *generic = 0;
-static PyObject *strided = 0;
-static PyObject *indirect = 0;
-static PyObject *contiguous = 0;
-static PyObject *indirect_contiguous = 0;
-static int __pyx_memoryview_thread_locks_used;
-static PyThread_type_lock __pyx_memoryview_thread_locks[8];
-static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/
-static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
-static int __pyx_array_allocate_buffer(struct __pyx_array_obj *); /*proto*/
-static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
-static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
-static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
-static PyObject *_unellipsify(PyObject *, int); /*proto*/
-static int assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
-static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
-static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
-static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
-static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
-static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
-static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
-static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
-static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
-static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
-static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
-static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memoryview_err_dim(PyObject *, PyObject *, int); /*proto*/
-static int __pyx_memoryview_err(PyObject *, PyObject *); /*proto*/
-static int __pyx_memoryview_err_no_memory(void); /*proto*/
-static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
-static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
-static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
-static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
-/* #### Code section: typeinfo ### */
-static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, __PYX_IS_UNSIGNED(int) ? 'U' : 'I', __PYX_IS_UNSIGNED(int), 0 };
-static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
-/* #### Code section: before_global_var ### */
-#define __Pyx_MODULE_NAME "monotonic_align.core"
-extern int __pyx_module_is_main_monotonic_align__core;
-int __pyx_module_is_main_monotonic_align__core = 0;
-
-/* Implementation of "monotonic_align.core" */
-/* #### Code section: global_var ### */
-static PyObject *__pyx_builtin_range;
-static PyObject *__pyx_builtin___import__;
-static PyObject *__pyx_builtin_ValueError;
-static PyObject *__pyx_builtin_MemoryError;
-static PyObject *__pyx_builtin_enumerate;
-static PyObject *__pyx_builtin_TypeError;
-static PyObject *__pyx_builtin_AssertionError;
-static PyObject *__pyx_builtin_Ellipsis;
-static PyObject *__pyx_builtin_id;
-static PyObject *__pyx_builtin_IndexError;
-/* #### Code section: string_decls ### */
-static const char __pyx_k_[] = ": ";
-static const char __pyx_k_O[] = "O";
-static const char __pyx_k_c[] = "c";
-static const char __pyx_k__2[] = ".";
-static const char __pyx_k__3[] = "*";
-static const char __pyx_k__6[] = "'";
-static const char __pyx_k__7[] = ")";
-static const char __pyx_k_gc[] = "gc";
-static const char __pyx_k_id[] = "id";
-static const char __pyx_k__23[] = "?";
-static const char __pyx_k_abc[] = "abc";
-static const char __pyx_k_and[] = " and ";
-static const char __pyx_k_got[] = " (got ";
-static const char __pyx_k_new[] = "__new__";
-static const char __pyx_k_obj[] = "obj";
-static const char __pyx_k_sys[] = "sys";
-static const char __pyx_k_base[] = "base";
-static const char __pyx_k_dict[] = "__dict__";
-static const char __pyx_k_main[] = "__main__";
-static const char __pyx_k_mode[] = "mode";
-static const char __pyx_k_name[] = "name";
-static const char __pyx_k_ndim[] = "ndim";
-static const char __pyx_k_pack[] = "pack";
-static const char __pyx_k_size[] = "size";
-static const char __pyx_k_spec[] = "__spec__";
-static const char __pyx_k_step[] = "step";
-static const char __pyx_k_stop[] = "stop";
-static const char __pyx_k_t_xs[] = "t_xs";
-static const char __pyx_k_t_ys[] = "t_ys";
-static const char __pyx_k_test[] = "__test__";
-static const char __pyx_k_ASCII[] = "ASCII";
-static const char __pyx_k_class[] = "__class__";
-static const char __pyx_k_count[] = "count";
-static const char __pyx_k_error[] = "error";
-static const char __pyx_k_flags[] = "flags";
-static const char __pyx_k_index[] = "index";
-static const char __pyx_k_paths[] = "paths";
-static const char __pyx_k_range[] = "range";
-static const char __pyx_k_shape[] = "shape";
-static const char __pyx_k_start[] = "start";
-static const char __pyx_k_enable[] = "enable";
-static const char __pyx_k_encode[] = "encode";
-static const char __pyx_k_format[] = "format";
-static const char __pyx_k_import[] = "__import__";
-static const char __pyx_k_name_2[] = "__name__";
-static const char __pyx_k_pickle[] = "pickle";
-static const char __pyx_k_reduce[] = "__reduce__";
-static const char __pyx_k_struct[] = "struct";
-static const char __pyx_k_unpack[] = "unpack";
-static const char __pyx_k_update[] = "update";
-static const char __pyx_k_values[] = "values";
-static const char __pyx_k_disable[] = "disable";
-static const char __pyx_k_fortran[] = "fortran";
-static const char __pyx_k_memview[] = "memview";
-static const char __pyx_k_Ellipsis[] = "Ellipsis";
-static const char __pyx_k_Sequence[] = "Sequence";
-static const char __pyx_k_core_pyx[] = "core.pyx";
-static const char __pyx_k_getstate[] = "__getstate__";
-static const char __pyx_k_itemsize[] = "itemsize";
-static const char __pyx_k_pyx_type[] = "__pyx_type";
-static const char __pyx_k_register[] = "register";
-static const char __pyx_k_setstate[] = "__setstate__";
-static const char __pyx_k_TypeError[] = "TypeError";
-static const char __pyx_k_enumerate[] = "enumerate";
-static const char __pyx_k_isenabled[] = "isenabled";
-static const char __pyx_k_pyx_state[] = "__pyx_state";
-static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
-static const char __pyx_k_IndexError[] = "IndexError";
-static const char __pyx_k_ValueError[] = "ValueError";
-static const char __pyx_k_pyx_result[] = "__pyx_result";
-static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
-static const char __pyx_k_MemoryError[] = "MemoryError";
-static const char __pyx_k_PickleError[] = "PickleError";
-static const char __pyx_k_collections[] = "collections";
-static const char __pyx_k_initializing[] = "_initializing";
-static const char __pyx_k_is_coroutine[] = "_is_coroutine";
-static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
-static const char __pyx_k_stringsource[] = "<stringsource>";
-static const char __pyx_k_version_info[] = "version_info";
-static const char __pyx_k_class_getitem[] = "__class_getitem__";
-static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
-static const char __pyx_k_AssertionError[] = "AssertionError";
-static const char __pyx_k_maximum_path_c[] = "maximum_path_c";
-static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
-static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
-static const char __pyx_k_collections_abc[] = "collections.abc";
-static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
-static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
-static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
-static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
-static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines";
-static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
-static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
-static const char __pyx_k_monotonic_align_core[] = "monotonic_align.core";
-static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
-static const char __pyx_k_Invalid_shape_in_axis[] = "Invalid shape in axis ";
-static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
-static const char __pyx_k_Cannot_index_with_type[] = "Cannot index with type '";
-static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
-static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
-static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
-static const char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
-static const char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
-static const char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
-static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
-static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
-static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
-static const char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
-static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
-static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
-static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
-static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
-static const char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
-static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
-static const char __pyx_k_Incompatible_checksums_0x_x_vs_0[] = "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))";
-static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
-static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got ";
-static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis ";
-static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
-static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension ";
-static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
-static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
-/* #### Code section: decls ### */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
-static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */
-static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-/* #### Code section: late_includes ### */
-/* #### Code section: module_state ### */
-typedef struct {
- PyObject *__pyx_d;
- PyObject *__pyx_b;
- PyObject *__pyx_cython_runtime;
- PyObject *__pyx_empty_tuple;
- PyObject *__pyx_empty_bytes;
- PyObject *__pyx_empty_unicode;
- #ifdef __Pyx_CyFunction_USED
- PyTypeObject *__pyx_CyFunctionType;
- #endif
- #ifdef __Pyx_FusedFunction_USED
- PyTypeObject *__pyx_FusedFunctionType;
- #endif
- #ifdef __Pyx_Generator_USED
- PyTypeObject *__pyx_GeneratorType;
- #endif
- #ifdef __Pyx_IterableCoroutine_USED
- PyTypeObject *__pyx_IterableCoroutineType;
- #endif
- #ifdef __Pyx_Coroutine_USED
- PyTypeObject *__pyx_CoroutineAwaitType;
- #endif
- #ifdef __Pyx_Coroutine_USED
- PyTypeObject *__pyx_CoroutineType;
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- PyObject *__pyx_type___pyx_array;
- PyObject *__pyx_type___pyx_MemviewEnum;
- PyObject *__pyx_type___pyx_memoryview;
- PyObject *__pyx_type___pyx_memoryviewslice;
- #endif
- PyTypeObject *__pyx_array_type;
- PyTypeObject *__pyx_MemviewEnum_type;
- PyTypeObject *__pyx_memoryview_type;
- PyTypeObject *__pyx_memoryviewslice_type;
- PyObject *__pyx_kp_u_;
- PyObject *__pyx_n_s_ASCII;
- PyObject *__pyx_kp_s_All_dimensions_preceding_dimensi;
- PyObject *__pyx_n_s_AssertionError;
- PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
- PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
- PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
- PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
- PyObject *__pyx_kp_u_Cannot_index_with_type;
- PyObject *__pyx_kp_s_Cannot_transpose_memoryview_with;
- PyObject *__pyx_kp_s_Dimension_d_is_not_direct;
- PyObject *__pyx_n_s_Ellipsis;
- PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
- PyObject *__pyx_kp_s_Incompatible_checksums_0x_x_vs_0;
- PyObject *__pyx_n_s_IndexError;
- PyObject *__pyx_kp_s_Index_out_of_bounds_axis_d;
- PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
- PyObject *__pyx_kp_u_Invalid_mode_expected_c_or_fortr;
- PyObject *__pyx_kp_u_Invalid_shape_in_axis;
- PyObject *__pyx_n_s_MemoryError;
- PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
- PyObject *__pyx_kp_s_MemoryView_of_r_object;
- PyObject *__pyx_n_b_O;
- PyObject *__pyx_kp_u_Out_of_bounds_on_buffer_access_a;
- PyObject *__pyx_n_s_PickleError;
- PyObject *__pyx_n_s_Sequence;
- PyObject *__pyx_kp_s_Step_may_not_be_zero_axis_d;
- PyObject *__pyx_n_s_TypeError;
- PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
- PyObject *__pyx_n_s_ValueError;
- PyObject *__pyx_n_s_View_MemoryView;
- PyObject *__pyx_kp_u__2;
- PyObject *__pyx_n_s__23;
- PyObject *__pyx_n_s__3;
- PyObject *__pyx_kp_u__6;
- PyObject *__pyx_kp_u__7;
- PyObject *__pyx_n_s_abc;
- PyObject *__pyx_n_s_allocate_buffer;
- PyObject *__pyx_kp_u_and;
- PyObject *__pyx_n_s_asyncio_coroutines;
- PyObject *__pyx_n_s_base;
- PyObject *__pyx_n_s_c;
- PyObject *__pyx_n_u_c;
- PyObject *__pyx_n_s_class;
- PyObject *__pyx_n_s_class_getitem;
- PyObject *__pyx_n_s_cline_in_traceback;
- PyObject *__pyx_n_s_collections;
- PyObject *__pyx_kp_s_collections_abc;
- PyObject *__pyx_kp_s_contiguous_and_direct;
- PyObject *__pyx_kp_s_contiguous_and_indirect;
- PyObject *__pyx_kp_s_core_pyx;
- PyObject *__pyx_n_s_count;
- PyObject *__pyx_n_s_dict;
- PyObject *__pyx_kp_u_disable;
- PyObject *__pyx_n_s_dtype_is_object;
- PyObject *__pyx_kp_u_enable;
- PyObject *__pyx_n_s_encode;
- PyObject *__pyx_n_s_enumerate;
- PyObject *__pyx_n_s_error;
- PyObject *__pyx_n_s_flags;
- PyObject *__pyx_n_s_format;
- PyObject *__pyx_n_s_fortran;
- PyObject *__pyx_n_u_fortran;
- PyObject *__pyx_kp_u_gc;
- PyObject *__pyx_n_s_getstate;
- PyObject *__pyx_kp_u_got;
- PyObject *__pyx_kp_u_got_differing_extents_in_dimensi;
- PyObject *__pyx_n_s_id;
- PyObject *__pyx_n_s_import;
- PyObject *__pyx_n_s_index;
- PyObject *__pyx_n_s_initializing;
- PyObject *__pyx_n_s_is_coroutine;
- PyObject *__pyx_kp_u_isenabled;
- PyObject *__pyx_n_s_itemsize;
- PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
- PyObject *__pyx_n_s_main;
- PyObject *__pyx_n_s_maximum_path_c;
- PyObject *__pyx_n_s_memview;
- PyObject *__pyx_n_s_mode;
- PyObject *__pyx_n_s_monotonic_align_core;
- PyObject *__pyx_n_s_name;
- PyObject *__pyx_n_s_name_2;
- PyObject *__pyx_n_s_ndim;
- PyObject *__pyx_n_s_new;
- PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
- PyObject *__pyx_n_s_obj;
- PyObject *__pyx_n_s_pack;
- PyObject *__pyx_n_s_paths;
- PyObject *__pyx_n_s_pickle;
- PyObject *__pyx_n_s_pyx_PickleError;
- PyObject *__pyx_n_s_pyx_checksum;
- PyObject *__pyx_n_s_pyx_result;
- PyObject *__pyx_n_s_pyx_state;
- PyObject *__pyx_n_s_pyx_type;
- PyObject *__pyx_n_s_pyx_unpickle_Enum;
- PyObject *__pyx_n_s_pyx_vtable;
- PyObject *__pyx_n_s_range;
- PyObject *__pyx_n_s_reduce;
- PyObject *__pyx_n_s_reduce_cython;
- PyObject *__pyx_n_s_reduce_ex;
- PyObject *__pyx_n_s_register;
- PyObject *__pyx_n_s_setstate;
- PyObject *__pyx_n_s_setstate_cython;
- PyObject *__pyx_n_s_shape;
- PyObject *__pyx_n_s_size;
- PyObject *__pyx_n_s_spec;
- PyObject *__pyx_n_s_start;
- PyObject *__pyx_n_s_step;
- PyObject *__pyx_n_s_stop;
- PyObject *__pyx_kp_s_strided_and_direct;
- PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
- PyObject *__pyx_kp_s_strided_and_indirect;
- PyObject *__pyx_kp_s_stringsource;
- PyObject *__pyx_n_s_struct;
- PyObject *__pyx_n_s_sys;
- PyObject *__pyx_n_s_t_xs;
- PyObject *__pyx_n_s_t_ys;
- PyObject *__pyx_n_s_test;
- PyObject *__pyx_kp_s_unable_to_allocate_array_data;
- PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
- PyObject *__pyx_n_s_unpack;
- PyObject *__pyx_n_s_update;
- PyObject *__pyx_n_s_values;
- PyObject *__pyx_n_s_version_info;
- PyObject *__pyx_int_0;
- PyObject *__pyx_int_1;
- PyObject *__pyx_int_3;
- PyObject *__pyx_int_112105877;
- PyObject *__pyx_int_136983863;
- PyObject *__pyx_int_184977713;
- PyObject *__pyx_int_neg_1;
- float __pyx_k__9;
- PyObject *__pyx_slice__5;
- PyObject *__pyx_tuple__4;
- PyObject *__pyx_tuple__8;
- PyObject *__pyx_tuple__10;
- PyObject *__pyx_tuple__11;
- PyObject *__pyx_tuple__12;
- PyObject *__pyx_tuple__13;
- PyObject *__pyx_tuple__14;
- PyObject *__pyx_tuple__15;
- PyObject *__pyx_tuple__16;
- PyObject *__pyx_tuple__17;
- PyObject *__pyx_tuple__18;
- PyObject *__pyx_tuple__19;
- PyObject *__pyx_tuple__21;
- PyObject *__pyx_codeobj__20;
- PyObject *__pyx_codeobj__22;
-} __pyx_mstate;
-
-#if CYTHON_USE_MODULE_STATE
-#ifdef __cplusplus
-namespace {
- extern struct PyModuleDef __pyx_moduledef;
-} /* anonymous namespace */
-#else
-static struct PyModuleDef __pyx_moduledef;
-#endif
-
-#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o))
-
-#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef)))
-
-#define __pyx_m (PyState_FindModule(&__pyx_moduledef))
-#else
-static __pyx_mstate __pyx_mstate_global_static =
-#ifdef __cplusplus
- {};
-#else
- {0};
-#endif
-static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static;
-#endif
-/* #### Code section: module_state_clear ### */
-#if CYTHON_USE_MODULE_STATE
-static int __pyx_m_clear(PyObject *m) {
- __pyx_mstate *clear_module_state = __pyx_mstate(m);
- if (!clear_module_state) return 0;
- Py_CLEAR(clear_module_state->__pyx_d);
- Py_CLEAR(clear_module_state->__pyx_b);
- Py_CLEAR(clear_module_state->__pyx_cython_runtime);
- Py_CLEAR(clear_module_state->__pyx_empty_tuple);
- Py_CLEAR(clear_module_state->__pyx_empty_bytes);
- Py_CLEAR(clear_module_state->__pyx_empty_unicode);
- #ifdef __Pyx_CyFunction_USED
- Py_CLEAR(clear_module_state->__pyx_CyFunctionType);
- #endif
- #ifdef __Pyx_FusedFunction_USED
- Py_CLEAR(clear_module_state->__pyx_FusedFunctionType);
- #endif
- Py_CLEAR(clear_module_state->__pyx_array_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_array);
- Py_CLEAR(clear_module_state->__pyx_MemviewEnum_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_MemviewEnum);
- Py_CLEAR(clear_module_state->__pyx_memoryview_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryview);
- Py_CLEAR(clear_module_state->__pyx_memoryviewslice_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryviewslice);
- Py_CLEAR(clear_module_state->__pyx_kp_u_);
- Py_CLEAR(clear_module_state->__pyx_n_s_ASCII);
- Py_CLEAR(clear_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi);
- Py_CLEAR(clear_module_state->__pyx_n_s_AssertionError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Cannot_index_with_type);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Dimension_d_is_not_direct);
- Py_CLEAR(clear_module_state->__pyx_n_s_Ellipsis);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0);
- Py_CLEAR(clear_module_state->__pyx_n_s_IndexError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_shape_in_axis);
- Py_CLEAR(clear_module_state->__pyx_n_s_MemoryError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x);
- Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_object);
- Py_CLEAR(clear_module_state->__pyx_n_b_O);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- Py_CLEAR(clear_module_state->__pyx_n_s_PickleError);
- Py_CLEAR(clear_module_state->__pyx_n_s_Sequence);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d);
- Py_CLEAR(clear_module_state->__pyx_n_s_TypeError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Unable_to_convert_item_to_object);
- Py_CLEAR(clear_module_state->__pyx_n_s_ValueError);
- Py_CLEAR(clear_module_state->__pyx_n_s_View_MemoryView);
- Py_CLEAR(clear_module_state->__pyx_kp_u__2);
- Py_CLEAR(clear_module_state->__pyx_n_s__23);
- Py_CLEAR(clear_module_state->__pyx_n_s__3);
- Py_CLEAR(clear_module_state->__pyx_kp_u__6);
- Py_CLEAR(clear_module_state->__pyx_kp_u__7);
- Py_CLEAR(clear_module_state->__pyx_n_s_abc);
- Py_CLEAR(clear_module_state->__pyx_n_s_allocate_buffer);
- Py_CLEAR(clear_module_state->__pyx_kp_u_and);
- Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines);
- Py_CLEAR(clear_module_state->__pyx_n_s_base);
- Py_CLEAR(clear_module_state->__pyx_n_s_c);
- Py_CLEAR(clear_module_state->__pyx_n_u_c);
- Py_CLEAR(clear_module_state->__pyx_n_s_class);
- Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem);
- Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback);
- Py_CLEAR(clear_module_state->__pyx_n_s_collections);
- Py_CLEAR(clear_module_state->__pyx_kp_s_collections_abc);
- Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_direct);
- Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_core_pyx);
- Py_CLEAR(clear_module_state->__pyx_n_s_count);
- Py_CLEAR(clear_module_state->__pyx_n_s_dict);
- Py_CLEAR(clear_module_state->__pyx_kp_u_disable);
- Py_CLEAR(clear_module_state->__pyx_n_s_dtype_is_object);
- Py_CLEAR(clear_module_state->__pyx_kp_u_enable);
- Py_CLEAR(clear_module_state->__pyx_n_s_encode);
- Py_CLEAR(clear_module_state->__pyx_n_s_enumerate);
- Py_CLEAR(clear_module_state->__pyx_n_s_error);
- Py_CLEAR(clear_module_state->__pyx_n_s_flags);
- Py_CLEAR(clear_module_state->__pyx_n_s_format);
- Py_CLEAR(clear_module_state->__pyx_n_s_fortran);
- Py_CLEAR(clear_module_state->__pyx_n_u_fortran);
- Py_CLEAR(clear_module_state->__pyx_kp_u_gc);
- Py_CLEAR(clear_module_state->__pyx_n_s_getstate);
- Py_CLEAR(clear_module_state->__pyx_kp_u_got);
- Py_CLEAR(clear_module_state->__pyx_kp_u_got_differing_extents_in_dimensi);
- Py_CLEAR(clear_module_state->__pyx_n_s_id);
- Py_CLEAR(clear_module_state->__pyx_n_s_import);
- Py_CLEAR(clear_module_state->__pyx_n_s_index);
- Py_CLEAR(clear_module_state->__pyx_n_s_initializing);
- Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine);
- Py_CLEAR(clear_module_state->__pyx_kp_u_isenabled);
- Py_CLEAR(clear_module_state->__pyx_n_s_itemsize);
- Py_CLEAR(clear_module_state->__pyx_kp_s_itemsize_0_for_cython_array);
- Py_CLEAR(clear_module_state->__pyx_n_s_main);
- Py_CLEAR(clear_module_state->__pyx_n_s_maximum_path_c);
- Py_CLEAR(clear_module_state->__pyx_n_s_memview);
- Py_CLEAR(clear_module_state->__pyx_n_s_mode);
- Py_CLEAR(clear_module_state->__pyx_n_s_monotonic_align_core);
- Py_CLEAR(clear_module_state->__pyx_n_s_name);
- Py_CLEAR(clear_module_state->__pyx_n_s_name_2);
- Py_CLEAR(clear_module_state->__pyx_n_s_ndim);
- Py_CLEAR(clear_module_state->__pyx_n_s_new);
- Py_CLEAR(clear_module_state->__pyx_kp_s_no_default___reduce___due_to_non);
- Py_CLEAR(clear_module_state->__pyx_n_s_obj);
- Py_CLEAR(clear_module_state->__pyx_n_s_pack);
- Py_CLEAR(clear_module_state->__pyx_n_s_paths);
- Py_CLEAR(clear_module_state->__pyx_n_s_pickle);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_PickleError);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_checksum);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_result);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_state);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_type);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_unpickle_Enum);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_vtable);
- Py_CLEAR(clear_module_state->__pyx_n_s_range);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce_cython);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce_ex);
- Py_CLEAR(clear_module_state->__pyx_n_s_register);
- Py_CLEAR(clear_module_state->__pyx_n_s_setstate);
- Py_CLEAR(clear_module_state->__pyx_n_s_setstate_cython);
- Py_CLEAR(clear_module_state->__pyx_n_s_shape);
- Py_CLEAR(clear_module_state->__pyx_n_s_size);
- Py_CLEAR(clear_module_state->__pyx_n_s_spec);
- Py_CLEAR(clear_module_state->__pyx_n_s_start);
- Py_CLEAR(clear_module_state->__pyx_n_s_step);
- Py_CLEAR(clear_module_state->__pyx_n_s_stop);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct_or_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_stringsource);
- Py_CLEAR(clear_module_state->__pyx_n_s_struct);
- Py_CLEAR(clear_module_state->__pyx_n_s_sys);
- Py_CLEAR(clear_module_state->__pyx_n_s_t_xs);
- Py_CLEAR(clear_module_state->__pyx_n_s_t_ys);
- Py_CLEAR(clear_module_state->__pyx_n_s_test);
- Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_array_data);
- Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str);
- Py_CLEAR(clear_module_state->__pyx_n_s_unpack);
- Py_CLEAR(clear_module_state->__pyx_n_s_update);
- Py_CLEAR(clear_module_state->__pyx_n_s_values);
- Py_CLEAR(clear_module_state->__pyx_n_s_version_info);
- Py_CLEAR(clear_module_state->__pyx_int_0);
- Py_CLEAR(clear_module_state->__pyx_int_1);
- Py_CLEAR(clear_module_state->__pyx_int_3);
- Py_CLEAR(clear_module_state->__pyx_int_112105877);
- Py_CLEAR(clear_module_state->__pyx_int_136983863);
- Py_CLEAR(clear_module_state->__pyx_int_184977713);
- Py_CLEAR(clear_module_state->__pyx_int_neg_1);
- Py_CLEAR(clear_module_state->__pyx_slice__5);
- Py_CLEAR(clear_module_state->__pyx_tuple__4);
- Py_CLEAR(clear_module_state->__pyx_tuple__8);
- Py_CLEAR(clear_module_state->__pyx_tuple__10);
- Py_CLEAR(clear_module_state->__pyx_tuple__11);
- Py_CLEAR(clear_module_state->__pyx_tuple__12);
- Py_CLEAR(clear_module_state->__pyx_tuple__13);
- Py_CLEAR(clear_module_state->__pyx_tuple__14);
- Py_CLEAR(clear_module_state->__pyx_tuple__15);
- Py_CLEAR(clear_module_state->__pyx_tuple__16);
- Py_CLEAR(clear_module_state->__pyx_tuple__17);
- Py_CLEAR(clear_module_state->__pyx_tuple__18);
- Py_CLEAR(clear_module_state->__pyx_tuple__19);
- Py_CLEAR(clear_module_state->__pyx_tuple__21);
- Py_CLEAR(clear_module_state->__pyx_codeobj__20);
- Py_CLEAR(clear_module_state->__pyx_codeobj__22);
- return 0;
-}
-#endif
-/* #### Code section: module_state_traverse ### */
-#if CYTHON_USE_MODULE_STATE
-static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) {
- __pyx_mstate *traverse_module_state = __pyx_mstate(m);
- if (!traverse_module_state) return 0;
- Py_VISIT(traverse_module_state->__pyx_d);
- Py_VISIT(traverse_module_state->__pyx_b);
- Py_VISIT(traverse_module_state->__pyx_cython_runtime);
- Py_VISIT(traverse_module_state->__pyx_empty_tuple);
- Py_VISIT(traverse_module_state->__pyx_empty_bytes);
- Py_VISIT(traverse_module_state->__pyx_empty_unicode);
- #ifdef __Pyx_CyFunction_USED
- Py_VISIT(traverse_module_state->__pyx_CyFunctionType);
- #endif
- #ifdef __Pyx_FusedFunction_USED
- Py_VISIT(traverse_module_state->__pyx_FusedFunctionType);
- #endif
- Py_VISIT(traverse_module_state->__pyx_array_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_array);
- Py_VISIT(traverse_module_state->__pyx_MemviewEnum_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_MemviewEnum);
- Py_VISIT(traverse_module_state->__pyx_memoryview_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryview);
- Py_VISIT(traverse_module_state->__pyx_memoryviewslice_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryviewslice);
- Py_VISIT(traverse_module_state->__pyx_kp_u_);
- Py_VISIT(traverse_module_state->__pyx_n_s_ASCII);
- Py_VISIT(traverse_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi);
- Py_VISIT(traverse_module_state->__pyx_n_s_AssertionError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Cannot_index_with_type);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Dimension_d_is_not_direct);
- Py_VISIT(traverse_module_state->__pyx_n_s_Ellipsis);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0);
- Py_VISIT(traverse_module_state->__pyx_n_s_IndexError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_shape_in_axis);
- Py_VISIT(traverse_module_state->__pyx_n_s_MemoryError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x);
- Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_object);
- Py_VISIT(traverse_module_state->__pyx_n_b_O);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- Py_VISIT(traverse_module_state->__pyx_n_s_PickleError);
- Py_VISIT(traverse_module_state->__pyx_n_s_Sequence);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d);
- Py_VISIT(traverse_module_state->__pyx_n_s_TypeError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Unable_to_convert_item_to_object);
- Py_VISIT(traverse_module_state->__pyx_n_s_ValueError);
- Py_VISIT(traverse_module_state->__pyx_n_s_View_MemoryView);
- Py_VISIT(traverse_module_state->__pyx_kp_u__2);
- Py_VISIT(traverse_module_state->__pyx_n_s__23);
- Py_VISIT(traverse_module_state->__pyx_n_s__3);
- Py_VISIT(traverse_module_state->__pyx_kp_u__6);
- Py_VISIT(traverse_module_state->__pyx_kp_u__7);
- Py_VISIT(traverse_module_state->__pyx_n_s_abc);
- Py_VISIT(traverse_module_state->__pyx_n_s_allocate_buffer);
- Py_VISIT(traverse_module_state->__pyx_kp_u_and);
- Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines);
- Py_VISIT(traverse_module_state->__pyx_n_s_base);
- Py_VISIT(traverse_module_state->__pyx_n_s_c);
- Py_VISIT(traverse_module_state->__pyx_n_u_c);
- Py_VISIT(traverse_module_state->__pyx_n_s_class);
- Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem);
- Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback);
- Py_VISIT(traverse_module_state->__pyx_n_s_collections);
- Py_VISIT(traverse_module_state->__pyx_kp_s_collections_abc);
- Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_direct);
- Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_core_pyx);
- Py_VISIT(traverse_module_state->__pyx_n_s_count);
- Py_VISIT(traverse_module_state->__pyx_n_s_dict);
- Py_VISIT(traverse_module_state->__pyx_kp_u_disable);
- Py_VISIT(traverse_module_state->__pyx_n_s_dtype_is_object);
- Py_VISIT(traverse_module_state->__pyx_kp_u_enable);
- Py_VISIT(traverse_module_state->__pyx_n_s_encode);
- Py_VISIT(traverse_module_state->__pyx_n_s_enumerate);
- Py_VISIT(traverse_module_state->__pyx_n_s_error);
- Py_VISIT(traverse_module_state->__pyx_n_s_flags);
- Py_VISIT(traverse_module_state->__pyx_n_s_format);
- Py_VISIT(traverse_module_state->__pyx_n_s_fortran);
- Py_VISIT(traverse_module_state->__pyx_n_u_fortran);
- Py_VISIT(traverse_module_state->__pyx_kp_u_gc);
- Py_VISIT(traverse_module_state->__pyx_n_s_getstate);
- Py_VISIT(traverse_module_state->__pyx_kp_u_got);
- Py_VISIT(traverse_module_state->__pyx_kp_u_got_differing_extents_in_dimensi);
- Py_VISIT(traverse_module_state->__pyx_n_s_id);
- Py_VISIT(traverse_module_state->__pyx_n_s_import);
- Py_VISIT(traverse_module_state->__pyx_n_s_index);
- Py_VISIT(traverse_module_state->__pyx_n_s_initializing);
- Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine);
- Py_VISIT(traverse_module_state->__pyx_kp_u_isenabled);
- Py_VISIT(traverse_module_state->__pyx_n_s_itemsize);
- Py_VISIT(traverse_module_state->__pyx_kp_s_itemsize_0_for_cython_array);
- Py_VISIT(traverse_module_state->__pyx_n_s_main);
- Py_VISIT(traverse_module_state->__pyx_n_s_maximum_path_c);
- Py_VISIT(traverse_module_state->__pyx_n_s_memview);
- Py_VISIT(traverse_module_state->__pyx_n_s_mode);
- Py_VISIT(traverse_module_state->__pyx_n_s_monotonic_align_core);
- Py_VISIT(traverse_module_state->__pyx_n_s_name);
- Py_VISIT(traverse_module_state->__pyx_n_s_name_2);
- Py_VISIT(traverse_module_state->__pyx_n_s_ndim);
- Py_VISIT(traverse_module_state->__pyx_n_s_new);
- Py_VISIT(traverse_module_state->__pyx_kp_s_no_default___reduce___due_to_non);
- Py_VISIT(traverse_module_state->__pyx_n_s_obj);
- Py_VISIT(traverse_module_state->__pyx_n_s_pack);
- Py_VISIT(traverse_module_state->__pyx_n_s_paths);
- Py_VISIT(traverse_module_state->__pyx_n_s_pickle);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_PickleError);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_checksum);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_result);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_state);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_type);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_unpickle_Enum);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_vtable);
- Py_VISIT(traverse_module_state->__pyx_n_s_range);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce_cython);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce_ex);
- Py_VISIT(traverse_module_state->__pyx_n_s_register);
- Py_VISIT(traverse_module_state->__pyx_n_s_setstate);
- Py_VISIT(traverse_module_state->__pyx_n_s_setstate_cython);
- Py_VISIT(traverse_module_state->__pyx_n_s_shape);
- Py_VISIT(traverse_module_state->__pyx_n_s_size);
- Py_VISIT(traverse_module_state->__pyx_n_s_spec);
- Py_VISIT(traverse_module_state->__pyx_n_s_start);
- Py_VISIT(traverse_module_state->__pyx_n_s_step);
- Py_VISIT(traverse_module_state->__pyx_n_s_stop);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct_or_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_stringsource);
- Py_VISIT(traverse_module_state->__pyx_n_s_struct);
- Py_VISIT(traverse_module_state->__pyx_n_s_sys);
- Py_VISIT(traverse_module_state->__pyx_n_s_t_xs);
- Py_VISIT(traverse_module_state->__pyx_n_s_t_ys);
- Py_VISIT(traverse_module_state->__pyx_n_s_test);
- Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_array_data);
- Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str);
- Py_VISIT(traverse_module_state->__pyx_n_s_unpack);
- Py_VISIT(traverse_module_state->__pyx_n_s_update);
- Py_VISIT(traverse_module_state->__pyx_n_s_values);
- Py_VISIT(traverse_module_state->__pyx_n_s_version_info);
- Py_VISIT(traverse_module_state->__pyx_int_0);
- Py_VISIT(traverse_module_state->__pyx_int_1);
- Py_VISIT(traverse_module_state->__pyx_int_3);
- Py_VISIT(traverse_module_state->__pyx_int_112105877);
- Py_VISIT(traverse_module_state->__pyx_int_136983863);
- Py_VISIT(traverse_module_state->__pyx_int_184977713);
- Py_VISIT(traverse_module_state->__pyx_int_neg_1);
- Py_VISIT(traverse_module_state->__pyx_slice__5);
- Py_VISIT(traverse_module_state->__pyx_tuple__4);
- Py_VISIT(traverse_module_state->__pyx_tuple__8);
- Py_VISIT(traverse_module_state->__pyx_tuple__10);
- Py_VISIT(traverse_module_state->__pyx_tuple__11);
- Py_VISIT(traverse_module_state->__pyx_tuple__12);
- Py_VISIT(traverse_module_state->__pyx_tuple__13);
- Py_VISIT(traverse_module_state->__pyx_tuple__14);
- Py_VISIT(traverse_module_state->__pyx_tuple__15);
- Py_VISIT(traverse_module_state->__pyx_tuple__16);
- Py_VISIT(traverse_module_state->__pyx_tuple__17);
- Py_VISIT(traverse_module_state->__pyx_tuple__18);
- Py_VISIT(traverse_module_state->__pyx_tuple__19);
- Py_VISIT(traverse_module_state->__pyx_tuple__21);
- Py_VISIT(traverse_module_state->__pyx_codeobj__20);
- Py_VISIT(traverse_module_state->__pyx_codeobj__22);
- return 0;
-}
-#endif
-/* #### Code section: module_state_defines ### */
-#define __pyx_d __pyx_mstate_global->__pyx_d
-#define __pyx_b __pyx_mstate_global->__pyx_b
-#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime
-#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple
-#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes
-#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode
-#ifdef __Pyx_CyFunction_USED
-#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType
-#endif
-#ifdef __Pyx_FusedFunction_USED
-#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType
-#endif
-#ifdef __Pyx_Generator_USED
-#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType
-#endif
-#ifdef __Pyx_IterableCoroutine_USED
-#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType
-#endif
-#ifdef __Pyx_Coroutine_USED
-#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType
-#endif
-#ifdef __Pyx_Coroutine_USED
-#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#define __pyx_type___pyx_array __pyx_mstate_global->__pyx_type___pyx_array
-#define __pyx_type___pyx_MemviewEnum __pyx_mstate_global->__pyx_type___pyx_MemviewEnum
-#define __pyx_type___pyx_memoryview __pyx_mstate_global->__pyx_type___pyx_memoryview
-#define __pyx_type___pyx_memoryviewslice __pyx_mstate_global->__pyx_type___pyx_memoryviewslice
-#endif
-#define __pyx_array_type __pyx_mstate_global->__pyx_array_type
-#define __pyx_MemviewEnum_type __pyx_mstate_global->__pyx_MemviewEnum_type
-#define __pyx_memoryview_type __pyx_mstate_global->__pyx_memoryview_type
-#define __pyx_memoryviewslice_type __pyx_mstate_global->__pyx_memoryviewslice_type
-#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_
-#define __pyx_n_s_ASCII __pyx_mstate_global->__pyx_n_s_ASCII
-#define __pyx_kp_s_All_dimensions_preceding_dimensi __pyx_mstate_global->__pyx_kp_s_All_dimensions_preceding_dimensi
-#define __pyx_n_s_AssertionError __pyx_mstate_global->__pyx_n_s_AssertionError
-#define __pyx_kp_s_Buffer_view_does_not_expose_stri __pyx_mstate_global->__pyx_kp_s_Buffer_view_does_not_expose_stri
-#define __pyx_kp_s_Can_only_create_a_buffer_that_is __pyx_mstate_global->__pyx_kp_s_Can_only_create_a_buffer_that_is
-#define __pyx_kp_s_Cannot_assign_to_read_only_memor __pyx_mstate_global->__pyx_kp_s_Cannot_assign_to_read_only_memor
-#define __pyx_kp_s_Cannot_create_writable_memory_vi __pyx_mstate_global->__pyx_kp_s_Cannot_create_writable_memory_vi
-#define __pyx_kp_u_Cannot_index_with_type __pyx_mstate_global->__pyx_kp_u_Cannot_index_with_type
-#define __pyx_kp_s_Cannot_transpose_memoryview_with __pyx_mstate_global->__pyx_kp_s_Cannot_transpose_memoryview_with
-#define __pyx_kp_s_Dimension_d_is_not_direct __pyx_mstate_global->__pyx_kp_s_Dimension_d_is_not_direct
-#define __pyx_n_s_Ellipsis __pyx_mstate_global->__pyx_n_s_Ellipsis
-#define __pyx_kp_s_Empty_shape_tuple_for_cython_arr __pyx_mstate_global->__pyx_kp_s_Empty_shape_tuple_for_cython_arr
-#define __pyx_kp_s_Incompatible_checksums_0x_x_vs_0 __pyx_mstate_global->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0
-#define __pyx_n_s_IndexError __pyx_mstate_global->__pyx_n_s_IndexError
-#define __pyx_kp_s_Index_out_of_bounds_axis_d __pyx_mstate_global->__pyx_kp_s_Index_out_of_bounds_axis_d
-#define __pyx_kp_s_Indirect_dimensions_not_supporte __pyx_mstate_global->__pyx_kp_s_Indirect_dimensions_not_supporte
-#define __pyx_kp_u_Invalid_mode_expected_c_or_fortr __pyx_mstate_global->__pyx_kp_u_Invalid_mode_expected_c_or_fortr
-#define __pyx_kp_u_Invalid_shape_in_axis __pyx_mstate_global->__pyx_kp_u_Invalid_shape_in_axis
-#define __pyx_n_s_MemoryError __pyx_mstate_global->__pyx_n_s_MemoryError
-#define __pyx_kp_s_MemoryView_of_r_at_0x_x __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_at_0x_x
-#define __pyx_kp_s_MemoryView_of_r_object __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_object
-#define __pyx_n_b_O __pyx_mstate_global->__pyx_n_b_O
-#define __pyx_kp_u_Out_of_bounds_on_buffer_access_a __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a
-#define __pyx_n_s_PickleError __pyx_mstate_global->__pyx_n_s_PickleError
-#define __pyx_n_s_Sequence __pyx_mstate_global->__pyx_n_s_Sequence
-#define __pyx_kp_s_Step_may_not_be_zero_axis_d __pyx_mstate_global->__pyx_kp_s_Step_may_not_be_zero_axis_d
-#define __pyx_n_s_TypeError __pyx_mstate_global->__pyx_n_s_TypeError
-#define __pyx_kp_s_Unable_to_convert_item_to_object __pyx_mstate_global->__pyx_kp_s_Unable_to_convert_item_to_object
-#define __pyx_n_s_ValueError __pyx_mstate_global->__pyx_n_s_ValueError
-#define __pyx_n_s_View_MemoryView __pyx_mstate_global->__pyx_n_s_View_MemoryView
-#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2
-#define __pyx_n_s__23 __pyx_mstate_global->__pyx_n_s__23
-#define __pyx_n_s__3 __pyx_mstate_global->__pyx_n_s__3
-#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6
-#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7
-#define __pyx_n_s_abc __pyx_mstate_global->__pyx_n_s_abc
-#define __pyx_n_s_allocate_buffer __pyx_mstate_global->__pyx_n_s_allocate_buffer
-#define __pyx_kp_u_and __pyx_mstate_global->__pyx_kp_u_and
-#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines
-#define __pyx_n_s_base __pyx_mstate_global->__pyx_n_s_base
-#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c
-#define __pyx_n_u_c __pyx_mstate_global->__pyx_n_u_c
-#define __pyx_n_s_class __pyx_mstate_global->__pyx_n_s_class
-#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem
-#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback
-#define __pyx_n_s_collections __pyx_mstate_global->__pyx_n_s_collections
-#define __pyx_kp_s_collections_abc __pyx_mstate_global->__pyx_kp_s_collections_abc
-#define __pyx_kp_s_contiguous_and_direct __pyx_mstate_global->__pyx_kp_s_contiguous_and_direct
-#define __pyx_kp_s_contiguous_and_indirect __pyx_mstate_global->__pyx_kp_s_contiguous_and_indirect
-#define __pyx_kp_s_core_pyx __pyx_mstate_global->__pyx_kp_s_core_pyx
-#define __pyx_n_s_count __pyx_mstate_global->__pyx_n_s_count
-#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict
-#define __pyx_kp_u_disable __pyx_mstate_global->__pyx_kp_u_disable
-#define __pyx_n_s_dtype_is_object __pyx_mstate_global->__pyx_n_s_dtype_is_object
-#define __pyx_kp_u_enable __pyx_mstate_global->__pyx_kp_u_enable
-#define __pyx_n_s_encode __pyx_mstate_global->__pyx_n_s_encode
-#define __pyx_n_s_enumerate __pyx_mstate_global->__pyx_n_s_enumerate
-#define __pyx_n_s_error __pyx_mstate_global->__pyx_n_s_error
-#define __pyx_n_s_flags __pyx_mstate_global->__pyx_n_s_flags
-#define __pyx_n_s_format __pyx_mstate_global->__pyx_n_s_format
-#define __pyx_n_s_fortran __pyx_mstate_global->__pyx_n_s_fortran
-#define __pyx_n_u_fortran __pyx_mstate_global->__pyx_n_u_fortran
-#define __pyx_kp_u_gc __pyx_mstate_global->__pyx_kp_u_gc
-#define __pyx_n_s_getstate __pyx_mstate_global->__pyx_n_s_getstate
-#define __pyx_kp_u_got __pyx_mstate_global->__pyx_kp_u_got
-#define __pyx_kp_u_got_differing_extents_in_dimensi __pyx_mstate_global->__pyx_kp_u_got_differing_extents_in_dimensi
-#define __pyx_n_s_id __pyx_mstate_global->__pyx_n_s_id
-#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import
-#define __pyx_n_s_index __pyx_mstate_global->__pyx_n_s_index
-#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing
-#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine
-#define __pyx_kp_u_isenabled __pyx_mstate_global->__pyx_kp_u_isenabled
-#define __pyx_n_s_itemsize __pyx_mstate_global->__pyx_n_s_itemsize
-#define __pyx_kp_s_itemsize_0_for_cython_array __pyx_mstate_global->__pyx_kp_s_itemsize_0_for_cython_array
-#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main
-#define __pyx_n_s_maximum_path_c __pyx_mstate_global->__pyx_n_s_maximum_path_c
-#define __pyx_n_s_memview __pyx_mstate_global->__pyx_n_s_memview
-#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode
-#define __pyx_n_s_monotonic_align_core __pyx_mstate_global->__pyx_n_s_monotonic_align_core
-#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name
-#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2
-#define __pyx_n_s_ndim __pyx_mstate_global->__pyx_n_s_ndim
-#define __pyx_n_s_new __pyx_mstate_global->__pyx_n_s_new
-#define __pyx_kp_s_no_default___reduce___due_to_non __pyx_mstate_global->__pyx_kp_s_no_default___reduce___due_to_non
-#define __pyx_n_s_obj __pyx_mstate_global->__pyx_n_s_obj
-#define __pyx_n_s_pack __pyx_mstate_global->__pyx_n_s_pack
-#define __pyx_n_s_paths __pyx_mstate_global->__pyx_n_s_paths
-#define __pyx_n_s_pickle __pyx_mstate_global->__pyx_n_s_pickle
-#define __pyx_n_s_pyx_PickleError __pyx_mstate_global->__pyx_n_s_pyx_PickleError
-#define __pyx_n_s_pyx_checksum __pyx_mstate_global->__pyx_n_s_pyx_checksum
-#define __pyx_n_s_pyx_result __pyx_mstate_global->__pyx_n_s_pyx_result
-#define __pyx_n_s_pyx_state __pyx_mstate_global->__pyx_n_s_pyx_state
-#define __pyx_n_s_pyx_type __pyx_mstate_global->__pyx_n_s_pyx_type
-#define __pyx_n_s_pyx_unpickle_Enum __pyx_mstate_global->__pyx_n_s_pyx_unpickle_Enum
-#define __pyx_n_s_pyx_vtable __pyx_mstate_global->__pyx_n_s_pyx_vtable
-#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range
-#define __pyx_n_s_reduce __pyx_mstate_global->__pyx_n_s_reduce
-#define __pyx_n_s_reduce_cython __pyx_mstate_global->__pyx_n_s_reduce_cython
-#define __pyx_n_s_reduce_ex __pyx_mstate_global->__pyx_n_s_reduce_ex
-#define __pyx_n_s_register __pyx_mstate_global->__pyx_n_s_register
-#define __pyx_n_s_setstate __pyx_mstate_global->__pyx_n_s_setstate
-#define __pyx_n_s_setstate_cython __pyx_mstate_global->__pyx_n_s_setstate_cython
-#define __pyx_n_s_shape __pyx_mstate_global->__pyx_n_s_shape
-#define __pyx_n_s_size __pyx_mstate_global->__pyx_n_s_size
-#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec
-#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start
-#define __pyx_n_s_step __pyx_mstate_global->__pyx_n_s_step
-#define __pyx_n_s_stop __pyx_mstate_global->__pyx_n_s_stop
-#define __pyx_kp_s_strided_and_direct __pyx_mstate_global->__pyx_kp_s_strided_and_direct
-#define __pyx_kp_s_strided_and_direct_or_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_direct_or_indirect
-#define __pyx_kp_s_strided_and_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_indirect
-#define __pyx_kp_s_stringsource __pyx_mstate_global->__pyx_kp_s_stringsource
-#define __pyx_n_s_struct __pyx_mstate_global->__pyx_n_s_struct
-#define __pyx_n_s_sys __pyx_mstate_global->__pyx_n_s_sys
-#define __pyx_n_s_t_xs __pyx_mstate_global->__pyx_n_s_t_xs
-#define __pyx_n_s_t_ys __pyx_mstate_global->__pyx_n_s_t_ys
-#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test
-#define __pyx_kp_s_unable_to_allocate_array_data __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_array_data
-#define __pyx_kp_s_unable_to_allocate_shape_and_str __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_shape_and_str
-#define __pyx_n_s_unpack __pyx_mstate_global->__pyx_n_s_unpack
-#define __pyx_n_s_update __pyx_mstate_global->__pyx_n_s_update
-#define __pyx_n_s_values __pyx_mstate_global->__pyx_n_s_values
-#define __pyx_n_s_version_info __pyx_mstate_global->__pyx_n_s_version_info
-#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0
-#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1
-#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3
-#define __pyx_int_112105877 __pyx_mstate_global->__pyx_int_112105877
-#define __pyx_int_136983863 __pyx_mstate_global->__pyx_int_136983863
-#define __pyx_int_184977713 __pyx_mstate_global->__pyx_int_184977713
-#define __pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1
-#define __pyx_k__9 __pyx_mstate_global->__pyx_k__9
-#define __pyx_slice__5 __pyx_mstate_global->__pyx_slice__5
-#define __pyx_tuple__4 __pyx_mstate_global->__pyx_tuple__4
-#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8
-#define __pyx_tuple__10 __pyx_mstate_global->__pyx_tuple__10
-#define __pyx_tuple__11 __pyx_mstate_global->__pyx_tuple__11
-#define __pyx_tuple__12 __pyx_mstate_global->__pyx_tuple__12
-#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13
-#define __pyx_tuple__14 __pyx_mstate_global->__pyx_tuple__14
-#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15
-#define __pyx_tuple__16 __pyx_mstate_global->__pyx_tuple__16
-#define __pyx_tuple__17 __pyx_mstate_global->__pyx_tuple__17
-#define __pyx_tuple__18 __pyx_mstate_global->__pyx_tuple__18
-#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19
-#define __pyx_tuple__21 __pyx_mstate_global->__pyx_tuple__21
-#define __pyx_codeobj__20 __pyx_mstate_global->__pyx_codeobj__20
-#define __pyx_codeobj__22 __pyx_mstate_global->__pyx_codeobj__22
-/* #### Code section: module_code ### */
-
-/* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
-/* Python wrapper */
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_shape = 0;
- Py_ssize_t __pyx_v_itemsize;
- PyObject *__pyx_v_format = 0;
- PyObject *__pyx_v_mode = 0;
- int __pyx_v_allocate_buffer;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
- PyObject* values[5] = {0,0,0,0,0};
- values[3] = ((PyObject *)__pyx_n_s_c);
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4);
- CYTHON_FALLTHROUGH;
- case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3);
- CYTHON_FALLTHROUGH;
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- CYTHON_FALLTHROUGH;
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_shape)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- CYTHON_FALLTHROUGH;
- case 1:
- if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_itemsize)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 2:
- if (likely((values[2] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_format)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 3:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_mode);
- if (value) { values[3] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 4:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_allocate_buffer);
- if (value) { values[4] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- } else {
- switch (__pyx_nargs) {
- case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4);
- CYTHON_FALLTHROUGH;
- case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3);
- CYTHON_FALLTHROUGH;
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- __pyx_v_shape = ((PyObject*)values[0]);
- __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- __pyx_v_format = values[2];
- __pyx_v_mode = values[3];
- if (values[4]) {
- __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 132, __pyx_L3_error)
- } else {
-
- /* "View.MemoryView":132
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
- * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
- *
- * cdef int idx
- */
- __pyx_v_allocate_buffer = ((int)1);
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, __pyx_nargs); __PYX_ERR(1, 131, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 131, __pyx_L1_error)
- if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
- PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 131, __pyx_L1_error)
- }
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
-
- /* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
- /* function exit code */
- goto __pyx_L0;
- __pyx_L1_error:;
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
- int __pyx_v_idx;
- Py_ssize_t __pyx_v_dim;
- char __pyx_v_order;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- PyObject *__pyx_t_6 = NULL;
- int __pyx_t_7;
- char *__pyx_t_8;
- Py_ssize_t __pyx_t_9;
- Py_UCS4 __pyx_t_10;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__cinit__", 0);
- __Pyx_INCREF(__pyx_v_format);
-
- /* "View.MemoryView":137
- * cdef Py_ssize_t dim
- *
- * self.ndim = len(shape) # <<<<<<<<<<<<<<
- * self.itemsize = itemsize
- *
- */
- if (unlikely(__pyx_v_shape == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
- __PYX_ERR(1, 137, __pyx_L1_error)
- }
- __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 137, __pyx_L1_error)
- __pyx_v_self->ndim = ((int)__pyx_t_1);
-
- /* "View.MemoryView":138
- *
- * self.ndim = len(shape)
- * self.itemsize = itemsize # <<<<<<<<<<<<<<
- *
- * if not self.ndim:
- */
- __pyx_v_self->itemsize = __pyx_v_itemsize;
-
- /* "View.MemoryView":140
- * self.itemsize = itemsize
- *
- * if not self.ndim: # <<<<<<<<<<<<<<
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- */
- __pyx_t_2 = (!(__pyx_v_self->ndim != 0));
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":141
- *
- * if not self.ndim:
- * raise ValueError, "Empty shape tuple for cython.array" # <<<<<<<<<<<<<<
- *
- * if itemsize <= 0:
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Empty_shape_tuple_for_cython_arr, 0, 0);
- __PYX_ERR(1, 141, __pyx_L1_error)
-
- /* "View.MemoryView":140
- * self.itemsize = itemsize
- *
- * if not self.ndim: # <<<<<<<<<<<<<<
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- */
- }
-
- /* "View.MemoryView":143
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- * if itemsize <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- */
- __pyx_t_2 = (__pyx_v_itemsize <= 0);
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":144
- *
- * if itemsize <= 0:
- * raise ValueError, "itemsize <= 0 for cython.array" # <<<<<<<<<<<<<<
- *
- * if not isinstance(format, bytes):
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_itemsize_0_for_cython_array, 0, 0);
- __PYX_ERR(1, 144, __pyx_L1_error)
-
- /* "View.MemoryView":143
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- * if itemsize <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- */
- }
-
- /* "View.MemoryView":146
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- */
- __pyx_t_2 = PyBytes_Check(__pyx_v_format);
- __pyx_t_3 = (!__pyx_t_2);
- if (__pyx_t_3) {
-
- /* "View.MemoryView":147
- *
- * if not isinstance(format, bytes):
- * format = format.encode('ASCII') # <<<<<<<<<<<<<<
- * self._format = format # keep a reference to the byte string
- * self.format = self._format
- */
- __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_6 = NULL;
- __pyx_t_7 = 0;
- if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
- __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
- if (likely(__pyx_t_6)) {
- PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
- __Pyx_INCREF(__pyx_t_6);
- __Pyx_INCREF(function);
- __Pyx_DECREF_SET(__pyx_t_5, function);
- __pyx_t_7 = 1;
- }
- }
- {
- PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_n_s_ASCII};
- __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7);
- __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 147, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
- __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_4);
- __pyx_t_4 = 0;
-
- /* "View.MemoryView":146
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- */
- }
-
- /* "View.MemoryView":148
- * if not isinstance(format, bytes):
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
- * self.format = self._format
- *
- */
- if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_v_format))) __PYX_ERR(1, 148, __pyx_L1_error)
- __pyx_t_4 = __pyx_v_format;
- __Pyx_INCREF(__pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __Pyx_GOTREF(__pyx_v_self->_format);
- __Pyx_DECREF(__pyx_v_self->_format);
- __pyx_v_self->_format = ((PyObject*)__pyx_t_4);
- __pyx_t_4 = 0;
-
- /* "View.MemoryView":149
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- * self.format = self._format # <<<<<<<<<<<<<<
- *
- *
- */
- if (unlikely(__pyx_v_self->_format == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
- __PYX_ERR(1, 149, __pyx_L1_error)
- }
- __pyx_t_8 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error)
- __pyx_v_self->format = __pyx_t_8;
-
- /* "View.MemoryView":152
- *
- *
- * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
- * self._strides = self._shape + self.ndim
- *
- */
- __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
-
- /* "View.MemoryView":153
- *
- * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
- * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
- *
- * if not self._shape:
- */
- __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
-
- /* "View.MemoryView":155
- * self._strides = self._shape + self.ndim
- *
- * if not self._shape: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate shape and strides."
- *
- */
- __pyx_t_3 = (!(__pyx_v_self->_shape != 0));
- if (unlikely(__pyx_t_3)) {
-
- /* "View.MemoryView":156
- *
- * if not self._shape:
- * raise MemoryError, "unable to allocate shape and strides." # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_shape_and_str, 0, 0);
- __PYX_ERR(1, 156, __pyx_L1_error)
-
- /* "View.MemoryView":155
- * self._strides = self._shape + self.ndim
- *
- * if not self._shape: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate shape and strides."
- *
- */
- }
-
- /* "View.MemoryView":159
- *
- *
- * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- */
- __pyx_t_7 = 0;
- __pyx_t_4 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_4); __pyx_t_1 = 0;
- for (;;) {
- if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(1, 159, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 159, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 159, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_v_dim = __pyx_t_9;
- __pyx_v_idx = __pyx_t_7;
- __pyx_t_7 = (__pyx_t_7 + 1);
-
- /* "View.MemoryView":160
- *
- * for idx, dim in enumerate(shape):
- * if dim <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim
- */
- __pyx_t_3 = (__pyx_v_dim <= 0);
- if (unlikely(__pyx_t_3)) {
-
- /* "View.MemoryView":161
- * for idx, dim in enumerate(shape):
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}." # <<<<<<<<<<<<<<
- * self._shape[idx] = dim
- *
- */
- __pyx_t_5 = PyTuple_New(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_9 = 0;
- __pyx_t_10 = 127;
- __Pyx_INCREF(__pyx_kp_u_Invalid_shape_in_axis);
- __pyx_t_9 += 22;
- __Pyx_GIVEREF(__pyx_kp_u_Invalid_shape_in_axis);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Invalid_shape_in_axis);
- __pyx_t_6 = __Pyx_PyUnicode_From_int(__pyx_v_idx, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
- __pyx_t_6 = 0;
- __Pyx_INCREF(__pyx_kp_u_);
- __pyx_t_9 += 2;
- __Pyx_GIVEREF(__pyx_kp_u_);
- PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_);
- __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_6);
- __pyx_t_6 = 0;
- __Pyx_INCREF(__pyx_kp_u__2);
- __pyx_t_9 += 1;
- __Pyx_GIVEREF(__pyx_kp_u__2);
- PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_kp_u__2);
- __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_5, 5, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __PYX_ERR(1, 161, __pyx_L1_error)
-
- /* "View.MemoryView":160
- *
- * for idx, dim in enumerate(shape):
- * if dim <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim
- */
- }
-
- /* "View.MemoryView":162
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim # <<<<<<<<<<<<<<
- *
- * cdef char order
- */
- (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
-
- /* "View.MemoryView":159
- *
- *
- * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- */
- }
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- /* "View.MemoryView":165
- *
- * cdef char order
- * if mode == 'c': # <<<<<<<<<<<<<<
- * order = b'C'
- * self.mode = u'c'
- */
- __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 165, __pyx_L1_error)
- if (__pyx_t_3) {
-
- /* "View.MemoryView":166
- * cdef char order
- * if mode == 'c':
- * order = b'C' # <<<<<<<<<<<<<<
- * self.mode = u'c'
- * elif mode == 'fortran':
- */
- __pyx_v_order = 'C';
-
- /* "View.MemoryView":167
- * if mode == 'c':
- * order = b'C'
- * self.mode = u'c' # <<<<<<<<<<<<<<
- * elif mode == 'fortran':
- * order = b'F'
- */
- __Pyx_INCREF(__pyx_n_u_c);
- __Pyx_GIVEREF(__pyx_n_u_c);
- __Pyx_GOTREF(__pyx_v_self->mode);
- __Pyx_DECREF(__pyx_v_self->mode);
- __pyx_v_self->mode = __pyx_n_u_c;
-
- /* "View.MemoryView":165
- *
- * cdef char order
- * if mode == 'c': # <<<<<<<<<<<<<<
- * order = b'C'
- * self.mode = u'c'
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":168
- * order = b'C'
- * self.mode = u'c'
- * elif mode == 'fortran': # <<<<<<<<<<<<<<
- * order = b'F'
- * self.mode = u'fortran'
- */
- __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 168, __pyx_L1_error)
- if (likely(__pyx_t_3)) {
-
- /* "View.MemoryView":169
- * self.mode = u'c'
- * elif mode == 'fortran':
- * order = b'F' # <<<<<<<<<<<<<<
- * self.mode = u'fortran'
- * else:
- */
- __pyx_v_order = 'F';
-
- /* "View.MemoryView":170
- * elif mode == 'fortran':
- * order = b'F'
- * self.mode = u'fortran' # <<<<<<<<<<<<<<
- * else:
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
- */
- __Pyx_INCREF(__pyx_n_u_fortran);
- __Pyx_GIVEREF(__pyx_n_u_fortran);
- __Pyx_GOTREF(__pyx_v_self->mode);
- __Pyx_DECREF(__pyx_v_self->mode);
- __pyx_v_self->mode = __pyx_n_u_fortran;
-
- /* "View.MemoryView":168
- * order = b'C'
- * self.mode = u'c'
- * elif mode == 'fortran': # <<<<<<<<<<<<<<
- * order = b'F'
- * self.mode = u'fortran'
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":172
- * self.mode = u'fortran'
- * else:
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" # <<<<<<<<<<<<<<
- *
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
- */
- /*else*/ {
- __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_mode, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 172, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 172, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __PYX_ERR(1, 172, __pyx_L1_error)
- }
- __pyx_L11:;
-
- /* "View.MemoryView":174
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
- *
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) # <<<<<<<<<<<<<<
- *
- * self.free_data = allocate_buffer
- */
- __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
-
- /* "View.MemoryView":176
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
- *
- * self.free_data = allocate_buffer # <<<<<<<<<<<<<<
- * self.dtype_is_object = format == b'O'
- *
- */
- __pyx_v_self->free_data = __pyx_v_allocate_buffer;
-
- /* "View.MemoryView":177
- *
- * self.free_data = allocate_buffer
- * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
- *
- * if allocate_buffer:
- */
- __pyx_t_6 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 177, __pyx_L1_error)
- __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 177, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_v_self->dtype_is_object = __pyx_t_3;
-
- /* "View.MemoryView":179
- * self.dtype_is_object = format == b'O'
- *
- * if allocate_buffer: # <<<<<<<<<<<<<<
- * _allocate_buffer(self)
- *
- */
- if (__pyx_v_allocate_buffer) {
-
- /* "View.MemoryView":180
- *
- * if allocate_buffer:
- * _allocate_buffer(self) # <<<<<<<<<<<<<<
- *
- * @cname('getbuffer')
- */
- __pyx_t_7 = __pyx_array_allocate_buffer(__pyx_v_self); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 180, __pyx_L1_error)
-
- /* "View.MemoryView":179
- * self.dtype_is_object = format == b'O'
- *
- * if allocate_buffer: # <<<<<<<<<<<<<<
- * _allocate_buffer(self)
- *
- */
- }
-
- /* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_format);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":182
- * _allocate_buffer(self)
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- */
-
-/* Python wrapper */
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- int __pyx_v_bufmode;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- char *__pyx_t_2;
- Py_ssize_t __pyx_t_3;
- int __pyx_t_4;
- Py_ssize_t *__pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- if (unlikely(__pyx_v_info == NULL)) {
- PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
- return -1;
- }
- __Pyx_RefNannySetupContext("__getbuffer__", 0);
- __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(__pyx_v_info->obj);
-
- /* "View.MemoryView":184
- * @cname('getbuffer')
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1 # <<<<<<<<<<<<<<
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c":
- */
- __pyx_v_bufmode = -1;
-
- /* "View.MemoryView":185
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<<
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- __pyx_t_1 = ((__pyx_v_flags & ((PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS) | PyBUF_ANY_CONTIGUOUS)) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":186
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- */
- __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 186, __pyx_L1_error)
- if (__pyx_t_1) {
-
- /* "View.MemoryView":187
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
- /* "View.MemoryView":186
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- */
- goto __pyx_L4;
- }
-
- /* "View.MemoryView":188
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- */
- __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 188, __pyx_L1_error)
- if (__pyx_t_1) {
-
- /* "View.MemoryView":189
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- */
- __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
- /* "View.MemoryView":188
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- */
- }
- __pyx_L4:;
-
- /* "View.MemoryView":190
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode): # <<<<<<<<<<<<<<
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- */
- __pyx_t_1 = (!((__pyx_v_flags & __pyx_v_bufmode) != 0));
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":191
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory." # <<<<<<<<<<<<<<
- * info.buf = self.data
- * info.len = self.len
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Can_only_create_a_buffer_that_is, 0, 0);
- __PYX_ERR(1, 191, __pyx_L1_error)
-
- /* "View.MemoryView":190
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode): # <<<<<<<<<<<<<<
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- */
- }
-
- /* "View.MemoryView":185
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<<
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- }
-
- /* "View.MemoryView":192
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data # <<<<<<<<<<<<<<
- * info.len = self.len
- *
- */
- __pyx_t_2 = __pyx_v_self->data;
- __pyx_v_info->buf = __pyx_t_2;
-
- /* "View.MemoryView":193
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- * info.len = self.len # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_STRIDES:
- */
- __pyx_t_3 = __pyx_v_self->len;
- __pyx_v_info->len = __pyx_t_3;
-
- /* "View.MemoryView":195
- * info.len = self.len
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.ndim = self.ndim
- * info.shape = self._shape
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":196
- *
- * if flags & PyBUF_STRIDES:
- * info.ndim = self.ndim # <<<<<<<<<<<<<<
- * info.shape = self._shape
- * info.strides = self._strides
- */
- __pyx_t_4 = __pyx_v_self->ndim;
- __pyx_v_info->ndim = __pyx_t_4;
-
- /* "View.MemoryView":197
- * if flags & PyBUF_STRIDES:
- * info.ndim = self.ndim
- * info.shape = self._shape # <<<<<<<<<<<<<<
- * info.strides = self._strides
- * else:
- */
- __pyx_t_5 = __pyx_v_self->_shape;
- __pyx_v_info->shape = __pyx_t_5;
-
- /* "View.MemoryView":198
- * info.ndim = self.ndim
- * info.shape = self._shape
- * info.strides = self._strides # <<<<<<<<<<<<<<
- * else:
- * info.ndim = 1
- */
- __pyx_t_5 = __pyx_v_self->_strides;
- __pyx_v_info->strides = __pyx_t_5;
-
- /* "View.MemoryView":195
- * info.len = self.len
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.ndim = self.ndim
- * info.shape = self._shape
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":200
- * info.strides = self._strides
- * else:
- * info.ndim = 1 # <<<<<<<<<<<<<<
- * info.shape = &self.len if flags & PyBUF_ND else NULL
- * info.strides = NULL
- */
- /*else*/ {
- __pyx_v_info->ndim = 1;
-
- /* "View.MemoryView":201
- * else:
- * info.ndim = 1
- * info.shape = &self.len if flags & PyBUF_ND else NULL # <<<<<<<<<<<<<<
- * info.strides = NULL
- *
- */
- if (((__pyx_v_flags & PyBUF_ND) != 0)) {
- __pyx_t_5 = (&__pyx_v_self->len);
- } else {
- __pyx_t_5 = NULL;
- }
- __pyx_v_info->shape = __pyx_t_5;
-
- /* "View.MemoryView":202
- * info.ndim = 1
- * info.shape = &self.len if flags & PyBUF_ND else NULL
- * info.strides = NULL # <<<<<<<<<<<<<<
- *
- * info.suboffsets = NULL
- */
- __pyx_v_info->strides = NULL;
- }
- __pyx_L6:;
-
- /* "View.MemoryView":204
- * info.strides = NULL
- *
- * info.suboffsets = NULL # <<<<<<<<<<<<<<
- * info.itemsize = self.itemsize
- * info.readonly = 0
- */
- __pyx_v_info->suboffsets = NULL;
-
- /* "View.MemoryView":205
- *
- * info.suboffsets = NULL
- * info.itemsize = self.itemsize # <<<<<<<<<<<<<<
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- */
- __pyx_t_3 = __pyx_v_self->itemsize;
- __pyx_v_info->itemsize = __pyx_t_3;
-
- /* "View.MemoryView":206
- * info.suboffsets = NULL
- * info.itemsize = self.itemsize
- * info.readonly = 0 # <<<<<<<<<<<<<<
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- * info.obj = self
- */
- __pyx_v_info->readonly = 0;
-
- /* "View.MemoryView":207
- * info.itemsize = self.itemsize
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL # <<<<<<<<<<<<<<
- * info.obj = self
- *
- */
- if (((__pyx_v_flags & PyBUF_FORMAT) != 0)) {
- __pyx_t_2 = __pyx_v_self->format;
- } else {
- __pyx_t_2 = NULL;
- }
- __pyx_v_info->format = __pyx_t_2;
-
- /* "View.MemoryView":208
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- * info.obj = self # <<<<<<<<<<<<<<
- *
- * def __dealloc__(array self):
- */
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_GIVEREF((PyObject *)__pyx_v_self);
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj);
- __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
-
- /* "View.MemoryView":182
- * _allocate_buffer(self)
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- if (__pyx_v_info->obj != NULL) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- goto __pyx_L2;
- __pyx_L0:;
- if (__pyx_v_info->obj == Py_None) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- __pyx_L2:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":210
- * info.obj = self
- *
- * def __dealloc__(array self): # <<<<<<<<<<<<<<
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- */
-
-/* Python wrapper */
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
- __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- __Pyx_RefNannySetupContext("__dealloc__", 0);
-
- /* "View.MemoryView":211
- *
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- */
- __pyx_t_1 = (__pyx_v_self->callback_free_data != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":212
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data) # <<<<<<<<<<<<<<
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object:
- */
- __pyx_v_self->callback_free_data(__pyx_v_self->data);
-
- /* "View.MemoryView":211
- *
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":213
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<<
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- */
- if (__pyx_v_self->free_data) {
- } else {
- __pyx_t_1 = __pyx_v_self->free_data;
- goto __pyx_L4_bool_binop_done;
- }
- __pyx_t_2 = (__pyx_v_self->data != NULL);
- __pyx_t_1 = __pyx_t_2;
- __pyx_L4_bool_binop_done:;
- if (__pyx_t_1) {
-
- /* "View.MemoryView":214
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- */
- if (__pyx_v_self->dtype_is_object) {
-
- /* "View.MemoryView":215
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) # <<<<<<<<<<<<<<
- * free(self.data)
- * PyObject_Free(self._shape)
- */
- __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
-
- /* "View.MemoryView":214
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- */
- }
-
- /* "View.MemoryView":216
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data) # <<<<<<<<<<<<<<
- * PyObject_Free(self._shape)
- *
- */
- free(__pyx_v_self->data);
-
- /* "View.MemoryView":213
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<<
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- */
- }
- __pyx_L3:;
-
- /* "View.MemoryView":217
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- * PyObject_Free(self._shape) # <<<<<<<<<<<<<<
- *
- * @property
- */
- PyObject_Free(__pyx_v_self->_shape);
-
- /* "View.MemoryView":210
- * info.obj = self
- *
- * def __dealloc__(array self): # <<<<<<<<<<<<<<
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- */
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":219
- * PyObject_Free(self._shape)
- *
- * @property # <<<<<<<<<<<<<<
- * def memview(self):
- * return self.get_memview()
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":221
- * @property
- * def memview(self):
- * return self.get_memview() # <<<<<<<<<<<<<<
- *
- * @cname('get_memview')
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":219
- * PyObject_Free(self._shape)
- *
- * @property # <<<<<<<<<<<<<<
- * def memview(self):
- * return self.get_memview()
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":224
- *
- * @cname('get_memview')
- * cdef get_memview(self): # <<<<<<<<<<<<<<
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object)
- */
-
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
- int __pyx_v_flags;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("get_memview", 0);
-
- /* "View.MemoryView":225
- * @cname('get_memview')
- * cdef get_memview(self):
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
- * return memoryview(self, flags, self.dtype_is_object)
- *
- */
- __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
-
- /* "View.MemoryView":226
- * cdef get_memview(self):
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
- *
- * def __len__(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_GIVEREF((PyObject *)__pyx_v_self);
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
- __pyx_t_1 = 0;
- __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":224
- *
- * @cname('get_memview')
- * cdef get_memview(self): # <<<<<<<<<<<<<<
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object)
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":228
- * return memoryview(self, flags, self.dtype_is_object)
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * return self._shape[0]
- *
- */
-
-/* Python wrapper */
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__len__", 0);
-
- /* "View.MemoryView":229
- *
- * def __len__(self):
- * return self._shape[0] # <<<<<<<<<<<<<<
- *
- * def __getattr__(self, attr):
- */
- __pyx_r = (__pyx_v_self->_shape[0]);
- goto __pyx_L0;
-
- /* "View.MemoryView":228
- * return memoryview(self, flags, self.dtype_is_object)
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * return self._shape[0]
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":231
- * return self._shape[0]
- *
- * def __getattr__(self, attr): # <<<<<<<<<<<<<<
- * return getattr(self.memview, attr)
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__getattr__", 0);
-
- /* "View.MemoryView":232
- *
- * def __getattr__(self, attr):
- * return getattr(self.memview, attr) # <<<<<<<<<<<<<<
- *
- * def __getitem__(self, item):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":231
- * return self._shape[0]
- *
- * def __getattr__(self, attr): # <<<<<<<<<<<<<<
- * return getattr(self.memview, attr)
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":234
- * return getattr(self.memview, attr)
- *
- * def __getitem__(self, item): # <<<<<<<<<<<<<<
- * return self.memview[item]
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__getitem__", 0);
-
- /* "View.MemoryView":235
- *
- * def __getitem__(self, item):
- * return self.memview[item] # <<<<<<<<<<<<<<
- *
- * def __setitem__(self, item, value):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":234
- * return getattr(self.memview, attr)
- *
- * def __getitem__(self, item): # <<<<<<<<<<<<<<
- * return self.memview[item]
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":237
- * return self.memview[item]
- *
- * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
- * self.memview[item] = value
- *
- */
-
-/* Python wrapper */
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setitem__", 0);
-
- /* "View.MemoryView":238
- *
- * def __setitem__(self, item, value):
- * self.memview[item] = value # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- if (unlikely((PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0))) __PYX_ERR(1, 238, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "View.MemoryView":237
- * return self.memview[item]
- *
- * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
- * self.memview[item] = value
- *
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":2
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 2, __pyx_L1_error)
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":4
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 4, __pyx_L1_error)
-
- /* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":248
- *
- * @cname("__pyx_array_allocate_buffer")
- * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<<
- *
- *
- */
-
-static int __pyx_array_allocate_buffer(struct __pyx_array_obj *__pyx_v_self) {
- Py_ssize_t __pyx_v_i;
- PyObject **__pyx_v_p;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- Py_ssize_t __pyx_t_2;
- Py_ssize_t __pyx_t_3;
- Py_ssize_t __pyx_t_4;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("_allocate_buffer", 0);
-
- /* "View.MemoryView":254
- * cdef PyObject **p
- *
- * self.free_data = True # <<<<<<<<<<<<<<
- * self.data = malloc(self.len)
- * if not self.data:
- */
- __pyx_v_self->free_data = 1;
-
- /* "View.MemoryView":255
- *
- * self.free_data = True
- * self.data = malloc(self.len) # <<<<<<<<<<<<<<
- * if not self.data:
- * raise MemoryError, "unable to allocate array data."
- */
- __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
-
- /* "View.MemoryView":256
- * self.free_data = True
- * self.data = malloc(self.len)
- * if not self.data: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate array data."
- *
- */
- __pyx_t_1 = (!(__pyx_v_self->data != 0));
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":257
- * self.data = malloc(self.len)
- * if not self.data:
- * raise MemoryError, "unable to allocate array data." # <<<<<<<<<<<<<<
- *
- * if self.dtype_is_object:
- */
- __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_array_data, 0, 0);
- __PYX_ERR(1, 257, __pyx_L1_error)
-
- /* "View.MemoryView":256
- * self.free_data = True
- * self.data = malloc(self.len)
- * if not self.data: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate array data."
- *
- */
- }
-
- /* "View.MemoryView":259
- * raise MemoryError, "unable to allocate array data."
- *
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * p = self.data
- * for i in range(self.len // self.itemsize):
- */
- if (__pyx_v_self->dtype_is_object) {
-
- /* "View.MemoryView":260
- *
- * if self.dtype_is_object:
- * p = self.data # <<<<<<<<<<<<<<
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None
- */
- __pyx_v_p = ((PyObject **)__pyx_v_self->data);
-
- /* "View.MemoryView":261
- * if self.dtype_is_object:
- * p = self.data
- * for i in range(self.len // self.itemsize): # <<<<<<<<<<<<<<
- * p[i] = Py_None
- * Py_INCREF(Py_None)
- */
- if (unlikely(__pyx_v_self->itemsize == 0)) {
- PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
- __PYX_ERR(1, 261, __pyx_L1_error)
- }
- else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_self->itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
- PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
- __PYX_ERR(1, 261, __pyx_L1_error)
- }
- __pyx_t_2 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_self->itemsize);
- __pyx_t_3 = __pyx_t_2;
- for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
- __pyx_v_i = __pyx_t_4;
-
- /* "View.MemoryView":262
- * p = self.data
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None # <<<<<<<<<<<<<<
- * Py_INCREF(Py_None)
- * return 0
- */
- (__pyx_v_p[__pyx_v_i]) = Py_None;
-
- /* "View.MemoryView":263
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None
- * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
- * return 0
- *
- */
- Py_INCREF(Py_None);
- }
-
- /* "View.MemoryView":259
- * raise MemoryError, "unable to allocate array data."
- *
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * p = self.data
- * for i in range(self.len // self.itemsize):
- */
- }
-
- /* "View.MemoryView":264
- * p[i] = Py_None
- * Py_INCREF(Py_None)
- * return 0 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":248
- *
- * @cname("__pyx_array_allocate_buffer")
- * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<<
- *
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView._allocate_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":268
- *
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<<
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- */
-
-static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_c_mode, char *__pyx_v_buf) {
- struct __pyx_array_obj *__pyx_v_result = 0;
- PyObject *__pyx_v_mode = 0;
- struct __pyx_array_obj *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("array_cwrapper", 0);
-
- /* "View.MemoryView":270
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf):
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. # <<<<<<<<<<<<<<
- *
- * if buf is NULL:
- */
- if (((__pyx_v_c_mode[0]) == 'f')) {
- __Pyx_INCREF(__pyx_n_s_fortran);
- __pyx_t_1 = __pyx_n_s_fortran;
- } else {
- __Pyx_INCREF(__pyx_n_s_c);
- __pyx_t_1 = __pyx_n_s_c;
- }
- __pyx_v_mode = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":272
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- *
- * if buf is NULL: # <<<<<<<<<<<<<<
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- */
- __pyx_t_2 = (__pyx_v_buf == NULL);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":273
- *
- * if buf is NULL:
- * result = array.__new__(array, shape, itemsize, format, mode) # <<<<<<<<<<<<<<
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
- */
- __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_shape);
- __Pyx_GIVEREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
- __Pyx_INCREF(__pyx_v_mode);
- __Pyx_GIVEREF(__pyx_v_mode);
- PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_mode);
- __pyx_t_1 = 0;
- __pyx_t_3 = 0;
- __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF((PyObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":272
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- *
- * if buf is NULL: # <<<<<<<<<<<<<<
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":275
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) # <<<<<<<<<<<<<<
- * result.data = buf
- *
- */
- /*else*/ {
- __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_shape);
- __Pyx_GIVEREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4);
- __Pyx_INCREF(__pyx_v_mode);
- __Pyx_GIVEREF(__pyx_v_mode);
- PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_mode);
- __pyx_t_3 = 0;
- __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 275, __pyx_L1_error)
- __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_1, __pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF((PyObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":276
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
- * result.data = buf # <<<<<<<<<<<<<<
- *
- * return result
- */
- __pyx_v_result->data = __pyx_v_buf;
- }
- __pyx_L3:;
-
- /* "View.MemoryView":278
- * result.data = buf
- *
- * return result # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF((PyObject *)__pyx_r);
- __Pyx_INCREF((PyObject *)__pyx_v_result);
- __pyx_r = __pyx_v_result;
- goto __pyx_L0;
-
- /* "View.MemoryView":268
- *
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<<
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF((PyObject *)__pyx_v_result);
- __Pyx_XDECREF(__pyx_v_mode);
- __Pyx_XGIVEREF((PyObject *)__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":304
- * cdef class Enum(object):
- * cdef object name
- * def __init__(self, name): # <<<<<<<<<<<<<<
- * self.name = name
- * def __repr__(self):
- */
-
-/* Python wrapper */
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_name = 0;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 304, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(1, 304, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- }
- __pyx_v_name = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 304, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__init__", 0);
-
- /* "View.MemoryView":305
- * cdef object name
- * def __init__(self, name):
- * self.name = name # <<<<<<<<<<<<<<
- * def __repr__(self):
- * return self.name
- */
- __Pyx_INCREF(__pyx_v_name);
- __Pyx_GIVEREF(__pyx_v_name);
- __Pyx_GOTREF(__pyx_v_self->name);
- __Pyx_DECREF(__pyx_v_self->name);
- __pyx_v_self->name = __pyx_v_name;
-
- /* "View.MemoryView":304
- * cdef class Enum(object):
- * cdef object name
- * def __init__(self, name): # <<<<<<<<<<<<<<
- * self.name = name
- * def __repr__(self):
- */
-
- /* function exit code */
- __pyx_r = 0;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":306
- * def __init__(self, name):
- * self.name = name
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return self.name
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
- __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__repr__", 0);
-
- /* "View.MemoryView":307
- * self.name = name
- * def __repr__(self):
- * return self.name # <<<<<<<<<<<<<<
- *
- * cdef generic = Enum("")
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_self->name);
- __pyx_r = __pyx_v_self->name;
- goto __pyx_L0;
-
- /* "View.MemoryView":306
- * def __init__(self, name):
- * self.name = name
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return self.name
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * cdef tuple state
- * cdef object _dict
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
- PyObject *__pyx_v_state = 0;
- PyObject *__pyx_v__dict = 0;
- int __pyx_v_use_setstate;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":5
- * cdef object _dict
- * cdef bint use_setstate
- * state = (self.name,) # <<<<<<<<<<<<<<
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None:
- */
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_self->name);
- __Pyx_GIVEREF(__pyx_v_self->name);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
- __pyx_v_state = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "(tree fragment)":6
- * cdef bint use_setstate
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
- * if _dict is not None:
- * state += (_dict,)
- */
- __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v__dict = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "(tree fragment)":7
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None: # <<<<<<<<<<<<<<
- * state += (_dict,)
- * use_setstate = True
- */
- __pyx_t_2 = (__pyx_v__dict != Py_None);
- if (__pyx_t_2) {
-
- /* "(tree fragment)":8
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None:
- * state += (_dict,) # <<<<<<<<<<<<<<
- * use_setstate = True
- * else:
- */
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v__dict);
- __Pyx_GIVEREF(__pyx_v__dict);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
- __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_3));
- __pyx_t_3 = 0;
-
- /* "(tree fragment)":9
- * if _dict is not None:
- * state += (_dict,)
- * use_setstate = True # <<<<<<<<<<<<<<
- * else:
- * use_setstate = self.name is not None
- */
- __pyx_v_use_setstate = 1;
-
- /* "(tree fragment)":7
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None: # <<<<<<<<<<<<<<
- * state += (_dict,)
- * use_setstate = True
- */
- goto __pyx_L3;
- }
-
- /* "(tree fragment)":11
- * use_setstate = True
- * else:
- * use_setstate = self.name is not None # <<<<<<<<<<<<<<
- * if use_setstate:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- */
- /*else*/ {
- __pyx_t_2 = (__pyx_v_self->name != Py_None);
- __pyx_v_use_setstate = __pyx_t_2;
- }
- __pyx_L3:;
-
- /* "(tree fragment)":12
- * else:
- * use_setstate = self.name is not None
- * if use_setstate: # <<<<<<<<<<<<<<
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- */
- if (__pyx_v_use_setstate) {
-
- /* "(tree fragment)":13
- * use_setstate = self.name is not None
- * if use_setstate:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state # <<<<<<<<<<<<<<
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_INCREF(__pyx_int_136983863);
- __Pyx_GIVEREF(__pyx_int_136983863);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863);
- __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
- __Pyx_INCREF(__pyx_v_state);
- __Pyx_GIVEREF(__pyx_v_state);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_state);
- __pyx_t_3 = 0;
- __pyx_t_1 = 0;
- __pyx_r = __pyx_t_4;
- __pyx_t_4 = 0;
- goto __pyx_L0;
-
- /* "(tree fragment)":12
- * else:
- * use_setstate = self.name is not None
- * if use_setstate: # <<<<<<<<<<<<<<
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- */
- }
-
- /* "(tree fragment)":15
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
- /*else*/ {
- __Pyx_XDECREF(__pyx_r);
- __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_INCREF(__pyx_int_136983863);
- __Pyx_GIVEREF(__pyx_int_136983863);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863);
- __Pyx_INCREF(__pyx_v_state);
- __Pyx_GIVEREF(__pyx_v_state);
- PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
- __pyx_t_4 = 0;
- __pyx_t_1 = 0;
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
- goto __pyx_L0;
- }
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * cdef tuple state
- * cdef object _dict
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_state);
- __Pyx_XDECREF(__pyx_v__dict);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":16
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":17
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state):
- * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
- */
- if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 17, __pyx_L1_error)
- __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "(tree fragment)":16
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":349
- * cdef __Pyx_TypeInfo *typeinfo
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
- * self.obj = obj
- * self.flags = flags
- */
-
-/* Python wrapper */
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_obj = 0;
- int __pyx_v_flags;
- int __pyx_v_dtype_is_object;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
- PyObject* values[3] = {0,0,0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- CYTHON_FALLTHROUGH;
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_obj)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- CYTHON_FALLTHROUGH;
- case 1:
- if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_flags)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 349, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 2:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dtype_is_object);
- if (value) { values[2] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- }
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 349, __pyx_L3_error)
- }
- } else {
- switch (__pyx_nargs) {
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- __pyx_v_obj = values[0];
- __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- if (values[2]) {
- __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- } else {
- __pyx_v_dtype_is_object = ((int)0);
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, __pyx_nargs); __PYX_ERR(1, 349, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- Py_intptr_t __pyx_t_4;
- size_t __pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__cinit__", 0);
-
- /* "View.MemoryView":350
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- * self.obj = obj # <<<<<<<<<<<<<<
- * self.flags = flags
- * if type(self) is memoryview or obj is not None:
- */
- __Pyx_INCREF(__pyx_v_obj);
- __Pyx_GIVEREF(__pyx_v_obj);
- __Pyx_GOTREF(__pyx_v_self->obj);
- __Pyx_DECREF(__pyx_v_self->obj);
- __pyx_v_self->obj = __pyx_v_obj;
-
- /* "View.MemoryView":351
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- * self.obj = obj
- * self.flags = flags # <<<<<<<<<<<<<<
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- */
- __pyx_v_self->flags = __pyx_v_flags;
-
- /* "View.MemoryView":352
- * self.obj = obj
- * self.flags = flags
- * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- */
- __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
- if (!__pyx_t_2) {
- } else {
- __pyx_t_1 = __pyx_t_2;
- goto __pyx_L4_bool_binop_done;
- }
- __pyx_t_2 = (__pyx_v_obj != Py_None);
- __pyx_t_1 = __pyx_t_2;
- __pyx_L4_bool_binop_done:;
- if (__pyx_t_1) {
-
- /* "View.MemoryView":353
- * self.flags = flags
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None
- */
- __pyx_t_3 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 353, __pyx_L1_error)
-
- /* "View.MemoryView":354
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL: # <<<<<<<<<<<<<<
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None)
- */
- __pyx_t_1 = (((PyObject *)__pyx_v_self->view.obj) == NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":355
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
- * Py_INCREF(Py_None)
- *
- */
- ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
-
- /* "View.MemoryView":356
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- */
- Py_INCREF(Py_None);
-
- /* "View.MemoryView":354
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL: # <<<<<<<<<<<<<<
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None)
- */
- }
-
- /* "View.MemoryView":352
- * self.obj = obj
- * self.flags = flags
- * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- */
- }
-
- /* "View.MemoryView":358
- * Py_INCREF(Py_None)
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<<
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- */
- __pyx_t_1 = (!__PYX_CYTHON_ATOMICS_ENABLED());
- if (__pyx_t_1) {
-
- /* "View.MemoryView":360
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<<
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- */
- __pyx_t_1 = (__pyx_memoryview_thread_locks_used < 8);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":361
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL:
- */
- __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
-
- /* "View.MemoryView":362
- * if __pyx_memoryview_thread_locks_used < 8:
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- */
- __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
-
- /* "View.MemoryView":360
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<<
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- */
- }
-
- /* "View.MemoryView":363
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- */
- __pyx_t_1 = (__pyx_v_self->lock == NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":364
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
- * if self.lock is NULL:
- * raise MemoryError
- */
- __pyx_v_self->lock = PyThread_allocate_lock();
-
- /* "View.MemoryView":365
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- *
- */
- __pyx_t_1 = (__pyx_v_self->lock == NULL);
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":366
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- * raise MemoryError # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_FORMAT:
- */
- PyErr_NoMemory(); __PYX_ERR(1, 366, __pyx_L1_error)
-
- /* "View.MemoryView":365
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- *
- */
- }
-
- /* "View.MemoryView":363
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- */
- }
-
- /* "View.MemoryView":358
- * Py_INCREF(Py_None)
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<<
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- */
- }
-
- /* "View.MemoryView":368
- * raise MemoryError
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":369
- *
- * if flags & PyBUF_FORMAT:
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
- * else:
- * self.dtype_is_object = dtype_is_object
- */
- __pyx_t_2 = ((__pyx_v_self->view.format[0]) == 'O');
- if (__pyx_t_2) {
- } else {
- __pyx_t_1 = __pyx_t_2;
- goto __pyx_L12_bool_binop_done;
- }
- __pyx_t_2 = ((__pyx_v_self->view.format[1]) == '\x00');
- __pyx_t_1 = __pyx_t_2;
- __pyx_L12_bool_binop_done:;
- __pyx_v_self->dtype_is_object = __pyx_t_1;
-
- /* "View.MemoryView":368
- * raise MemoryError
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":371
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
- */
- /*else*/ {
- __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
- }
- __pyx_L11:;
-
- /* "View.MemoryView":373
- * self.dtype_is_object = dtype_is_object
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 # <<<<<<<<<<<<<<
- * self.typeinfo = NULL
- *
- */
- #ifndef CYTHON_WITHOUT_ASSERTIONS
- if (unlikely(__pyx_assertions_enabled())) {
- __pyx_t_4 = ((Py_intptr_t)((void *)(&__pyx_v_self->acquisition_count)));
- __pyx_t_5 = (sizeof(__pyx_atomic_int_type));
- if (unlikely(__pyx_t_5 == 0)) {
- PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
- __PYX_ERR(1, 373, __pyx_L1_error)
- }
- __pyx_t_1 = ((__pyx_t_4 % __pyx_t_5) == 0);
- if (unlikely(!__pyx_t_1)) {
- __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0);
- __PYX_ERR(1, 373, __pyx_L1_error)
- }
- }
- #else
- if ((1)); else __PYX_ERR(1, 373, __pyx_L1_error)
- #endif
-
- /* "View.MemoryView":374
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
- * self.typeinfo = NULL # <<<<<<<<<<<<<<
- *
- * def __dealloc__(memoryview self):
- */
- __pyx_v_self->typeinfo = NULL;
-
- /* "View.MemoryView":349
- * cdef __Pyx_TypeInfo *typeinfo
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
- * self.obj = obj
- * self.flags = flags
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":376
- * self.typeinfo = NULL
- *
- * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view)
- */
-
-/* Python wrapper */
-static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
- __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
- int __pyx_v_i;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- int __pyx_t_4;
- PyThread_type_lock __pyx_t_5;
- PyThread_type_lock __pyx_t_6;
- __Pyx_RefNannySetupContext("__dealloc__", 0);
-
- /* "View.MemoryView":377
- *
- * def __dealloc__(memoryview self):
- * if self.obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_ReleaseBuffer(&self.view)
- * elif (<__pyx_buffer *> &self.view).obj == Py_None:
- */
- __pyx_t_1 = (__pyx_v_self->obj != Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":378
- * def __dealloc__(memoryview self):
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
- * elif (<__pyx_buffer *> &self.view).obj == Py_None:
- *
- */
- __Pyx_ReleaseBuffer((&__pyx_v_self->view));
-
- /* "View.MemoryView":377
- *
- * def __dealloc__(memoryview self):
- * if self.obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_ReleaseBuffer(&self.view)
- * elif (<__pyx_buffer *> &self.view).obj == Py_None:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":379
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view)
- * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
- *
- * (<__pyx_buffer *> &self.view).obj = NULL
- */
- __pyx_t_1 = (((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":381
- * elif (<__pyx_buffer *> &self.view).obj == Py_None:
- *
- * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
- * Py_DECREF(Py_None)
- *
- */
- ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
-
- /* "View.MemoryView":382
- *
- * (<__pyx_buffer *> &self.view).obj = NULL
- * Py_DECREF(Py_None) # <<<<<<<<<<<<<<
- *
- * cdef int i
- */
- Py_DECREF(Py_None);
-
- /* "View.MemoryView":379
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view)
- * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
- *
- * (<__pyx_buffer *> &self.view).obj = NULL
- */
- }
- __pyx_L3:;
-
- /* "View.MemoryView":386
- * cdef int i
- * global __pyx_memoryview_thread_locks_used
- * if self.lock != NULL: # <<<<<<<<<<<<<<
- * for i in range(__pyx_memoryview_thread_locks_used):
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- */
- __pyx_t_1 = (__pyx_v_self->lock != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":387
- * global __pyx_memoryview_thread_locks_used
- * if self.lock != NULL:
- * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- * __pyx_memoryview_thread_locks_used -= 1
- */
- __pyx_t_2 = __pyx_memoryview_thread_locks_used;
- __pyx_t_3 = __pyx_t_2;
- for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
- __pyx_v_i = __pyx_t_4;
-
- /* "View.MemoryView":388
- * if self.lock != NULL:
- * for i in range(__pyx_memoryview_thread_locks_used):
- * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks_used -= 1
- * if i != __pyx_memoryview_thread_locks_used:
- */
- __pyx_t_1 = ((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":389
- * for i in range(__pyx_memoryview_thread_locks_used):
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
- * if i != __pyx_memoryview_thread_locks_used:
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- */
- __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
-
- /* "View.MemoryView":390
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- * __pyx_memoryview_thread_locks_used -= 1
- * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- */
- __pyx_t_1 = (__pyx_v_i != __pyx_memoryview_thread_locks_used);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":392
- * if i != __pyx_memoryview_thread_locks_used:
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
- * break
- * else:
- */
- __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
- __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
-
- /* "View.MemoryView":391
- * __pyx_memoryview_thread_locks_used -= 1
- * if i != __pyx_memoryview_thread_locks_used:
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- * break
- */
- (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5;
- (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6;
-
- /* "View.MemoryView":390
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- * __pyx_memoryview_thread_locks_used -= 1
- * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- */
- }
-
- /* "View.MemoryView":393
- * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
- * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
- * break # <<<<<<<<<<<<<<
- * else:
- * PyThread_free_lock(self.lock)
- */
- goto __pyx_L6_break;
-
- /* "View.MemoryView":388
- * if self.lock != NULL:
- * for i in range(__pyx_memoryview_thread_locks_used):
- * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks_used -= 1
- * if i != __pyx_memoryview_thread_locks_used:
- */
- }
- }
- /*else*/ {
-
- /* "View.MemoryView":395
- * break
- * else:
- * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
- *
- * cdef char *get_item_pointer(memoryview self, object index) except NULL:
- */
- PyThread_free_lock(__pyx_v_self->lock);
- }
- __pyx_L6_break:;
-
- /* "View.MemoryView":386
- * cdef int i
- * global __pyx_memoryview_thread_locks_used
- * if self.lock != NULL: # <<<<<<<<<<<<<<
- * for i in range(__pyx_memoryview_thread_locks_used):
- * if __pyx_memoryview_thread_locks[i] is self.lock:
- */
- }
-
- /* "View.MemoryView":376
- * self.typeinfo = NULL
- *
- * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view)
- */
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
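The generated C above corresponds to the tail of `memoryview.__dealloc__` quoted in the comments: a lock borrowed from the global pool `__pyx_memoryview_thread_locks` is returned by swapping it with the last in-use slot and decrementing `__pyx_memoryview_thread_locks_used`; only a lock that never came from the pool is released with `PyThread_free_lock`. A minimal Python sketch of that swap-with-last recycling (the `pool`/`used` names are illustrative, not the Cython internals):

```python
pool = [object() for _ in range(8)]   # stands in for __pyx_memoryview_thread_locks
used = 0                              # stands in for __pyx_memoryview_thread_locks_used

def release(lock):
    """Return `lock` to the pool by swapping it behind the `used` boundary."""
    global used
    for i in range(used):
        if pool[i] is lock:
            used -= 1
            if i != used:
                pool[i], pool[used] = pool[used], pool[i]
            return
    # not found in the pool: the C code frees it via PyThread_free_lock instead
```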
-/* "View.MemoryView":397
- * PyThread_free_lock(self.lock)
- *
- * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
- * cdef Py_ssize_t dim
- * cdef char *itemp = self.view.buf
- */
-
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
- Py_ssize_t __pyx_v_dim;
- char *__pyx_v_itemp;
- PyObject *__pyx_v_idx = NULL;
- char *__pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- Py_ssize_t __pyx_t_3;
- PyObject *(*__pyx_t_4)(PyObject *);
- PyObject *__pyx_t_5 = NULL;
- Py_ssize_t __pyx_t_6;
- char *__pyx_t_7;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("get_item_pointer", 0);
-
- /* "View.MemoryView":399
- * cdef char *get_item_pointer(memoryview self, object index) except NULL:
- * cdef Py_ssize_t dim
- * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<<
- *
- * for dim, idx in enumerate(index):
- */
- __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
-
- /* "View.MemoryView":401
- * cdef char *itemp = self.view.buf
- *
- * for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- */
- __pyx_t_1 = 0;
- if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
- __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
- __pyx_t_4 = NULL;
- } else {
- __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error)
- }
- for (;;) {
- if (likely(!__pyx_t_4)) {
- if (likely(PyList_CheckExact(__pyx_t_2))) {
- if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- } else {
- if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- }
- } else {
- __pyx_t_5 = __pyx_t_4(__pyx_t_2);
- if (unlikely(!__pyx_t_5)) {
- PyObject* exc_type = PyErr_Occurred();
- if (exc_type) {
- if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
- else __PYX_ERR(1, 401, __pyx_L1_error)
- }
- break;
- }
- __Pyx_GOTREF(__pyx_t_5);
- }
- __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
- __pyx_t_5 = 0;
- __pyx_v_dim = __pyx_t_1;
- __pyx_t_1 = (__pyx_t_1 + 1);
-
- /* "View.MemoryView":402
- *
- * for dim, idx in enumerate(index):
- * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
- *
- * return itemp
- */
- __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 402, __pyx_L1_error)
- __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 402, __pyx_L1_error)
- __pyx_v_itemp = __pyx_t_7;
-
- /* "View.MemoryView":401
- * cdef char *itemp = self.view.buf
- *
- * for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- */
- }
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "View.MemoryView":404
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- * return itemp # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = __pyx_v_itemp;
- goto __pyx_L0;
-
- /* "View.MemoryView":397
- * PyThread_free_lock(self.lock)
- *
- * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
- * cdef Py_ssize_t dim
- * cdef char *itemp = self.view.buf
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_idx);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
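`get_item_pointer` (above) resolves a full integer index by walking the index tuple once and letting `pybuffer_index` advance a raw `char *` per dimension. Stripped of the bounds checks and negative-index wrapping that `pybuffer_index` performs, the arithmetic is a plain strided-offset computation; a rough, self-contained Python equivalent (the function name is an assumption, not part of the C API):

```python
def item_offset(strides, index):
    """Byte offset of the element addressed by `index` in a strided buffer.
    Mirrors the loop in get_item_pointer; bounds checks are omitted."""
    offset = 0
    for dim, idx in enumerate(index):
        offset += strides[dim] * idx
    return offset

# A C-contiguous 3x4 buffer of 8-byte items has strides (32, 8):
# item_offset((32, 8), (2, 1)) -> 72
```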
-/* "View.MemoryView":407
- *
- *
- * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
- * if index is Ellipsis:
- * return self
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
-static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
- PyObject *__pyx_v_have_slices = NULL;
- PyObject *__pyx_v_indices = NULL;
- char *__pyx_v_itemp;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- char *__pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__getitem__", 0);
-
- /* "View.MemoryView":408
- *
- * def __getitem__(memoryview self, object index):
- * if index is Ellipsis: # <<<<<<<<<<<<<<
- * return self
- *
- */
- __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":409
- * def __getitem__(memoryview self, object index):
- * if index is Ellipsis:
- * return self # <<<<<<<<<<<<<<
- *
- * have_slices, indices = _unellipsify(index, self.view.ndim)
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __pyx_r = ((PyObject *)__pyx_v_self);
- goto __pyx_L0;
-
- /* "View.MemoryView":408
- *
- * def __getitem__(memoryview self, object index):
- * if index is Ellipsis: # <<<<<<<<<<<<<<
- * return self
- *
- */
- }
-
- /* "View.MemoryView":411
- * return self
- *
- * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
- *
- * cdef char *itemp
- */
- __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- if (likely(__pyx_t_2 != Py_None)) {
- PyObject* sequence = __pyx_t_2;
- Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
- if (unlikely(size != 2)) {
- if (size > 2) __Pyx_RaiseTooManyValuesError(2);
- else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
- __PYX_ERR(1, 411, __pyx_L1_error)
- }
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
- __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
- __Pyx_INCREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_t_4);
- #else
- __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 411, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- #endif
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- } else {
- __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error)
- }
- __pyx_v_have_slices = __pyx_t_3;
- __pyx_t_3 = 0;
- __pyx_v_indices = __pyx_t_4;
- __pyx_t_4 = 0;
-
- /* "View.MemoryView":414
- *
- * cdef char *itemp
- * if have_slices: # <<<<<<<<<<<<<<
- * return memview_slice(self, indices)
- * else:
- */
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 414, __pyx_L1_error)
- if (__pyx_t_1) {
-
- /* "View.MemoryView":415
- * cdef char *itemp
- * if have_slices:
- * return memview_slice(self, indices) # <<<<<<<<<<<<<<
- * else:
- * itemp = self.get_item_pointer(indices)
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 415, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":414
- *
- * cdef char *itemp
- * if have_slices: # <<<<<<<<<<<<<<
- * return memview_slice(self, indices)
- * else:
- */
- }
-
- /* "View.MemoryView":417
- * return memview_slice(self, indices)
- * else:
- * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
- * return self.convert_item_to_object(itemp)
- *
- */
- /*else*/ {
- __pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(1, 417, __pyx_L1_error)
- __pyx_v_itemp = __pyx_t_5;
-
- /* "View.MemoryView":418
- * else:
- * itemp = self.get_item_pointer(indices)
- * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
- *
- * def __setitem__(memoryview self, object index, object value):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
- }
-
- /* "View.MemoryView":407
- *
- *
- * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
- * if index is Ellipsis:
- * return self
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_have_slices);
- __Pyx_XDECREF(__pyx_v_indices);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
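`__getitem__` (above) dispatches three ways, as the quoted source shows: `...` returns the view itself, an index containing slices yields a new sliced view via `memview_slice`, and a full integer index reads one element through `get_item_pointer` plus `convert_item_to_object`. The interesting preprocessing step is `_unellipsify`, which pads the index out to `ndim` entries; a simplified, runnable sketch of that step (single-Ellipsis case only, names are illustrative):

```python
def expand_ellipsis(index, ndim):
    """Pad `index` to ndim entries, replacing a single Ellipsis with ':' slices,
    and report whether any slices are present (slice -> new view, else scalar)."""
    if not isinstance(index, tuple):
        index = (index,)
    out = []
    for item in index:
        if item is Ellipsis:
            out.extend([slice(None)] * (ndim - (len(index) - 1)))
        else:
            out.append(item)
    out.extend([slice(None)] * (ndim - len(out)))   # pad trailing axes
    have_slices = any(isinstance(x, slice) for x in out)
    return have_slices, tuple(out)

# expand_ellipsis((Ellipsis, 0), 3) -> (True, (slice(None), slice(None), 0))
```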
-/* "View.MemoryView":420
- * return self.convert_item_to_object(itemp)
- *
- * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
- * if self.view.readonly:
- * raise TypeError, "Cannot assign to read-only memoryview"
- */
-
-/* Python wrapper */
-static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
- PyObject *__pyx_v_have_slices = NULL;
- PyObject *__pyx_v_obj = NULL;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_t_4;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setitem__", 0);
- __Pyx_INCREF(__pyx_v_index);
-
- /* "View.MemoryView":421
- *
- * def __setitem__(memoryview self, object index, object value):
- * if self.view.readonly: # <<<<<<<<<<<<<<
- * raise TypeError, "Cannot assign to read-only memoryview"
- *
- */
- if (unlikely(__pyx_v_self->view.readonly)) {
-
- /* "View.MemoryView":422
- * def __setitem__(memoryview self, object index, object value):
- * if self.view.readonly:
- * raise TypeError, "Cannot assign to read-only memoryview" # <<<<<<<<<<<<<<
- *
- * have_slices, index = _unellipsify(index, self.view.ndim)
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_Cannot_assign_to_read_only_memor, 0, 0);
- __PYX_ERR(1, 422, __pyx_L1_error)
-
- /* "View.MemoryView":421
- *
- * def __setitem__(memoryview self, object index, object value):
- * if self.view.readonly: # <<<<<<<<<<<<<<
- * raise TypeError, "Cannot assign to read-only memoryview"
- *
- */
- }
-
- /* "View.MemoryView":424
- * raise TypeError, "Cannot assign to read-only memoryview"
- *
- * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
- *
- * if have_slices:
- */
- __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 424, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- if (likely(__pyx_t_1 != Py_None)) {
- PyObject* sequence = __pyx_t_1;
- Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
- if (unlikely(size != 2)) {
- if (size > 2) __Pyx_RaiseTooManyValuesError(2);
- else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
- __PYX_ERR(1, 424, __pyx_L1_error)
- }
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
- __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
- __Pyx_INCREF(__pyx_t_2);
- __Pyx_INCREF(__pyx_t_3);
- #else
- __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 424, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 424, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- #endif
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- } else {
- __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 424, __pyx_L1_error)
- }
- __pyx_v_have_slices = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":426
- * have_slices, index = _unellipsify(index, self.view.ndim)
- *
- * if have_slices: # <<<<<<<<<<<<<<
- * obj = self.is_slice(value)
- * if obj:
- */
- __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 426, __pyx_L1_error)
- if (__pyx_t_4) {
-
- /* "View.MemoryView":427
- *
- * if have_slices:
- * obj = self.is_slice(value) # <<<<<<<<<<<<<<
- * if obj:
- * self.setitem_slice_assignment(self[index], obj)
- */
- __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 427, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v_obj = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":428
- * if have_slices:
- * obj = self.is_slice(value)
- * if obj: # <<<<<<<<<<<<<<
- * self.setitem_slice_assignment(self[index], obj)
- * else:
- */
- __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 428, __pyx_L1_error)
- if (__pyx_t_4) {
-
- /* "View.MemoryView":429
- * obj = self.is_slice(value)
- * if obj:
- * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
- * else:
- * self.setitem_slice_assign_scalar(self[index], value)
- */
- __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 429, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 429, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
- /* "View.MemoryView":428
- * if have_slices:
- * obj = self.is_slice(value)
- * if obj: # <<<<<<<<<<<<<<
- * self.setitem_slice_assignment(self[index], obj)
- * else:
- */
- goto __pyx_L5;
- }
-
- /* "View.MemoryView":431
- * self.setitem_slice_assignment(self[index], obj)
- * else:
- * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
- * else:
- * self.setitem_indexed(index, value)
- */
- /*else*/ {
- __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 431, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 431, __pyx_L1_error)
- __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 431, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- }
- __pyx_L5:;
-
- /* "View.MemoryView":426
- * have_slices, index = _unellipsify(index, self.view.ndim)
- *
- * if have_slices: # <<<<<<<<<<<<<<
- * obj = self.is_slice(value)
- * if obj:
- */
- goto __pyx_L4;
- }
-
- /* "View.MemoryView":433
- * self.setitem_slice_assign_scalar(self[index], value)
- * else:
- * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
- *
- * cdef is_slice(self, obj):
- */
- /*else*/ {
- __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 433, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- }
- __pyx_L4:;
-
- /* "View.MemoryView":420
- * return self.convert_item_to_object(itemp)
- *
- * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
- * if self.view.readonly:
- * raise TypeError, "Cannot assign to read-only memoryview"
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_have_slices);
- __Pyx_XDECREF(__pyx_v_obj);
- __Pyx_XDECREF(__pyx_v_index);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
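`__setitem__` (above) first rejects writes to read-only views, then takes one of three paths: buffer-like values are copied element-wise into the destination slice, non-buffer values are broadcast as a scalar, and a full integer index becomes a single packed store. The read-only guard behaves like CPython's own memoryview, which is easy to demonstrate:

```python
# A view over an immutable bytes object is read-only, so assignment raises
# TypeError, just as the generated guard above does for read-only Cython views.
view = memoryview(bytes(4))
try:
    view[0] = 1
except TypeError as exc:
    print(exc)   # e.g. "cannot modify read-only memory"
```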
-/* "View.MemoryView":435
- * self.setitem_indexed(index, value)
- *
- * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
- * if not isinstance(obj, memoryview):
- * try:
- */
-
-static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- PyObject *__pyx_t_6 = NULL;
- PyObject *__pyx_t_7 = NULL;
- PyObject *__pyx_t_8 = NULL;
- int __pyx_t_9;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("is_slice", 0);
- __Pyx_INCREF(__pyx_v_obj);
-
- /* "View.MemoryView":436
- *
- * cdef is_slice(self, obj):
- * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
- * try:
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- */
- __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
- __pyx_t_2 = (!__pyx_t_1);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":437
- * cdef is_slice(self, obj):
- * if not isinstance(obj, memoryview):
- * try: # <<<<<<<<<<<<<<
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- * self.dtype_is_object)
- */
- {
- __Pyx_PyThreadState_declare
- __Pyx_PyThreadState_assign
- __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
- __Pyx_XGOTREF(__pyx_t_3);
- __Pyx_XGOTREF(__pyx_t_4);
- __Pyx_XGOTREF(__pyx_t_5);
- /*try:*/ {
-
- /* "View.MemoryView":438
- * if not isinstance(obj, memoryview):
- * try:
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
- * self.dtype_is_object)
- * except TypeError:
- */
- __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 438, __pyx_L4_error)
- __Pyx_GOTREF(__pyx_t_6);
-
- /* "View.MemoryView":439
- * try:
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- * self.dtype_is_object) # <<<<<<<<<<<<<<
- * except TypeError:
- * return None
- */
- __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 439, __pyx_L4_error)
- __Pyx_GOTREF(__pyx_t_7);
-
- /* "View.MemoryView":438
- * if not isinstance(obj, memoryview):
- * try:
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
- * self.dtype_is_object)
- * except TypeError:
- */
- __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 438, __pyx_L4_error)
- __Pyx_GOTREF(__pyx_t_8);
- __Pyx_INCREF(__pyx_v_obj);
- __Pyx_GIVEREF(__pyx_v_obj);
- PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
- __Pyx_GIVEREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_7);
- PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
- __pyx_t_6 = 0;
- __pyx_t_7 = 0;
- __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 438, __pyx_L4_error)
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
- __pyx_t_7 = 0;
-
- /* "View.MemoryView":437
- * cdef is_slice(self, obj):
- * if not isinstance(obj, memoryview):
- * try: # <<<<<<<<<<<<<<
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- * self.dtype_is_object)
- */
- }
- __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
- goto __pyx_L9_try_end;
- __pyx_L4_error:;
- __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
- __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
-
- /* "View.MemoryView":440
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- * self.dtype_is_object)
- * except TypeError: # <<<<<<<<<<<<<<
- * return None
- *
- */
- __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
- if (__pyx_t_9) {
- __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
- if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 440, __pyx_L6_except_error)
- __Pyx_XGOTREF(__pyx_t_7);
- __Pyx_XGOTREF(__pyx_t_8);
- __Pyx_XGOTREF(__pyx_t_6);
-
- /* "View.MemoryView":441
- * self.dtype_is_object)
- * except TypeError:
- * return None # <<<<<<<<<<<<<<
- *
- * return obj
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- goto __pyx_L7_except_return;
- }
- goto __pyx_L6_except_error;
-
- /* "View.MemoryView":437
- * cdef is_slice(self, obj):
- * if not isinstance(obj, memoryview):
- * try: # <<<<<<<<<<<<<<
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- * self.dtype_is_object)
- */
- __pyx_L6_except_error:;
- __Pyx_XGIVEREF(__pyx_t_3);
- __Pyx_XGIVEREF(__pyx_t_4);
- __Pyx_XGIVEREF(__pyx_t_5);
- __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
- goto __pyx_L1_error;
- __pyx_L7_except_return:;
- __Pyx_XGIVEREF(__pyx_t_3);
- __Pyx_XGIVEREF(__pyx_t_4);
- __Pyx_XGIVEREF(__pyx_t_5);
- __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
- goto __pyx_L0;
- __pyx_L9_try_end:;
- }
-
- /* "View.MemoryView":436
- *
- * cdef is_slice(self, obj):
- * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
- * try:
- * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
- */
- }
-
- /* "View.MemoryView":443
- * return None
- *
- * return obj # <<<<<<<<<<<<<<
- *
- * cdef setitem_slice_assignment(self, dst, src):
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_obj);
- __pyx_r = __pyx_v_obj;
- goto __pyx_L0;
-
- /* "View.MemoryView":435
- * self.setitem_indexed(index, value)
- *
- * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
- * if not isinstance(obj, memoryview):
- * try:
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_XDECREF(__pyx_t_7);
- __Pyx_XDECREF(__pyx_t_8);
- __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_obj);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
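`is_slice` (above) tries to reinterpret an arbitrary assigned value through the buffer protocol, dropping the writability requirement and asking for any contiguous layout; if the coercion raises TypeError the method returns None, which tells `__setitem__` to fall back to scalar broadcasting. A small Python analogue using the built-in memoryview (the flag handling is omitted):

```python
def as_view_or_none(obj):
    """Return a memoryview over `obj`, or None if it exposes no buffer."""
    if isinstance(obj, memoryview):
        return obj
    try:
        return memoryview(obj)
    except TypeError:
        return None

# as_view_or_none(b"abc") -> a memoryview; as_view_or_none(42) -> None
```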
-/* "View.MemoryView":445
- * return obj
- *
- * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice dst_slice
- * cdef __Pyx_memviewslice src_slice
- */
-
-static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
- __Pyx_memviewslice __pyx_v_dst_slice;
- __Pyx_memviewslice __pyx_v_src_slice;
- __Pyx_memviewslice __pyx_v_msrc;
- __Pyx_memviewslice __pyx_v_mdst;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice *__pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_t_3;
- int __pyx_t_4;
- int __pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
-
- /* "View.MemoryView":448
- * cdef __Pyx_memviewslice dst_slice
- * cdef __Pyx_memviewslice src_slice
- * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0] # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0]
- *
- */
- if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 448, __pyx_L1_error)
- __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 448, __pyx_L1_error)
- __pyx_v_msrc = (__pyx_t_1[0]);
-
- /* "View.MemoryView":449
- * cdef __Pyx_memviewslice src_slice
- * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0]
- * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] # <<<<<<<<<<<<<<
- *
- * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
- */
- if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 449, __pyx_L1_error)
- __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 449, __pyx_L1_error)
- __pyx_v_mdst = (__pyx_t_1[0]);
-
- /* "View.MemoryView":451
- * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0]
- *
- * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
- *
- * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
- */
- __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_5 = __pyx_memoryview_copy_contents(__pyx_v_msrc, __pyx_v_mdst, __pyx_t_3, __pyx_t_4, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 451, __pyx_L1_error)
-
- /* "View.MemoryView":445
- * return obj
- *
- * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice dst_slice
- * cdef __Pyx_memviewslice src_slice
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
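`setitem_slice_assignment` (above) unwraps both views into `__Pyx_memviewslice` structs and delegates to `memoryview_copy_contents`, i.e. an element-wise copy that respects the shapes and strides of source and destination. CPython's built-in memoryview supports the same idea for the simple one-dimensional case:

```python
# Element-wise copy into a destination slice, the built-in counterpart of
# memoryview_copy_contents for a flat byte buffer.
buf = bytearray(b"abcdef")
dst = memoryview(buf)
dst[1:4] = b"XYZ"
print(buf)   # bytearray(b'aXYZef')
```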
-/* "View.MemoryView":453
- * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
- *
- * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
- * cdef int array[128]
- * cdef void *tmp = NULL
- */
-
-static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
- int __pyx_v_array[0x80];
- void *__pyx_v_tmp;
- void *__pyx_v_item;
- __Pyx_memviewslice *__pyx_v_dst_slice;
- __Pyx_memviewslice __pyx_v_tmp_slice;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice *__pyx_t_1;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_t_4;
- int __pyx_t_5;
- char const *__pyx_t_6;
- PyObject *__pyx_t_7 = NULL;
- PyObject *__pyx_t_8 = NULL;
- PyObject *__pyx_t_9 = NULL;
- PyObject *__pyx_t_10 = NULL;
- PyObject *__pyx_t_11 = NULL;
- PyObject *__pyx_t_12 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
-
- /* "View.MemoryView":455
- * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
- * cdef int array[128]
- * cdef void *tmp = NULL # <<<<<<<<<<<<<<
- * cdef void *item
- *
- */
- __pyx_v_tmp = NULL;
-
- /* "View.MemoryView":460
- * cdef __Pyx_memviewslice *dst_slice
- * cdef __Pyx_memviewslice tmp_slice
- * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
- *
- * if self.view.itemsize > sizeof(array):
- */
- __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 460, __pyx_L1_error)
- __pyx_v_dst_slice = __pyx_t_1;
-
- /* "View.MemoryView":462
- * dst_slice = get_slice_from_memview(dst, &tmp_slice)
- *
- * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
- * tmp = PyMem_Malloc(self.view.itemsize)
- * if tmp == NULL:
- */
- __pyx_t_2 = (((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array)));
- if (__pyx_t_2) {
-
- /* "View.MemoryView":463
- *
- * if self.view.itemsize > sizeof(array):
- * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
- * if tmp == NULL:
- * raise MemoryError
- */
- __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
-
- /* "View.MemoryView":464
- * if self.view.itemsize > sizeof(array):
- * tmp = PyMem_Malloc(self.view.itemsize)
- * if tmp == NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- * item = tmp
- */
- __pyx_t_2 = (__pyx_v_tmp == NULL);
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":465
- * tmp = PyMem_Malloc(self.view.itemsize)
- * if tmp == NULL:
- * raise MemoryError # <<<<<<<<<<<<<<
- * item = tmp
- * else:
- */
- PyErr_NoMemory(); __PYX_ERR(1, 465, __pyx_L1_error)
-
- /* "View.MemoryView":464
- * if self.view.itemsize > sizeof(array):
- * tmp = PyMem_Malloc(self.view.itemsize)
- * if tmp == NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- * item = tmp
- */
- }
-
- /* "View.MemoryView":466
- * if tmp == NULL:
- * raise MemoryError
- * item = tmp # <<<<<<<<<<<<<<
- * else:
- * item = array
- */
- __pyx_v_item = __pyx_v_tmp;
-
- /* "View.MemoryView":462
- * dst_slice = get_slice_from_memview(dst, &tmp_slice)
- *
- * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
- * tmp = PyMem_Malloc(self.view.itemsize)
- * if tmp == NULL:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":468
- * item = tmp
- * else:
- * item = array # <<<<<<<<<<<<<<
- *
- * try:
- */
- /*else*/ {
- __pyx_v_item = ((void *)__pyx_v_array);
- }
- __pyx_L3:;
-
- /* "View.MemoryView":470
- * item = array
- *
- * try: # <<<<<<<<<<<<<<
- * if self.dtype_is_object:
- * (<PyObject **> item)[0] = value
- */
- /*try:*/ {
-
- /* "View.MemoryView":471
- *
- * try:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * (<PyObject **> item)[0] = value
- * else:
- */
- if (__pyx_v_self->dtype_is_object) {
-
- /* "View.MemoryView":472
- * try:
- * if self.dtype_is_object:
- * (<PyObject **> item)[0] = value # <<<<<<<<<<<<<<
- * else:
- * self.assign_item_from_object(<char *> item, value)
- */
- (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
-
- /* "View.MemoryView":471
- *
- * try:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * (<PyObject **> item)[0] = value
- * else:
- */
- goto __pyx_L8;
- }
-
- /* "View.MemoryView":474
- * (<PyObject **> item)[0] = value
- * else:
- * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
- *
- *
- */
- /*else*/ {
- __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 474, __pyx_L6_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- }
- __pyx_L8:;
-
- /* "View.MemoryView":478
- *
- *
- * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
- * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- */
- __pyx_t_2 = (__pyx_v_self->view.suboffsets != NULL);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":479
- *
- * if self.view.suboffsets != NULL:
- * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
- * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- * item, self.dtype_is_object)
- */
- __pyx_t_4 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 479, __pyx_L6_error)
-
- /* "View.MemoryView":478
- *
- *
- * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
- * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
- */
- }
-
- /* "View.MemoryView":480
- * if self.view.suboffsets != NULL:
- * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
- * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
- * item, self.dtype_is_object)
- * finally:
- */
- __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
- }
-
- /* "View.MemoryView":483
- * item, self.dtype_is_object)
- * finally:
- * PyMem_Free(tmp) # <<<<<<<<<<<<<<
- *
- * cdef setitem_indexed(self, index, value):
- */
- /*finally:*/ {
- /*normal exit:*/{
- PyMem_Free(__pyx_v_tmp);
- goto __pyx_L7;
- }
- __pyx_L6_error:;
- /*exception exit:*/{
- __Pyx_PyThreadState_declare
- __Pyx_PyThreadState_assign
- __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
- __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
- if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
- __Pyx_XGOTREF(__pyx_t_7);
- __Pyx_XGOTREF(__pyx_t_8);
- __Pyx_XGOTREF(__pyx_t_9);
- __Pyx_XGOTREF(__pyx_t_10);
- __Pyx_XGOTREF(__pyx_t_11);
- __Pyx_XGOTREF(__pyx_t_12);
- __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
- {
- PyMem_Free(__pyx_v_tmp);
- }
- if (PY_MAJOR_VERSION >= 3) {
- __Pyx_XGIVEREF(__pyx_t_10);
- __Pyx_XGIVEREF(__pyx_t_11);
- __Pyx_XGIVEREF(__pyx_t_12);
- __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
- }
- __Pyx_XGIVEREF(__pyx_t_7);
- __Pyx_XGIVEREF(__pyx_t_8);
- __Pyx_XGIVEREF(__pyx_t_9);
- __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
- __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
- __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
- goto __pyx_L1_error;
- }
- __pyx_L7:;
- }
-
- /* "View.MemoryView":453
- * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object)
- *
- * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
- * cdef int array[128]
- * cdef void *tmp = NULL
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
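`setitem_slice_assign_scalar` (above) encodes the scalar once into a per-item temporary (a 128-int stack array when the itemsize fits, a `PyMem_Malloc` block otherwise, freed in the `finally` clause) and then fans that item out over every element of the destination slice. A contiguous-only Python sketch of the same pack-once, copy-many pattern (function and parameter names are assumptions):

```python
import struct

def broadcast_scalar(buf: bytearray, fmt: str, value) -> None:
    """Pack `value` once, then copy the packed item into every slot of `buf`.
    The C version does this over an arbitrary strided, multi-dimensional slice."""
    item = struct.pack(fmt, value)          # plays the role of the tmp/array buffer
    size = struct.calcsize(fmt)
    for off in range(0, len(buf), size):
        buf[off:off + size] = item

data = bytearray(4 * 4)
broadcast_scalar(data, "i", 7)              # all four ints now equal 7
```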
-/* "View.MemoryView":485
- * PyMem_Free(tmp)
- *
- * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
- * cdef char *itemp = self.get_item_pointer(index)
- * self.assign_item_from_object(itemp, value)
- */
-
-static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
- char *__pyx_v_itemp;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- char *__pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("setitem_indexed", 0);
-
- /* "View.MemoryView":486
- *
- * cdef setitem_indexed(self, index, value):
- * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
- * self.assign_item_from_object(itemp, value)
- *
- */
- __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 486, __pyx_L1_error)
- __pyx_v_itemp = __pyx_t_1;
-
- /* "View.MemoryView":487
- * cdef setitem_indexed(self, index, value):
- * cdef char *itemp = self.get_item_pointer(index)
- * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
- *
- * cdef convert_item_to_object(self, char *itemp):
- */
- __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 487, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "View.MemoryView":485
- * PyMem_Free(tmp)
- *
- * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
- * cdef char *itemp = self.get_item_pointer(index)
- * self.assign_item_from_object(itemp, value)
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":489
- * self.assign_item_from_object(itemp, value)
- *
- * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- */
-
-static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
- PyObject *__pyx_v_struct = NULL;
- PyObject *__pyx_v_bytesitem = 0;
- PyObject *__pyx_v_result = NULL;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- PyObject *__pyx_t_6 = NULL;
- PyObject *__pyx_t_7 = NULL;
- int __pyx_t_8;
- Py_ssize_t __pyx_t_9;
- int __pyx_t_10;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("convert_item_to_object", 0);
-
- /* "View.MemoryView":492
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- * import struct # <<<<<<<<<<<<<<
- * cdef bytes bytesitem
- *
- */
- __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 492, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v_struct = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":495
- * cdef bytes bytesitem
- *
- * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
- * try:
- * result = struct.unpack(self.view.format, bytesitem)
- */
- __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":496
- *
- * bytesitem = itemp[:self.view.itemsize]
- * try: # <<<<<<<<<<<<<<
- * result = struct.unpack(self.view.format, bytesitem)
- * except struct.error:
- */
- {
- __Pyx_PyThreadState_declare
- __Pyx_PyThreadState_assign
- __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
- __Pyx_XGOTREF(__pyx_t_2);
- __Pyx_XGOTREF(__pyx_t_3);
- __Pyx_XGOTREF(__pyx_t_4);
- /*try:*/ {
-
- /* "View.MemoryView":497
- * bytesitem = itemp[:self.view.itemsize]
- * try:
- * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
- * except struct.error:
- * raise ValueError, "Unable to convert item to object"
- */
- __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 497, __pyx_L3_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 497, __pyx_L3_error)
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_7 = NULL;
- __pyx_t_8 = 0;
- if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
- __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
- if (likely(__pyx_t_7)) {
- PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
- __Pyx_INCREF(__pyx_t_7);
- __Pyx_INCREF(function);
- __Pyx_DECREF_SET(__pyx_t_5, function);
- __pyx_t_8 = 1;
- }
- }
- {
- PyObject *__pyx_callargs[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
- __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8);
- __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 497, __pyx_L3_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
- __pyx_v_result = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":496
- *
- * bytesitem = itemp[:self.view.itemsize]
- * try: # <<<<<<<<<<<<<<
- * result = struct.unpack(self.view.format, bytesitem)
- * except struct.error:
- */
- }
-
- /* "View.MemoryView":501
- * raise ValueError, "Unable to convert item to object"
- * else:
- * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
- * return result[0]
- * return result
- */
- /*else:*/ {
- __pyx_t_9 = __Pyx_ssize_strlen(__pyx_v_self->view.format); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(1, 501, __pyx_L5_except_error)
- __pyx_t_10 = (__pyx_t_9 == 1);
- if (__pyx_t_10) {
-
- /* "View.MemoryView":502
- * else:
- * if len(self.view.format) == 1:
- * return result[0] # <<<<<<<<<<<<<<
- * return result
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 502, __pyx_L5_except_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L6_except_return;
-
- /* "View.MemoryView":501
- * raise ValueError, "Unable to convert item to object"
- * else:
- * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
- * return result[0]
- * return result
- */
- }
-
- /* "View.MemoryView":503
- * if len(self.view.format) == 1:
- * return result[0]
- * return result # <<<<<<<<<<<<<<
- *
- * cdef assign_item_from_object(self, char *itemp, object value):
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_result);
- __pyx_r = __pyx_v_result;
- goto __pyx_L6_except_return;
- }
- __pyx_L3_error:;
- __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
- __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
-
- /* "View.MemoryView":498
- * try:
- * result = struct.unpack(self.view.format, bytesitem)
- * except struct.error: # <<<<<<<<<<<<<<
- * raise ValueError, "Unable to convert item to object"
- * else:
- */
- __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6);
- __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 498, __pyx_L5_except_error)
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_7);
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_6);
- __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0;
- if (__pyx_t_8) {
- __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
- if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 498, __pyx_L5_except_error)
- __Pyx_XGOTREF(__pyx_t_6);
- __Pyx_XGOTREF(__pyx_t_5);
- __Pyx_XGOTREF(__pyx_t_1);
-
- /* "View.MemoryView":499
- * result = struct.unpack(self.view.format, bytesitem)
- * except struct.error:
- * raise ValueError, "Unable to convert item to object" # <<<<<<<<<<<<<<
- * else:
- * if len(self.view.format) == 1:
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Unable_to_convert_item_to_object, 0, 0);
- __PYX_ERR(1, 499, __pyx_L5_except_error)
- }
- goto __pyx_L5_except_error;
-
- /* "View.MemoryView":496
- *
- * bytesitem = itemp[:self.view.itemsize]
- * try: # <<<<<<<<<<<<<<
- * result = struct.unpack(self.view.format, bytesitem)
- * except struct.error:
- */
- __pyx_L5_except_error:;
- __Pyx_XGIVEREF(__pyx_t_2);
- __Pyx_XGIVEREF(__pyx_t_3);
- __Pyx_XGIVEREF(__pyx_t_4);
- __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
- goto __pyx_L1_error;
- __pyx_L6_except_return:;
- __Pyx_XGIVEREF(__pyx_t_2);
- __Pyx_XGIVEREF(__pyx_t_3);
- __Pyx_XGIVEREF(__pyx_t_4);
- __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
- goto __pyx_L0;
- }
-
- /* "View.MemoryView":489
- * self.assign_item_from_object(itemp, value)
- *
- * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_XDECREF(__pyx_t_7);
- __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_struct);
- __Pyx_XDECREF(__pyx_v_bytesitem);
- __Pyx_XDECREF(__pyx_v_result);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
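`convert_item_to_object` (above) is the slow-path decoder used when Cython does not know the element type statically: it slices `itemsize` raw bytes at the item pointer and runs them through `struct.unpack` with the buffer's format string, unwrapping single-field formats to a scalar. A self-contained sketch of that decoding step:

```python
import struct

def item_to_object(raw: bytes, fmt: str):
    """Decode one item's raw bytes with the buffer's struct format string."""
    try:
        result = struct.unpack(fmt, raw)
    except struct.error:
        raise ValueError("Unable to convert item to object")
    return result[0] if len(fmt) == 1 else result   # single-field -> scalar

# item_to_object(struct.pack("i", 42), "i") -> 42
```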
-/* "View.MemoryView":505
- * return result
- *
- * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- */
-
-static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
- PyObject *__pyx_v_struct = NULL;
- char __pyx_v_c;
- PyObject *__pyx_v_bytesvalue = 0;
- Py_ssize_t __pyx_v_i;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_t_6;
- Py_ssize_t __pyx_t_7;
- PyObject *__pyx_t_8 = NULL;
- char *__pyx_t_9;
- char *__pyx_t_10;
- char *__pyx_t_11;
- char *__pyx_t_12;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("assign_item_from_object", 0);
-
- /* "View.MemoryView":508
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- * import struct # <<<<<<<<<<<<<<
- * cdef char c
- * cdef bytes bytesvalue
- */
- __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v_struct = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":513
- * cdef Py_ssize_t i
- *
- * if isinstance(value, tuple): # <<<<<<<<<<<<<<
- * bytesvalue = struct.pack(self.view.format, *value)
- * else:
- */
- __pyx_t_2 = PyTuple_Check(__pyx_v_value);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":514
- *
- * if isinstance(value, tuple):
- * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
- * else:
- * bytesvalue = struct.pack(self.view.format, value)
- */
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
- __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_5 = PyNumber_Add(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 514, __pyx_L1_error)
- __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":513
- * cdef Py_ssize_t i
- *
- * if isinstance(value, tuple): # <<<<<<<<<<<<<<
- * bytesvalue = struct.pack(self.view.format, *value)
- * else:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":516
- * bytesvalue = struct.pack(self.view.format, *value)
- * else:
- * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
- *
- * for i, c in enumerate(bytesvalue):
- */
- /*else*/ {
- __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 516, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 516, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = NULL;
- __pyx_t_6 = 0;
- if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
- __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5);
- if (likely(__pyx_t_4)) {
- PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
- __Pyx_INCREF(__pyx_t_4);
- __Pyx_INCREF(function);
- __Pyx_DECREF_SET(__pyx_t_5, function);
- __pyx_t_6 = 1;
- }
- }
- {
- PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_1, __pyx_v_value};
- __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6);
- __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
- if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 516, __pyx_L1_error)
- __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3);
- __pyx_t_3 = 0;
- }
- __pyx_L3:;
-
- /* "View.MemoryView":518
- * bytesvalue = struct.pack(self.view.format, value)
- *
- * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
- * itemp[i] = c
- *
- */
- __pyx_t_7 = 0;
- if (unlikely(__pyx_v_bytesvalue == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
- __PYX_ERR(1, 518, __pyx_L1_error)
- }
- __Pyx_INCREF(__pyx_v_bytesvalue);
- __pyx_t_8 = __pyx_v_bytesvalue;
- __pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8);
- __pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8));
- for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) {
- __pyx_t_9 = __pyx_t_12;
- __pyx_v_c = (__pyx_t_9[0]);
-
- /* "View.MemoryView":519
- *
- * for i, c in enumerate(bytesvalue):
- * itemp[i] = c # <<<<<<<<<<<<<<
- *
- * @cname('getbuffer')
- */
- __pyx_v_i = __pyx_t_7;
-
- /* "View.MemoryView":518
- * bytesvalue = struct.pack(self.view.format, value)
- *
- * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
- * itemp[i] = c
- *
- */
- __pyx_t_7 = (__pyx_t_7 + 1);
-
- /* "View.MemoryView":519
- *
- * for i, c in enumerate(bytesvalue):
- * itemp[i] = c # <<<<<<<<<<<<<<
- *
- * @cname('getbuffer')
- */
- (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
- }
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
-
- /* "View.MemoryView":505
- * return result
- *
- * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
- * """Only used if instantiated manually by the user, or if Cython doesn't
- * know how to convert the type"""
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_8);
- __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_struct);
- __Pyx_XDECREF(__pyx_v_bytesvalue);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
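`assign_item_from_object` (above) is the matching slow-path encoder: the Python value (a tuple for multi-field formats) is packed with `struct.pack` and the resulting bytes are copied into the item slot one byte at a time. A short sketch of the packing half:

```python
import struct

def object_to_item(value, fmt: str) -> bytes:
    """Pack a Python value into the raw bytes written into the buffer."""
    if isinstance(value, tuple):
        return struct.pack(fmt, *value)   # multi-field formats arrive as tuples
    return struct.pack(fmt, value)

# object_to_item((1, 2.5), "if") == struct.pack("if", 1, 2.5)
```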
-/* "View.MemoryView":521
- * itemp[i] = c
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * if flags & PyBUF_WRITABLE and self.view.readonly:
- */
-
-/* Python wrapper */
-CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- Py_ssize_t *__pyx_t_3;
- char *__pyx_t_4;
- void *__pyx_t_5;
- int __pyx_t_6;
- Py_ssize_t __pyx_t_7;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- if (unlikely(__pyx_v_info == NULL)) {
- PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
- return -1;
- }
- __Pyx_RefNannySetupContext("__getbuffer__", 0);
- __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(__pyx_v_info->obj);
-
- /* "View.MemoryView":523
- * @cname('getbuffer')
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
- * raise ValueError, "Cannot create writable memory view from read-only memoryview"
- *
- */
- __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
- if (__pyx_t_2) {
- } else {
- __pyx_t_1 = __pyx_t_2;
- goto __pyx_L4_bool_binop_done;
- }
- __pyx_t_1 = __pyx_v_self->view.readonly;
- __pyx_L4_bool_binop_done:;
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":524
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * if flags & PyBUF_WRITABLE and self.view.readonly:
- * raise ValueError, "Cannot create writable memory view from read-only memoryview" # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_ND:
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Cannot_create_writable_memory_vi, 0, 0);
- __PYX_ERR(1, 524, __pyx_L1_error)
-
- /* "View.MemoryView":523
- * @cname('getbuffer')
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
- * raise ValueError, "Cannot create writable memory view from read-only memoryview"
- *
- */
- }
-
- /* "View.MemoryView":526
- * raise ValueError, "Cannot create writable memory view from read-only memoryview"
- *
- * if flags & PyBUF_ND: # <<<<<<<<<<<<<<
- * info.shape = self.view.shape
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":527
- *
- * if flags & PyBUF_ND:
- * info.shape = self.view.shape # <<<<<<<<<<<<<<
- * else:
- * info.shape = NULL
- */
- __pyx_t_3 = __pyx_v_self->view.shape;
- __pyx_v_info->shape = __pyx_t_3;
-
- /* "View.MemoryView":526
- * raise ValueError, "Cannot create writable memory view from read-only memoryview"
- *
- * if flags & PyBUF_ND: # <<<<<<<<<<<<<<
- * info.shape = self.view.shape
- * else:
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":529
- * info.shape = self.view.shape
- * else:
- * info.shape = NULL # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_STRIDES:
- */
- /*else*/ {
- __pyx_v_info->shape = NULL;
- }
- __pyx_L6:;
-
- /* "View.MemoryView":531
- * info.shape = NULL
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.strides = self.view.strides
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":532
- *
- * if flags & PyBUF_STRIDES:
- * info.strides = self.view.strides # <<<<<<<<<<<<<<
- * else:
- * info.strides = NULL
- */
- __pyx_t_3 = __pyx_v_self->view.strides;
- __pyx_v_info->strides = __pyx_t_3;
-
- /* "View.MemoryView":531
- * info.shape = NULL
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.strides = self.view.strides
- * else:
- */
- goto __pyx_L7;
- }
-
- /* "View.MemoryView":534
- * info.strides = self.view.strides
- * else:
- * info.strides = NULL # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_INDIRECT:
- */
- /*else*/ {
- __pyx_v_info->strides = NULL;
- }
- __pyx_L7:;
-
- /* "View.MemoryView":536
- * info.strides = NULL
- *
- * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
- * info.suboffsets = self.view.suboffsets
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":537
- *
- * if flags & PyBUF_INDIRECT:
- * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
- * else:
- * info.suboffsets = NULL
- */
- __pyx_t_3 = __pyx_v_self->view.suboffsets;
- __pyx_v_info->suboffsets = __pyx_t_3;
-
- /* "View.MemoryView":536
- * info.strides = NULL
- *
- * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
- * info.suboffsets = self.view.suboffsets
- * else:
- */
- goto __pyx_L8;
- }
-
- /* "View.MemoryView":539
- * info.suboffsets = self.view.suboffsets
- * else:
- * info.suboffsets = NULL # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_FORMAT:
- */
- /*else*/ {
- __pyx_v_info->suboffsets = NULL;
- }
- __pyx_L8:;
-
- /* "View.MemoryView":541
- * info.suboffsets = NULL
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * info.format = self.view.format
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":542
- *
- * if flags & PyBUF_FORMAT:
- * info.format = self.view.format # <<<<<<<<<<<<<<
- * else:
- * info.format = NULL
- */
- __pyx_t_4 = __pyx_v_self->view.format;
- __pyx_v_info->format = __pyx_t_4;
-
- /* "View.MemoryView":541
- * info.suboffsets = NULL
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * info.format = self.view.format
- * else:
- */
- goto __pyx_L9;
- }
-
- /* "View.MemoryView":544
- * info.format = self.view.format
- * else:
- * info.format = NULL # <<<<<<<<<<<<<<
- *
- * info.buf = self.view.buf
- */
- /*else*/ {
- __pyx_v_info->format = NULL;
- }
- __pyx_L9:;
-
- /* "View.MemoryView":546
- * info.format = NULL
- *
- * info.buf = self.view.buf # <<<<<<<<<<<<<<
- * info.ndim = self.view.ndim
- * info.itemsize = self.view.itemsize
- */
- __pyx_t_5 = __pyx_v_self->view.buf;
- __pyx_v_info->buf = __pyx_t_5;
-
- /* "View.MemoryView":547
- *
- * info.buf = self.view.buf
- * info.ndim = self.view.ndim # <<<<<<<<<<<<<<
- * info.itemsize = self.view.itemsize
- * info.len = self.view.len
- */
- __pyx_t_6 = __pyx_v_self->view.ndim;
- __pyx_v_info->ndim = __pyx_t_6;
-
- /* "View.MemoryView":548
- * info.buf = self.view.buf
- * info.ndim = self.view.ndim
- * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
- * info.len = self.view.len
- * info.readonly = self.view.readonly
- */
- __pyx_t_7 = __pyx_v_self->view.itemsize;
- __pyx_v_info->itemsize = __pyx_t_7;
-
- /* "View.MemoryView":549
- * info.ndim = self.view.ndim
- * info.itemsize = self.view.itemsize
- * info.len = self.view.len # <<<<<<<<<<<<<<
- * info.readonly = self.view.readonly
- * info.obj = self
- */
- __pyx_t_7 = __pyx_v_self->view.len;
- __pyx_v_info->len = __pyx_t_7;
-
- /* "View.MemoryView":550
- * info.itemsize = self.view.itemsize
- * info.len = self.view.len
- * info.readonly = self.view.readonly # <<<<<<<<<<<<<<
- * info.obj = self
- *
- */
- __pyx_t_1 = __pyx_v_self->view.readonly;
- __pyx_v_info->readonly = __pyx_t_1;
-
- /* "View.MemoryView":551
- * info.len = self.view.len
- * info.readonly = self.view.readonly
- * info.obj = self # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_GIVEREF((PyObject *)__pyx_v_self);
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj);
- __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
-
- /* "View.MemoryView":521
- * itemp[i] = c
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * if flags & PyBUF_WRITABLE and self.view.readonly:
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- if (__pyx_v_info->obj != NULL) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- goto __pyx_L2;
- __pyx_L0:;
- if (__pyx_v_info->obj == Py_None) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- __pyx_L2:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":554
- *
- *
- * @property # <<<<<<<<<<<<<<
- * def T(self):
- * cdef _memoryviewslice result = memoryview_copy(self)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":556
- * @property
- * def T(self):
- * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
- * transpose_memslice(&result.from_slice)
- * return result
- */
- __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 556, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 556, __pyx_L1_error)
- __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":557
- * def T(self):
- * cdef _memoryviewslice result = memoryview_copy(self)
- * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
- * return result
- *
- */
- __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 557, __pyx_L1_error)
-
- /* "View.MemoryView":558
- * cdef _memoryviewslice result = memoryview_copy(self)
- * transpose_memslice(&result.from_slice)
- * return result # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF((PyObject *)__pyx_v_result);
- __pyx_r = ((PyObject *)__pyx_v_result);
- goto __pyx_L0;
-
- /* "View.MemoryView":554
- *
- *
- * @property # <<<<<<<<<<<<<<
- * def T(self):
- * cdef _memoryviewslice result = memoryview_copy(self)
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF((PyObject *)__pyx_v_result);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":560
- * return result
- *
- * @property # <<<<<<<<<<<<<<
- * def base(self):
- * return self._get_base()
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":562
- * @property
- * def base(self):
- * return self._get_base() # <<<<<<<<<<<<<<
- *
- * cdef _get_base(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->_get_base(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 562, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":560
- * return result
- *
- * @property # <<<<<<<<<<<<<<
- * def base(self):
- * return self._get_base()
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.memoryview.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":564
- * return self._get_base()
- *
- * cdef _get_base(self): # <<<<<<<<<<<<<<
- * return self.obj
- *
- */
-
-static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("_get_base", 0);
-
- /* "View.MemoryView":565
- *
- * cdef _get_base(self):
- * return self.obj # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_self->obj);
- __pyx_r = __pyx_v_self->obj;
- goto __pyx_L0;
-
- /* "View.MemoryView":564
- * return self._get_base()
- *
- * cdef _get_base(self): # <<<<<<<<<<<<<<
- * return self.obj
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":567
- * return self.obj
- *
- * @property # <<<<<<<<<<<<<<
- * def shape(self):
- * return tuple([length for length in self.view.shape[:self.view.ndim]])
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- Py_ssize_t __pyx_7genexpr__pyx_v_length;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- Py_ssize_t *__pyx_t_2;
- Py_ssize_t *__pyx_t_3;
- Py_ssize_t *__pyx_t_4;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":569
- * @property
- * def shape(self):
- * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- { /* enter inner scope */
- __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
- for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
- __pyx_t_2 = __pyx_t_4;
- __pyx_7genexpr__pyx_v_length = (__pyx_t_2[0]);
- __pyx_t_5 = PyInt_FromSsize_t(__pyx_7genexpr__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 569, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
- } /* exit inner scope */
- __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_r = __pyx_t_5;
- __pyx_t_5 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":567
- * return self.obj
- *
- * @property # <<<<<<<<<<<<<<
- * def shape(self):
- * return tuple([length for length in self.view.shape[:self.view.ndim]])
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":571
- * return tuple([length for length in self.view.shape[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def strides(self):
- * if self.view.strides == NULL:
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- Py_ssize_t __pyx_8genexpr1__pyx_v_stride;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- Py_ssize_t *__pyx_t_3;
- Py_ssize_t *__pyx_t_4;
- Py_ssize_t *__pyx_t_5;
- PyObject *__pyx_t_6 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":573
- * @property
- * def strides(self):
- * if self.view.strides == NULL: # <<<<<<<<<<<<<<
- *
- * raise ValueError, "Buffer view does not expose strides"
- */
- __pyx_t_1 = (__pyx_v_self->view.strides == NULL);
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":575
- * if self.view.strides == NULL:
- *
- * raise ValueError, "Buffer view does not expose strides" # <<<<<<<<<<<<<<
- *
- * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Buffer_view_does_not_expose_stri, 0, 0);
- __PYX_ERR(1, 575, __pyx_L1_error)
-
- /* "View.MemoryView":573
- * @property
- * def strides(self):
- * if self.view.strides == NULL: # <<<<<<<<<<<<<<
- *
- * raise ValueError, "Buffer view does not expose strides"
- */
- }
-
- /* "View.MemoryView":577
- * raise ValueError, "Buffer view does not expose strides"
- *
- * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- { /* enter inner scope */
- __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
- for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
- __pyx_t_3 = __pyx_t_5;
- __pyx_8genexpr1__pyx_v_stride = (__pyx_t_3[0]);
- __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr1__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 577, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- }
- } /* exit inner scope */
- __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_r = __pyx_t_6;
- __pyx_t_6 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":571
- * return tuple([length for length in self.view.shape[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def strides(self):
- * if self.view.strides == NULL:
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":579
- * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def suboffsets(self):
- * if self.view.suboffsets == NULL:
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- Py_ssize_t __pyx_8genexpr2__pyx_v_suboffset;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- Py_ssize_t *__pyx_t_3;
- Py_ssize_t *__pyx_t_4;
- Py_ssize_t *__pyx_t_5;
- PyObject *__pyx_t_6 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":581
- * @property
- * def suboffsets(self):
- * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
- * return (-1,) * self.view.ndim
- *
- */
- __pyx_t_1 = (__pyx_v_self->view.suboffsets == NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":582
- * def suboffsets(self):
- * if self.view.suboffsets == NULL:
- * return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
- *
- * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __Pyx_PySequence_Multiply(__pyx_tuple__4, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 582, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":581
- * @property
- * def suboffsets(self):
- * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
- * return (-1,) * self.view.ndim
- *
- */
- }
-
- /* "View.MemoryView":584
- * return (-1,) * self.view.ndim
- *
- * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- { /* enter inner scope */
- __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 584, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
- for (__pyx_t_5 = __pyx_v_self->view.suboffsets; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
- __pyx_t_3 = __pyx_t_5;
- __pyx_8genexpr2__pyx_v_suboffset = (__pyx_t_3[0]);
- __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr2__pyx_v_suboffset); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 584, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- }
- } /* exit inner scope */
- __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_r = __pyx_t_6;
- __pyx_t_6 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":579
- * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def suboffsets(self):
- * if self.view.suboffsets == NULL:
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":586
- * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def ndim(self):
- * return self.view.ndim
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":588
- * @property
- * def ndim(self):
- * return self.view.ndim # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 588, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":586
- * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
- *
- * @property # <<<<<<<<<<<<<<
- * def ndim(self):
- * return self.view.ndim
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":590
- * return self.view.ndim
- *
- * @property # <<<<<<<<<<<<<<
- * def itemsize(self):
- * return self.view.itemsize
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":592
- * @property
- * def itemsize(self):
- * return self.view.itemsize # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 592, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":590
- * return self.view.ndim
- *
- * @property # <<<<<<<<<<<<<<
- * def itemsize(self):
- * return self.view.itemsize
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":594
- * return self.view.itemsize
- *
- * @property # <<<<<<<<<<<<<<
- * def nbytes(self):
- * return self.size * self.view.itemsize
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":596
- * @property
- * def nbytes(self):
- * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
- *
- * @property
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 596, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 596, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 596, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":594
- * return self.view.itemsize
- *
- * @property # <<<<<<<<<<<<<<
- * def nbytes(self):
- * return self.size * self.view.itemsize
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":598
- * return self.size * self.view.itemsize
- *
- * @property # <<<<<<<<<<<<<<
- * def size(self):
- * if self._size is None:
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_v_result = NULL;
- PyObject *__pyx_v_length = NULL;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- Py_ssize_t *__pyx_t_2;
- Py_ssize_t *__pyx_t_3;
- Py_ssize_t *__pyx_t_4;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":600
- * @property
- * def size(self):
- * if self._size is None: # <<<<<<<<<<<<<<
- * result = 1
- *
- */
- __pyx_t_1 = (__pyx_v_self->_size == Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":601
- * def size(self):
- * if self._size is None:
- * result = 1 # <<<<<<<<<<<<<<
- *
- * for length in self.view.shape[:self.view.ndim]:
- */
- __Pyx_INCREF(__pyx_int_1);
- __pyx_v_result = __pyx_int_1;
-
- /* "View.MemoryView":603
- * result = 1
- *
- * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
- * result *= length
- *
- */
- __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
- for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
- __pyx_t_2 = __pyx_t_4;
- __pyx_t_5 = PyInt_FromSsize_t((__pyx_t_2[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 603, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5);
- __pyx_t_5 = 0;
-
- /* "View.MemoryView":604
- *
- * for length in self.view.shape[:self.view.ndim]:
- * result *= length # <<<<<<<<<<<<<<
- *
- * self._size = result
- */
- __pyx_t_5 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 604, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_5);
- __pyx_t_5 = 0;
- }
-
- /* "View.MemoryView":606
- * result *= length
- *
- * self._size = result # <<<<<<<<<<<<<<
- *
- * return self._size
- */
- __Pyx_INCREF(__pyx_v_result);
- __Pyx_GIVEREF(__pyx_v_result);
- __Pyx_GOTREF(__pyx_v_self->_size);
- __Pyx_DECREF(__pyx_v_self->_size);
- __pyx_v_self->_size = __pyx_v_result;
-
- /* "View.MemoryView":600
- * @property
- * def size(self):
- * if self._size is None: # <<<<<<<<<<<<<<
- * result = 1
- *
- */
- }
-
- /* "View.MemoryView":608
- * self._size = result
- *
- * return self._size # <<<<<<<<<<<<<<
- *
- * def __len__(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_self->_size);
- __pyx_r = __pyx_v_self->_size;
- goto __pyx_L0;
-
- /* "View.MemoryView":598
- * return self.size * self.view.itemsize
- *
- * @property # <<<<<<<<<<<<<<
- * def size(self):
- * if self._size is None:
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_result);
- __Pyx_XDECREF(__pyx_v_length);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":610
- * return self._size
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * if self.view.ndim >= 1:
- * return self.view.shape[0]
- */
-
-/* Python wrapper */
-static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
-static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- __Pyx_RefNannySetupContext("__len__", 0);
-
- /* "View.MemoryView":611
- *
- * def __len__(self):
- * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
- * return self.view.shape[0]
- *
- */
- __pyx_t_1 = (__pyx_v_self->view.ndim >= 1);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":612
- * def __len__(self):
- * if self.view.ndim >= 1:
- * return self.view.shape[0] # <<<<<<<<<<<<<<
- *
- * return 0
- */
- __pyx_r = (__pyx_v_self->view.shape[0]);
- goto __pyx_L0;
-
- /* "View.MemoryView":611
- *
- * def __len__(self):
- * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
- * return self.view.shape[0]
- *
- */
- }
-
- /* "View.MemoryView":614
- * return self.view.shape[0]
- *
- * return 0 # <<<<<<<<<<<<<<
- *
- * def __repr__(self):
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":610
- * return self._size
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * if self.view.ndim >= 1:
- * return self.view.shape[0]
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":616
- * return 0
- *
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return "" % (self.base.__class__.__name__,
- * id(self))
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__repr__", 0);
-
- /* "View.MemoryView":617
- *
- * def __repr__(self):
- * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
- * id(self))
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "View.MemoryView":618
- * def __repr__(self):
- * return "" % (self.base.__class__.__name__,
- * id(self)) # <<<<<<<<<<<<<<
- *
- * def __str__(self):
- */
- __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 618, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
-
- /* "View.MemoryView":617
- *
- * def __repr__(self):
- * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
- * id(self))
- *
- */
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 617, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
- __pyx_t_1 = 0;
- __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":616
- * return 0
- *
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return "" % (self.base.__class__.__name__,
- * id(self))
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":620
- * id(self))
- *
- * def __str__(self): # <<<<<<<<<<<<<<
- * return "" % (self.base.__class__.__name__,)
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__str__", 0);
-
- /* "View.MemoryView":621
- *
- * def __str__(self):
- * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
- __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":620
- * id(self))
- *
- * def __str__(self): # <<<<<<<<<<<<<<
- * return "" % (self.base.__class__.__name__,)
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":624
- *
- *
- * def is_c_contig(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("is_c_contig", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_c_contig", 0))) return NULL;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
- __Pyx_memviewslice *__pyx_v_mslice;
- __Pyx_memviewslice __pyx_v_tmp;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice *__pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("is_c_contig", 0);
-
- /* "View.MemoryView":627
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
- * return slice_is_contig(mslice[0], 'C', self.view.ndim)
- *
- */
- __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 627, __pyx_L1_error)
- __pyx_v_mslice = __pyx_t_1;
-
- /* "View.MemoryView":628
- * cdef __Pyx_memviewslice tmp
- * mslice = get_slice_from_memview(self, &tmp)
- * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
- *
- * def is_f_contig(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 628, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":624
- *
- *
- * def is_c_contig(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":630
- * return slice_is_contig(mslice[0], 'C', self.view.ndim)
- *
- * def is_f_contig(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("is_f_contig", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_f_contig", 0))) return NULL;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
- __Pyx_memviewslice *__pyx_v_mslice;
- __Pyx_memviewslice __pyx_v_tmp;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice *__pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("is_f_contig", 0);
-
- /* "View.MemoryView":633
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
- * return slice_is_contig(mslice[0], 'F', self.view.ndim)
- *
- */
- __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 633, __pyx_L1_error)
- __pyx_v_mslice = __pyx_t_1;
-
- /* "View.MemoryView":634
- * cdef __Pyx_memviewslice tmp
- * mslice = get_slice_from_memview(self, &tmp)
- * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
- *
- * def copy(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 634, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":630
- * return slice_is_contig(mslice[0], 'C', self.view.ndim)
- *
- * def is_f_contig(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice *mslice
- * cdef __Pyx_memviewslice tmp
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":636
- * return slice_is_contig(mslice[0], 'F', self.view.ndim)
- *
- * def copy(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice mslice
- * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("copy (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("copy", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy", 0))) return NULL;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
- __Pyx_memviewslice __pyx_v_mslice;
- int __pyx_v_flags;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("copy", 0);
-
- /* "View.MemoryView":638
- * def copy(self):
- * cdef __Pyx_memviewslice mslice
- * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
- *
- * slice_copy(self, &mslice)
- */
- __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
-
- /* "View.MemoryView":640
- * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- *
- * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
- * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
- * self.view.itemsize,
- */
- __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
-
- /* "View.MemoryView":641
- *
- * slice_copy(self, &mslice)
- * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
- * self.view.itemsize,
- * flags|PyBUF_C_CONTIGUOUS,
- */
- __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 641, __pyx_L1_error)
- __pyx_v_mslice = __pyx_t_1;
-
- /* "View.MemoryView":646
- * self.dtype_is_object)
- *
- * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
- *
- * def copy_fortran(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":636
- * return slice_is_contig(mslice[0], 'F', self.view.ndim)
- *
- * def copy(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice mslice
- * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":648
- * return memoryview_copy_from_slice(self, &mslice)
- *
- * def copy_fortran(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice src, dst
- * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- */
-
-/* Python wrapper */
-static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("copy_fortran", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy_fortran", 0))) return NULL;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
- __Pyx_memviewslice __pyx_v_src;
- __Pyx_memviewslice __pyx_v_dst;
- int __pyx_v_flags;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_memviewslice __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("copy_fortran", 0);
-
- /* "View.MemoryView":650
- * def copy_fortran(self):
- * cdef __Pyx_memviewslice src, dst
- * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
- *
- * slice_copy(self, &src)
- */
- __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
-
- /* "View.MemoryView":652
- * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- *
- * slice_copy(self, &src) # <<<<<<<<<<<<<<
- * dst = slice_copy_contig(&src, "fortran", self.view.ndim,
- * self.view.itemsize,
- */
- __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
-
- /* "View.MemoryView":653
- *
- * slice_copy(self, &src)
- * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
- * self.view.itemsize,
- * flags|PyBUF_F_CONTIGUOUS,
- */
- __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 653, __pyx_L1_error)
- __pyx_v_dst = __pyx_t_1;
-
- /* "View.MemoryView":658
- * self.dtype_is_object)
- *
- * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":648
- * return memoryview_copy_from_slice(self, &mslice)
- *
- * def copy_fortran(self): # <<<<<<<<<<<<<<
- * cdef __Pyx_memviewslice src, dst
- * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":2
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 2, __pyx_L1_error)
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":4
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 4, __pyx_L1_error)
-
- /* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":662
- *
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
- * cdef memoryview result = memoryview(o, flags, dtype_is_object)
- * result.typeinfo = typeinfo
- */
-
-static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
- struct __pyx_memoryview_obj *__pyx_v_result = 0;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
-
- /* "View.MemoryView":663
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
- * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
- * result.typeinfo = typeinfo
- * return result
- */
- __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 663, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 663, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_v_o);
- __Pyx_GIVEREF(__pyx_v_o);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
- __pyx_t_1 = 0;
- __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
- __pyx_t_2 = 0;
-
- /* "View.MemoryView":664
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
- * cdef memoryview result = memoryview(o, flags, dtype_is_object)
- * result.typeinfo = typeinfo # <<<<<<<<<<<<<<
- * return result
- *
- */
- __pyx_v_result->typeinfo = __pyx_v_typeinfo;
-
- /* "View.MemoryView":665
- * cdef memoryview result = memoryview(o, flags, dtype_is_object)
- * result.typeinfo = typeinfo
- * return result # <<<<<<<<<<<<<<
- *
- * @cname('__pyx_memoryview_check')
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF((PyObject *)__pyx_v_result);
- __pyx_r = ((PyObject *)__pyx_v_result);
- goto __pyx_L0;
-
- /* "View.MemoryView":662
- *
- * @cname('__pyx_memoryview_new')
- * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
- * cdef memoryview result = memoryview(o, flags, dtype_is_object)
- * result.typeinfo = typeinfo
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF((PyObject *)__pyx_v_result);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":668
- *
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<<
- * return isinstance(o, memoryview)
- *
- */
-
-static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- __Pyx_RefNannySetupContext("memoryview_check", 0);
-
- /* "View.MemoryView":669
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o) noexcept:
- * return isinstance(o, memoryview) # <<<<<<<<<<<<<<
- *
- * cdef tuple _unellipsify(object index, int ndim):
- */
- __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
- __pyx_r = __pyx_t_1;
- goto __pyx_L0;
-
- /* "View.MemoryView":668
- *
- * @cname('__pyx_memoryview_check')
- * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<<
- * return isinstance(o, memoryview)
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":671
- * return isinstance(o, memoryview)
- *
- * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
- * """
- * Replace all ellipses with full slices and fill incomplete indices with
- */
-
-static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
- Py_ssize_t __pyx_v_idx;
- PyObject *__pyx_v_tup = NULL;
- PyObject *__pyx_v_result = NULL;
- int __pyx_v_have_slices;
- int __pyx_v_seen_ellipsis;
- PyObject *__pyx_v_item = NULL;
- Py_ssize_t __pyx_v_nslices;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- Py_ssize_t __pyx_t_4;
- Py_ssize_t __pyx_t_5;
- Py_UCS4 __pyx_t_6;
- PyObject *__pyx_t_7 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("_unellipsify", 0);
-
- /* "View.MemoryView":677
- * """
- * cdef Py_ssize_t idx
- * tup = index if isinstance(index, tuple) else (index,) # <<<<<<<<<<<<<<
- *
- * result = [slice(None)] * ndim
- */
- __pyx_t_2 = PyTuple_Check(__pyx_v_index);
- if (__pyx_t_2) {
- __Pyx_INCREF(((PyObject*)__pyx_v_index));
- __pyx_t_1 = __pyx_v_index;
- } else {
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 677, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_v_index);
- __Pyx_GIVEREF(__pyx_v_index);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
- __pyx_t_1 = __pyx_t_3;
- __pyx_t_3 = 0;
- }
- __pyx_v_tup = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":679
- * tup = index if isinstance(index, tuple) else (index,)
- *
- * result = [slice(None)] * ndim # <<<<<<<<<<<<<<
- * have_slices = False
- * seen_ellipsis = False
- */
- __pyx_t_1 = PyList_New(1 * ((__pyx_v_ndim<0) ? 0:__pyx_v_ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 679, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- { Py_ssize_t __pyx_temp;
- for (__pyx_temp=0; __pyx_temp < __pyx_v_ndim; __pyx_temp++) {
- __Pyx_INCREF(__pyx_slice__5);
- __Pyx_GIVEREF(__pyx_slice__5);
- PyList_SET_ITEM(__pyx_t_1, __pyx_temp, __pyx_slice__5);
- }
- }
- __pyx_v_result = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":680
- *
- * result = [slice(None)] * ndim
- * have_slices = False # <<<<<<<<<<<<<<
- * seen_ellipsis = False
- * idx = 0
- */
- __pyx_v_have_slices = 0;
-
- /* "View.MemoryView":681
- * result = [slice(None)] * ndim
- * have_slices = False
- * seen_ellipsis = False # <<<<<<<<<<<<<<
- * idx = 0
- * for item in tup:
- */
- __pyx_v_seen_ellipsis = 0;
-
- /* "View.MemoryView":682
- * have_slices = False
- * seen_ellipsis = False
- * idx = 0 # <<<<<<<<<<<<<<
- * for item in tup:
- * if item is Ellipsis:
- */
- __pyx_v_idx = 0;
-
- /* "View.MemoryView":683
- * seen_ellipsis = False
- * idx = 0
- * for item in tup: # <<<<<<<<<<<<<<
- * if item is Ellipsis:
- * if not seen_ellipsis:
- */
- if (unlikely(__pyx_v_tup == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
- __PYX_ERR(1, 683, __pyx_L1_error)
- }
- __pyx_t_1 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0;
- for (;;) {
- if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(1, 683, __pyx_L1_error)
- #else
- __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 683, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- #endif
- __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":684
- * idx = 0
- * for item in tup:
- * if item is Ellipsis: # <<<<<<<<<<<<<<
- * if not seen_ellipsis:
- * idx += ndim - len(tup)
- */
- __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":685
- * for item in tup:
- * if item is Ellipsis:
- * if not seen_ellipsis: # <<<<<<<<<<<<<<
- * idx += ndim - len(tup)
- * seen_ellipsis = True
- */
- __pyx_t_2 = (!__pyx_v_seen_ellipsis);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":686
- * if item is Ellipsis:
- * if not seen_ellipsis:
- * idx += ndim - len(tup) # <<<<<<<<<<<<<<
- * seen_ellipsis = True
- * have_slices = True
- */
- if (unlikely(__pyx_v_tup == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
- __PYX_ERR(1, 686, __pyx_L1_error)
- }
- __pyx_t_5 = PyTuple_GET_SIZE(__pyx_v_tup); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 686, __pyx_L1_error)
- __pyx_v_idx = (__pyx_v_idx + (__pyx_v_ndim - __pyx_t_5));
-
- /* "View.MemoryView":687
- * if not seen_ellipsis:
- * idx += ndim - len(tup)
- * seen_ellipsis = True # <<<<<<<<<<<<<<
- * have_slices = True
- * else:
- */
- __pyx_v_seen_ellipsis = 1;
-
- /* "View.MemoryView":685
- * for item in tup:
- * if item is Ellipsis:
- * if not seen_ellipsis: # <<<<<<<<<<<<<<
- * idx += ndim - len(tup)
- * seen_ellipsis = True
- */
- }
-
- /* "View.MemoryView":688
- * idx += ndim - len(tup)
- * seen_ellipsis = True
- * have_slices = True # <<<<<<<<<<<<<<
- * else:
- * if isinstance(item, slice):
- */
- __pyx_v_have_slices = 1;
-
- /* "View.MemoryView":684
- * idx = 0
- * for item in tup:
- * if item is Ellipsis: # <<<<<<<<<<<<<<
- * if not seen_ellipsis:
- * idx += ndim - len(tup)
- */
- goto __pyx_L5;
- }
-
- /* "View.MemoryView":690
- * have_slices = True
- * else:
- * if isinstance(item, slice): # <<<<<<<<<<<<<<
- * have_slices = True
- * elif not PyIndex_Check(item):
- */
- /*else*/ {
- __pyx_t_2 = PySlice_Check(__pyx_v_item);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":691
- * else:
- * if isinstance(item, slice):
- * have_slices = True # <<<<<<<<<<<<<<
- * elif not PyIndex_Check(item):
- * raise TypeError, f"Cannot index with type '{type(item)}'"
- */
- __pyx_v_have_slices = 1;
-
- /* "View.MemoryView":690
- * have_slices = True
- * else:
- * if isinstance(item, slice): # <<<<<<<<<<<<<<
- * have_slices = True
- * elif not PyIndex_Check(item):
- */
- goto __pyx_L7;
- }
-
- /* "View.MemoryView":692
- * if isinstance(item, slice):
- * have_slices = True
- * elif not PyIndex_Check(item): # <<<<<<<<<<<<<<
- * raise TypeError, f"Cannot index with type '{type(item)}'"
- * result[idx] = item
- */
- __pyx_t_2 = (!(PyIndex_Check(__pyx_v_item) != 0));
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":693
- * have_slices = True
- * elif not PyIndex_Check(item):
- * raise TypeError, f"Cannot index with type '{type(item)}'" # <<<<<<<<<<<<<<
- * result[idx] = item
- * idx += 1
- */
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 693, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_5 = 0;
- __pyx_t_6 = 127;
- __Pyx_INCREF(__pyx_kp_u_Cannot_index_with_type);
- __pyx_t_5 += 24;
- __Pyx_GIVEREF(__pyx_kp_u_Cannot_index_with_type);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Cannot_index_with_type);
- __pyx_t_7 = __Pyx_PyObject_FormatSimple(((PyObject *)Py_TYPE(__pyx_v_item)), __pyx_empty_unicode); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_6 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) > __pyx_t_6) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) : __pyx_t_6;
- __pyx_t_5 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_7);
- __Pyx_GIVEREF(__pyx_t_7);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7);
- __pyx_t_7 = 0;
- __Pyx_INCREF(__pyx_kp_u__6);
- __pyx_t_5 += 1;
- __Pyx_GIVEREF(__pyx_kp_u__6);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__6);
- __pyx_t_7 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_t_7, 0, 0);
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __PYX_ERR(1, 693, __pyx_L1_error)
-
- /* "View.MemoryView":692
- * if isinstance(item, slice):
- * have_slices = True
- * elif not PyIndex_Check(item): # <<<<<<<<<<<<<<
- * raise TypeError, f"Cannot index with type '{type(item)}'"
- * result[idx] = item
- */
- }
- __pyx_L7:;
-
- /* "View.MemoryView":694
- * elif not PyIndex_Check(item):
- * raise TypeError, f"Cannot index with type '{type(item)}'"
- * result[idx] = item # <<<<<<<<<<<<<<
- * idx += 1
- *
- */
- if (unlikely((__Pyx_SetItemInt(__pyx_v_result, __pyx_v_idx, __pyx_v_item, Py_ssize_t, 1, PyInt_FromSsize_t, 1, 1, 1) < 0))) __PYX_ERR(1, 694, __pyx_L1_error)
- }
- __pyx_L5:;
-
- /* "View.MemoryView":695
- * raise TypeError, f"Cannot index with type '{type(item)}'"
- * result[idx] = item
- * idx += 1 # <<<<<<<<<<<<<<
- *
- * nslices = ndim - idx
- */
- __pyx_v_idx = (__pyx_v_idx + 1);
-
- /* "View.MemoryView":683
- * seen_ellipsis = False
- * idx = 0
- * for item in tup: # <<<<<<<<<<<<<<
- * if item is Ellipsis:
- * if not seen_ellipsis:
- */
- }
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "View.MemoryView":697
- * idx += 1
- *
- * nslices = ndim - idx # <<<<<<<<<<<<<<
- * return have_slices or nslices, tuple(result)
- *
- */
- __pyx_v_nslices = (__pyx_v_ndim - __pyx_v_idx);
-
- /* "View.MemoryView":698
- *
- * nslices = ndim - idx
- * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
- *
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:
- */
- __Pyx_XDECREF(__pyx_r);
- if (!__pyx_v_have_slices) {
- } else {
- __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_1 = __pyx_t_7;
- __pyx_t_7 = 0;
- goto __pyx_L9_bool_binop_done;
- }
- __pyx_t_7 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_1 = __pyx_t_7;
- __pyx_t_7 = 0;
- __pyx_L9_bool_binop_done:;
- __pyx_t_7 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 698, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_7);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7);
- __pyx_t_1 = 0;
- __pyx_t_7 = 0;
- __pyx_r = ((PyObject*)__pyx_t_3);
- __pyx_t_3 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":671
- * return isinstance(o, memoryview)
- *
- * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
- * """
- * Replace all ellipses with full slices and fill incomplete indices with
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_7);
- __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_tup);
- __Pyx_XDECREF(__pyx_v_result);
- __Pyx_XDECREF(__pyx_v_item);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":700
- * return have_slices or nslices, tuple(result)
- *
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<<
- * for suboffset in suboffsets[:ndim]:
- * if suboffset >= 0:
- */
-
-static int assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
- Py_ssize_t __pyx_v_suboffset;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t *__pyx_t_1;
- Py_ssize_t *__pyx_t_2;
- Py_ssize_t *__pyx_t_3;
- int __pyx_t_4;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
-
- /* "View.MemoryView":701
- *
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:
- * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
- * if suboffset >= 0:
- * raise ValueError, "Indirect dimensions not supported"
- */
- __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
- for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
- __pyx_t_1 = __pyx_t_3;
- __pyx_v_suboffset = (__pyx_t_1[0]);
-
- /* "View.MemoryView":702
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:
- * for suboffset in suboffsets[:ndim]:
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "Indirect dimensions not supported"
- * return 0 # return type just used as an error flag
- */
- __pyx_t_4 = (__pyx_v_suboffset >= 0);
- if (unlikely(__pyx_t_4)) {
-
- /* "View.MemoryView":703
- * for suboffset in suboffsets[:ndim]:
- * if suboffset >= 0:
- * raise ValueError, "Indirect dimensions not supported" # <<<<<<<<<<<<<<
- * return 0 # return type just used as an error flag
- *
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Indirect_dimensions_not_supporte, 0, 0);
- __PYX_ERR(1, 703, __pyx_L1_error)
-
- /* "View.MemoryView":702
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1:
- * for suboffset in suboffsets[:ndim]:
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "Indirect dimensions not supported"
- * return 0 # return type just used as an error flag
- */
- }
- }
-
- /* "View.MemoryView":704
- * if suboffset >= 0:
- * raise ValueError, "Indirect dimensions not supported"
- * return 0 # return type just used as an error flag # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":700
- * return have_slices or nslices, tuple(result)
- *
- * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<<
- * for suboffset in suboffsets[:ndim]:
- * if suboffset >= 0:
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":711
- *
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
- * cdef int new_ndim = 0, suboffset_dim = -1, dim
- * cdef bint negative_step
- */
-
-static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
- int __pyx_v_new_ndim;
- int __pyx_v_suboffset_dim;
- int __pyx_v_dim;
- __Pyx_memviewslice __pyx_v_src;
- __Pyx_memviewslice __pyx_v_dst;
- __Pyx_memviewslice *__pyx_v_p_src;
- struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
- __Pyx_memviewslice *__pyx_v_p_dst;
- int *__pyx_v_p_suboffset_dim;
- Py_ssize_t __pyx_v_start;
- Py_ssize_t __pyx_v_stop;
- Py_ssize_t __pyx_v_step;
- Py_ssize_t __pyx_v_cindex;
- int __pyx_v_have_start;
- int __pyx_v_have_stop;
- int __pyx_v_have_step;
- PyObject *__pyx_v_index = NULL;
- struct __pyx_memoryview_obj *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- struct __pyx_memoryview_obj *__pyx_t_3;
- char *__pyx_t_4;
- int __pyx_t_5;
- Py_ssize_t __pyx_t_6;
- PyObject *(*__pyx_t_7)(PyObject *);
- PyObject *__pyx_t_8 = NULL;
- Py_ssize_t __pyx_t_9;
- int __pyx_t_10;
- Py_ssize_t __pyx_t_11;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("memview_slice", 0);
-
- /* "View.MemoryView":712
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices):
- * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
- * cdef bint negative_step
- * cdef __Pyx_memviewslice src, dst
- */
- __pyx_v_new_ndim = 0;
- __pyx_v_suboffset_dim = -1;
-
- /* "View.MemoryView":719
- *
- *
- * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
- *
- * cdef _memoryviewslice memviewsliceobj
- */
- (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
-
- /* "View.MemoryView":723
- * cdef _memoryviewslice memviewsliceobj
- *
- * assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
- *
- * if isinstance(memview, _memoryviewslice):
- */
- #ifndef CYTHON_WITHOUT_ASSERTIONS
- if (unlikely(__pyx_assertions_enabled())) {
- __pyx_t_1 = (__pyx_v_memview->view.ndim > 0);
- if (unlikely(!__pyx_t_1)) {
- __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0);
- __PYX_ERR(1, 723, __pyx_L1_error)
- }
- }
- #else
- if ((1)); else __PYX_ERR(1, 723, __pyx_L1_error)
- #endif
-
- /* "View.MemoryView":725
- * assert memview.view.ndim > 0
- *
- * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
- * memviewsliceobj = memview
- * p_src = &memviewsliceobj.from_slice
- */
- __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":726
- *
- * if isinstance(memview, _memoryviewslice):
- * memviewsliceobj = memview # <<<<<<<<<<<<<<
- * p_src = &memviewsliceobj.from_slice
- * else:
- */
- if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 726, __pyx_L1_error)
- __pyx_t_2 = ((PyObject *)__pyx_v_memview);
- __Pyx_INCREF(__pyx_t_2);
- __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
- __pyx_t_2 = 0;
-
- /* "View.MemoryView":727
- * if isinstance(memview, _memoryviewslice):
- * memviewsliceobj = memview
- * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
- * else:
- * slice_copy(memview, &src)
- */
- __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
-
- /* "View.MemoryView":725
- * assert memview.view.ndim > 0
- *
- * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
- * memviewsliceobj = memview
- * p_src = &memviewsliceobj.from_slice
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":729
- * p_src = &memviewsliceobj.from_slice
- * else:
- * slice_copy(memview, &src) # <<<<<<<<<<<<<<
- * p_src = &src
- *
- */
- /*else*/ {
- __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
-
- /* "View.MemoryView":730
- * else:
- * slice_copy(memview, &src)
- * p_src = &src # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_v_p_src = (&__pyx_v_src);
- }
- __pyx_L3:;
-
- /* "View.MemoryView":736
- *
- *
- * dst.memview = p_src.memview # <<<<<<<<<<<<<<
- * dst.data = p_src.data
- *
- */
- __pyx_t_3 = __pyx_v_p_src->memview;
- __pyx_v_dst.memview = __pyx_t_3;
-
- /* "View.MemoryView":737
- *
- * dst.memview = p_src.memview
- * dst.data = p_src.data # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_t_4 = __pyx_v_p_src->data;
- __pyx_v_dst.data = __pyx_t_4;
-
- /* "View.MemoryView":742
- *
- *
- * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
- * cdef int *p_suboffset_dim = &suboffset_dim
- * cdef Py_ssize_t start, stop, step, cindex
- */
- __pyx_v_p_dst = (&__pyx_v_dst);
-
- /* "View.MemoryView":743
- *
- * cdef __Pyx_memviewslice *p_dst = &dst
- * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
- * cdef Py_ssize_t start, stop, step, cindex
- * cdef bint have_start, have_stop, have_step
- */
- __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
-
- /* "View.MemoryView":747
- * cdef bint have_start, have_stop, have_step
- *
- * for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
- * if PyIndex_Check(index):
- * cindex = index
- */
- __pyx_t_5 = 0;
- if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
- __pyx_t_2 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_2); __pyx_t_6 = 0;
- __pyx_t_7 = NULL;
- } else {
- __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 747, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 747, __pyx_L1_error)
- }
- for (;;) {
- if (likely(!__pyx_t_7)) {
- if (likely(PyList_CheckExact(__pyx_t_2))) {
- if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_8 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error)
- #else
- __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- #endif
- } else {
- if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error)
- #else
- __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- #endif
- }
- } else {
- __pyx_t_8 = __pyx_t_7(__pyx_t_2);
- if (unlikely(!__pyx_t_8)) {
- PyObject* exc_type = PyErr_Occurred();
- if (exc_type) {
- if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
- else __PYX_ERR(1, 747, __pyx_L1_error)
- }
- break;
- }
- __Pyx_GOTREF(__pyx_t_8);
- }
- __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_8);
- __pyx_t_8 = 0;
- __pyx_v_dim = __pyx_t_5;
- __pyx_t_5 = (__pyx_t_5 + 1);
-
- /* "View.MemoryView":748
- *
- * for dim, index in enumerate(indices):
- * if PyIndex_Check(index): # <<<<<<<<<<<<<<
- * cindex = index
- * slice_memviewslice(
- */
- __pyx_t_1 = (PyIndex_Check(__pyx_v_index) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":749
- * for dim, index in enumerate(indices):
- * if PyIndex_Check(index):
- * cindex = index # <<<<<<<<<<<<<<
- * slice_memviewslice(
- * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- */
- __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error)
- __pyx_v_cindex = __pyx_t_9;
-
- /* "View.MemoryView":750
- * if PyIndex_Check(index):
- * cindex = index
- * slice_memviewslice( # <<<<<<<<<<<<<<
- * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- * dim, new_ndim, p_suboffset_dim,
- */
- __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_cindex, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 750, __pyx_L1_error)
-
- /* "View.MemoryView":748
- *
- * for dim, index in enumerate(indices):
- * if PyIndex_Check(index): # <<<<<<<<<<<<<<
- * cindex = index
- * slice_memviewslice(
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":756
- * 0, 0, 0, # have_{start,stop,step}
- * False)
- * elif index is None: # <<<<<<<<<<<<<<
- * p_dst.shape[new_ndim] = 1
- * p_dst.strides[new_ndim] = 0
- */
- __pyx_t_1 = (__pyx_v_index == Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":757
- * False)
- * elif index is None:
- * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
- * p_dst.strides[new_ndim] = 0
- * p_dst.suboffsets[new_ndim] = -1
- */
- (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
-
- /* "View.MemoryView":758
- * elif index is None:
- * p_dst.shape[new_ndim] = 1
- * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
- * p_dst.suboffsets[new_ndim] = -1
- * new_ndim += 1
- */
- (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
-
- /* "View.MemoryView":759
- * p_dst.shape[new_ndim] = 1
- * p_dst.strides[new_ndim] = 0
- * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
- * new_ndim += 1
- * else:
- */
- (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
-
- /* "View.MemoryView":760
- * p_dst.strides[new_ndim] = 0
- * p_dst.suboffsets[new_ndim] = -1
- * new_ndim += 1 # <<<<<<<<<<<<<<
- * else:
- * start = index.start or 0
- */
- __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
-
- /* "View.MemoryView":756
- * 0, 0, 0, # have_{start,stop,step}
- * False)
- * elif index is None: # <<<<<<<<<<<<<<
- * p_dst.shape[new_ndim] = 1
- * p_dst.strides[new_ndim] = 0
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":762
- * new_ndim += 1
- * else:
- * start = index.start or 0 # <<<<<<<<<<<<<<
- * stop = index.stop or 0
- * step = index.step or 0
- */
- /*else*/ {
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 762, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 762, __pyx_L1_error)
- if (!__pyx_t_1) {
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- } else {
- __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
- __pyx_t_9 = __pyx_t_11;
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- goto __pyx_L7_bool_binop_done;
- }
- __pyx_t_9 = 0;
- __pyx_L7_bool_binop_done:;
- __pyx_v_start = __pyx_t_9;
-
- /* "View.MemoryView":763
- * else:
- * start = index.start or 0
- * stop = index.stop or 0 # <<<<<<<<<<<<<<
- * step = index.step or 0
- *
- */
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 763, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 763, __pyx_L1_error)
- if (!__pyx_t_1) {
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- } else {
- __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 763, __pyx_L1_error)
- __pyx_t_9 = __pyx_t_11;
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- goto __pyx_L9_bool_binop_done;
- }
- __pyx_t_9 = 0;
- __pyx_L9_bool_binop_done:;
- __pyx_v_stop = __pyx_t_9;
-
- /* "View.MemoryView":764
- * start = index.start or 0
- * stop = index.stop or 0
- * step = index.step or 0 # <<<<<<<<<<<<<<
- *
- * have_start = index.start is not None
- */
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 764, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 764, __pyx_L1_error)
- if (!__pyx_t_1) {
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- } else {
- __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 764, __pyx_L1_error)
- __pyx_t_9 = __pyx_t_11;
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- goto __pyx_L11_bool_binop_done;
- }
- __pyx_t_9 = 0;
- __pyx_L11_bool_binop_done:;
- __pyx_v_step = __pyx_t_9;
-
- /* "View.MemoryView":766
- * step = index.step or 0
- *
- * have_start = index.start is not None # <<<<<<<<<<<<<<
- * have_stop = index.stop is not None
- * have_step = index.step is not None
- */
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 766, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = (__pyx_t_8 != Py_None);
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- __pyx_v_have_start = __pyx_t_1;
-
- /* "View.MemoryView":767
- *
- * have_start = index.start is not None
- * have_stop = index.stop is not None # <<<<<<<<<<<<<<
- * have_step = index.step is not None
- *
- */
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 767, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = (__pyx_t_8 != Py_None);
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- __pyx_v_have_stop = __pyx_t_1;
-
- /* "View.MemoryView":768
- * have_start = index.start is not None
- * have_stop = index.stop is not None
- * have_step = index.step is not None # <<<<<<<<<<<<<<
- *
- * slice_memviewslice(
- */
- __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 768, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_8);
- __pyx_t_1 = (__pyx_t_8 != Py_None);
- __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- __pyx_v_have_step = __pyx_t_1;
-
- /* "View.MemoryView":770
- * have_step = index.step is not None
- *
- * slice_memviewslice( # <<<<<<<<<<<<<<
- * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
- * dim, new_ndim, p_suboffset_dim,
- */
- __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 770, __pyx_L1_error)
-
- /* "View.MemoryView":776
- * have_start, have_stop, have_step,
- * True)
- * new_ndim += 1 # <<<<<<<<<<<<<<
- *
- * if isinstance(memview, _memoryviewslice):
- */
- __pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
- }
- __pyx_L6:;
-
- /* "View.MemoryView":747
- * cdef bint have_start, have_stop, have_step
- *
- * for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
- * if PyIndex_Check(index):
- * cindex = index
- */
- }
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "View.MemoryView":778
- * new_ndim += 1
- *
- * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
- * return memoryview_fromslice(dst, new_ndim,
- * memviewsliceobj.to_object_func,
- */
- __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":779
- *
- * if isinstance(memview, _memoryviewslice):
- * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
- * memviewsliceobj.to_object_func,
- * memviewsliceobj.to_dtype_func,
- */
- __Pyx_XDECREF((PyObject *)__pyx_r);
-
- /* "View.MemoryView":780
- * if isinstance(memview, _memoryviewslice):
- * return memoryview_fromslice(dst, new_ndim,
- * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
- * memviewsliceobj.to_dtype_func,
- * memview.dtype_is_object)
- */
- if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 780, __pyx_L1_error) }
-
- /* "View.MemoryView":781
- * return memoryview_fromslice(dst, new_ndim,
- * memviewsliceobj.to_object_func,
- * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
- * memview.dtype_is_object)
- * else:
- */
- if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 781, __pyx_L1_error) }
-
- /* "View.MemoryView":779
- *
- * if isinstance(memview, _memoryviewslice):
- * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
- * memviewsliceobj.to_object_func,
- * memviewsliceobj.to_dtype_func,
- */
- __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 779, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 779, __pyx_L1_error)
- __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2);
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":778
- * new_ndim += 1
- *
- * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
- * return memoryview_fromslice(dst, new_ndim,
- * memviewsliceobj.to_object_func,
- */
- }
-
- /* "View.MemoryView":784
- * memview.dtype_is_object)
- * else:
- * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
- * memview.dtype_is_object)
- *
- */
- /*else*/ {
- __Pyx_XDECREF((PyObject *)__pyx_r);
-
- /* "View.MemoryView":785
- * else:
- * return memoryview_fromslice(dst, new_ndim, NULL, NULL,
- * memview.dtype_is_object) # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 784, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
-
- /* "View.MemoryView":784
- * memview.dtype_is_object)
- * else:
- * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
- * memview.dtype_is_object)
- *
- */
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 784, __pyx_L1_error)
- __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2);
- __pyx_t_2 = 0;
- goto __pyx_L0;
- }
-
- /* "View.MemoryView":711
- *
- * @cname('__pyx_memview_slice')
- * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
- * cdef int new_ndim = 0, suboffset_dim = -1, dim
- * cdef bint negative_step
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_8);
- __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
- __Pyx_XDECREF(__pyx_v_index);
- __Pyx_XGIVEREF((PyObject *)__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":793
- *
- * @cname('__pyx_memoryview_slice_memviewslice')
- * cdef int slice_memviewslice( # <<<<<<<<<<<<<<
- * __Pyx_memviewslice *dst,
- * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
- */
-
-static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
- Py_ssize_t __pyx_v_new_shape;
- int __pyx_v_negative_step;
- int __pyx_r;
- int __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- #ifdef WITH_THREAD
- PyGILState_STATE __pyx_gilstate_save;
- #endif
-
- /* "View.MemoryView":813
- * cdef bint negative_step
- *
- * if not is_slice: # <<<<<<<<<<<<<<
- *
- * if start < 0:
- */
- __pyx_t_1 = (!__pyx_v_is_slice);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":815
- * if not is_slice:
- *
- * if start < 0: # <<<<<<<<<<<<<<
- * start += shape
- * if not 0 <= start < shape:
- */
- __pyx_t_1 = (__pyx_v_start < 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":816
- *
- * if start < 0:
- * start += shape # <<<<<<<<<<<<<<
- * if not 0 <= start < shape:
- * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim)
- */
- __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
-
- /* "View.MemoryView":815
- * if not is_slice:
- *
- * if start < 0: # <<<<<<<<<<<<<<
- * start += shape
- * if not 0 <= start < shape:
- */
- }
-
- /* "View.MemoryView":817
- * if start < 0:
- * start += shape
- * if not 0 <= start < shape: # <<<<<<<<<<<<<<
- * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim)
- * else:
- */
- __pyx_t_1 = (0 <= __pyx_v_start);
- if (__pyx_t_1) {
- __pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
- }
- __pyx_t_2 = (!__pyx_t_1);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":818
- * start += shape
- * if not 0 <= start < shape:
- * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
- * else:
- *
- */
- __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 818, __pyx_L1_error)
-
- /* "View.MemoryView":817
- * if start < 0:
- * start += shape
- * if not 0 <= start < shape: # <<<<<<<<<<<<<<
- * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim)
- * else:
- */
- }
-
- /* "View.MemoryView":813
- * cdef bint negative_step
- *
- * if not is_slice: # <<<<<<<<<<<<<<
- *
- * if start < 0:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":821
- * else:
- *
- * if have_step: # <<<<<<<<<<<<<<
- * negative_step = step < 0
- * if step == 0:
- */
- /*else*/ {
- __pyx_t_2 = (__pyx_v_have_step != 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":822
- *
- * if have_step:
- * negative_step = step < 0 # <<<<<<<<<<<<<<
- * if step == 0:
- * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim)
- */
- __pyx_v_negative_step = (__pyx_v_step < 0);
-
- /* "View.MemoryView":823
- * if have_step:
- * negative_step = step < 0
- * if step == 0: # <<<<<<<<<<<<<<
- * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim)
- * else:
- */
- __pyx_t_2 = (__pyx_v_step == 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":824
- * negative_step = step < 0
- * if step == 0:
- * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
- * else:
- * negative_step = False
- */
- __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_kp_s_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 824, __pyx_L1_error)
-
- /* "View.MemoryView":823
- * if have_step:
- * negative_step = step < 0
- * if step == 0: # <<<<<<<<<<<<<<
- * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim)
- * else:
- */
- }
-
- /* "View.MemoryView":821
- * else:
- *
- * if have_step: # <<<<<<<<<<<<<<
- * negative_step = step < 0
- * if step == 0:
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":826
- * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim)
- * else:
- * negative_step = False # <<<<<<<<<<<<<<
- * step = 1
- *
- */
- /*else*/ {
- __pyx_v_negative_step = 0;
-
- /* "View.MemoryView":827
- * else:
- * negative_step = False
- * step = 1 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_v_step = 1;
- }
- __pyx_L6:;
-
- /* "View.MemoryView":830
- *
- *
- * if have_start: # <<<<<<<<<<<<<<
- * if start < 0:
- * start += shape
- */
- __pyx_t_2 = (__pyx_v_have_start != 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":831
- *
- * if have_start:
- * if start < 0: # <<<<<<<<<<<<<<
- * start += shape
- * if start < 0:
- */
- __pyx_t_2 = (__pyx_v_start < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":832
- * if have_start:
- * if start < 0:
- * start += shape # <<<<<<<<<<<<<<
- * if start < 0:
- * start = 0
- */
- __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
-
- /* "View.MemoryView":833
- * if start < 0:
- * start += shape
- * if start < 0: # <<<<<<<<<<<<<<
- * start = 0
- * elif start >= shape:
- */
- __pyx_t_2 = (__pyx_v_start < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":834
- * start += shape
- * if start < 0:
- * start = 0 # <<<<<<<<<<<<<<
- * elif start >= shape:
- * if negative_step:
- */
- __pyx_v_start = 0;
-
- /* "View.MemoryView":833
- * if start < 0:
- * start += shape
- * if start < 0: # <<<<<<<<<<<<<<
- * start = 0
- * elif start >= shape:
- */
- }
-
- /* "View.MemoryView":831
- *
- * if have_start:
- * if start < 0: # <<<<<<<<<<<<<<
- * start += shape
- * if start < 0:
- */
- goto __pyx_L9;
- }
-
- /* "View.MemoryView":835
- * if start < 0:
- * start = 0
- * elif start >= shape: # <<<<<<<<<<<<<<
- * if negative_step:
- * start = shape - 1
- */
- __pyx_t_2 = (__pyx_v_start >= __pyx_v_shape);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":836
- * start = 0
- * elif start >= shape:
- * if negative_step: # <<<<<<<<<<<<<<
- * start = shape - 1
- * else:
- */
- if (__pyx_v_negative_step) {
-
- /* "View.MemoryView":837
- * elif start >= shape:
- * if negative_step:
- * start = shape - 1 # <<<<<<<<<<<<<<
- * else:
- * start = shape
- */
- __pyx_v_start = (__pyx_v_shape - 1);
-
- /* "View.MemoryView":836
- * start = 0
- * elif start >= shape:
- * if negative_step: # <<<<<<<<<<<<<<
- * start = shape - 1
- * else:
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":839
- * start = shape - 1
- * else:
- * start = shape # <<<<<<<<<<<<<<
- * else:
- * if negative_step:
- */
- /*else*/ {
- __pyx_v_start = __pyx_v_shape;
- }
- __pyx_L11:;
-
- /* "View.MemoryView":835
- * if start < 0:
- * start = 0
- * elif start >= shape: # <<<<<<<<<<<<<<
- * if negative_step:
- * start = shape - 1
- */
- }
- __pyx_L9:;
-
- /* "View.MemoryView":830
- *
- *
- * if have_start: # <<<<<<<<<<<<<<
- * if start < 0:
- * start += shape
- */
- goto __pyx_L8;
- }
-
- /* "View.MemoryView":841
- * start = shape
- * else:
- * if negative_step: # <<<<<<<<<<<<<<
- * start = shape - 1
- * else:
- */
- /*else*/ {
- if (__pyx_v_negative_step) {
-
- /* "View.MemoryView":842
- * else:
- * if negative_step:
- * start = shape - 1 # <<<<<<<<<<<<<<
- * else:
- * start = 0
- */
- __pyx_v_start = (__pyx_v_shape - 1);
-
- /* "View.MemoryView":841
- * start = shape
- * else:
- * if negative_step: # <<<<<<<<<<<<<<
- * start = shape - 1
- * else:
- */
- goto __pyx_L12;
- }
-
- /* "View.MemoryView":844
- * start = shape - 1
- * else:
- * start = 0 # <<<<<<<<<<<<<<
- *
- * if have_stop:
- */
- /*else*/ {
- __pyx_v_start = 0;
- }
- __pyx_L12:;
- }
- __pyx_L8:;
-
- /* "View.MemoryView":846
- * start = 0
- *
- * if have_stop: # <<<<<<<<<<<<<<
- * if stop < 0:
- * stop += shape
- */
- __pyx_t_2 = (__pyx_v_have_stop != 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":847
- *
- * if have_stop:
- * if stop < 0: # <<<<<<<<<<<<<<
- * stop += shape
- * if stop < 0:
- */
- __pyx_t_2 = (__pyx_v_stop < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":848
- * if have_stop:
- * if stop < 0:
- * stop += shape # <<<<<<<<<<<<<<
- * if stop < 0:
- * stop = 0
- */
- __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
-
- /* "View.MemoryView":849
- * if stop < 0:
- * stop += shape
- * if stop < 0: # <<<<<<<<<<<<<<
- * stop = 0
- * elif stop > shape:
- */
- __pyx_t_2 = (__pyx_v_stop < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":850
- * stop += shape
- * if stop < 0:
- * stop = 0 # <<<<<<<<<<<<<<
- * elif stop > shape:
- * stop = shape
- */
- __pyx_v_stop = 0;
-
- /* "View.MemoryView":849
- * if stop < 0:
- * stop += shape
- * if stop < 0: # <<<<<<<<<<<<<<
- * stop = 0
- * elif stop > shape:
- */
- }
-
- /* "View.MemoryView":847
- *
- * if have_stop:
- * if stop < 0: # <<<<<<<<<<<<<<
- * stop += shape
- * if stop < 0:
- */
- goto __pyx_L14;
- }
-
- /* "View.MemoryView":851
- * if stop < 0:
- * stop = 0
- * elif stop > shape: # <<<<<<<<<<<<<<
- * stop = shape
- * else:
- */
- __pyx_t_2 = (__pyx_v_stop > __pyx_v_shape);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":852
- * stop = 0
- * elif stop > shape:
- * stop = shape # <<<<<<<<<<<<<<
- * else:
- * if negative_step:
- */
- __pyx_v_stop = __pyx_v_shape;
-
- /* "View.MemoryView":851
- * if stop < 0:
- * stop = 0
- * elif stop > shape: # <<<<<<<<<<<<<<
- * stop = shape
- * else:
- */
- }
- __pyx_L14:;
-
- /* "View.MemoryView":846
- * start = 0
- *
- * if have_stop: # <<<<<<<<<<<<<<
- * if stop < 0:
- * stop += shape
- */
- goto __pyx_L13;
- }
-
- /* "View.MemoryView":854
- * stop = shape
- * else:
- * if negative_step: # <<<<<<<<<<<<<<
- * stop = -1
- * else:
- */
- /*else*/ {
- if (__pyx_v_negative_step) {
-
- /* "View.MemoryView":855
- * else:
- * if negative_step:
- * stop = -1 # <<<<<<<<<<<<<<
- * else:
- * stop = shape
- */
- __pyx_v_stop = -1L;
-
- /* "View.MemoryView":854
- * stop = shape
- * else:
- * if negative_step: # <<<<<<<<<<<<<<
- * stop = -1
- * else:
- */
- goto __pyx_L16;
- }
-
- /* "View.MemoryView":857
- * stop = -1
- * else:
- * stop = shape # <<<<<<<<<<<<<<
- *
- *
- */
- /*else*/ {
- __pyx_v_stop = __pyx_v_shape;
- }
- __pyx_L16:;
- }
- __pyx_L13:;
-
- /* "View.MemoryView":861
- *
- * with cython.cdivision(True):
- * new_shape = (stop - start) // step # <<<<<<<<<<<<<<
- *
- * if (stop - start) - step * new_shape:
- */
- __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
-
- /* "View.MemoryView":863
- * new_shape = (stop - start) // step
- *
- * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
- * new_shape += 1
- *
- */
- __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":864
- *
- * if (stop - start) - step * new_shape:
- * new_shape += 1 # <<<<<<<<<<<<<<
- *
- * if new_shape < 0:
- */
- __pyx_v_new_shape = (__pyx_v_new_shape + 1);
-
- /* "View.MemoryView":863
- * new_shape = (stop - start) // step
- *
- * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
- * new_shape += 1
- *
- */
- }
-
- /* "View.MemoryView":866
- * new_shape += 1
- *
- * if new_shape < 0: # <<<<<<<<<<<<<<
- * new_shape = 0
- *
- */
- __pyx_t_2 = (__pyx_v_new_shape < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":867
- *
- * if new_shape < 0:
- * new_shape = 0 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_v_new_shape = 0;
-
- /* "View.MemoryView":866
- * new_shape += 1
- *
- * if new_shape < 0: # <<<<<<<<<<<<<<
- * new_shape = 0
- *
- */
- }
-
- /* "View.MemoryView":870
- *
- *
- * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
- * dst.shape[new_ndim] = new_shape
- * dst.suboffsets[new_ndim] = suboffset
- */
- (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
-
- /* "View.MemoryView":871
- *
- * dst.strides[new_ndim] = stride * step
- * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
- * dst.suboffsets[new_ndim] = suboffset
- *
- */
- (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
-
- /* "View.MemoryView":872
- * dst.strides[new_ndim] = stride * step
- * dst.shape[new_ndim] = new_shape
- * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
- *
- *
- */
- (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
- }
- __pyx_L3:;
-
- /* "View.MemoryView":875
- *
- *
- * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
- * dst.data += start * stride
- * else:
- */
- __pyx_t_2 = ((__pyx_v_suboffset_dim[0]) < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":876
- *
- * if suboffset_dim[0] < 0:
- * dst.data += start * stride # <<<<<<<<<<<<<<
- * else:
- * dst.suboffsets[suboffset_dim[0]] += start * stride
- */
- __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
-
- /* "View.MemoryView":875
- *
- *
- * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
- * dst.data += start * stride
- * else:
- */
- goto __pyx_L19;
- }
-
- /* "View.MemoryView":878
- * dst.data += start * stride
- * else:
- * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
- *
- * if suboffset >= 0:
- */
- /*else*/ {
- __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
- (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
- }
- __pyx_L19:;
-
- /* "View.MemoryView":880
- * dst.suboffsets[suboffset_dim[0]] += start * stride
- *
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * if not is_slice:
- * if new_ndim == 0:
- */
- __pyx_t_2 = (__pyx_v_suboffset >= 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":881
- *
- * if suboffset >= 0:
- * if not is_slice: # <<<<<<<<<<<<<<
- * if new_ndim == 0:
- * dst.data = (<char **> dst.data)[0] + suboffset
- */
- __pyx_t_2 = (!__pyx_v_is_slice);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":882
- * if suboffset >= 0:
- * if not is_slice:
- * if new_ndim == 0: # <<<<<<<<<<<<<<
- * dst.data = (<char **> dst.data)[0] + suboffset
- * else:
- */
- __pyx_t_2 = (__pyx_v_new_ndim == 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":883
- * if not is_slice:
- * if new_ndim == 0:
- * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
- * else:
- * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d "
- */
- __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
-
- /* "View.MemoryView":882
- * if suboffset >= 0:
- * if not is_slice:
- * if new_ndim == 0: # <<<<<<<<<<<<<<
- * dst.data = (<char **> dst.data)[0] + suboffset
- * else:
- */
- goto __pyx_L22;
- }
-
- /* "View.MemoryView":885
- * dst.data = (<char **> dst.data)[0] + suboffset
- * else:
- * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
- * "must be indexed and not sliced", dim)
- * else:
- */
- /*else*/ {
-
- /* "View.MemoryView":886
- * else:
- * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d "
- * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
- * else:
- * suboffset_dim[0] = new_ndim
- */
- __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 885, __pyx_L1_error)
- }
- __pyx_L22:;
-
- /* "View.MemoryView":881
- *
- * if suboffset >= 0:
- * if not is_slice: # <<<<<<<<<<<<<<
- * if new_ndim == 0:
- * dst.data = (<char **> dst.data)[0] + suboffset
- */
- goto __pyx_L21;
- }
-
- /* "View.MemoryView":888
- * "must be indexed and not sliced", dim)
- * else:
- * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
- *
- * return 0
- */
- /*else*/ {
- (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
- }
- __pyx_L21:;
-
- /* "View.MemoryView":880
- * dst.suboffsets[suboffset_dim[0]] += start * stride
- *
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * if not is_slice:
- * if new_ndim == 0:
- */
- }
-
- /* "View.MemoryView":890
- * suboffset_dim[0] = new_ndim
- *
- * return 0 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":793
- *
- * @cname('__pyx_memoryview_slice_memviewslice')
- * cdef int slice_memviewslice( # <<<<<<<<<<<<<<
- * __Pyx_memviewslice *dst,
- * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
- */
-
- /* function exit code */
- __pyx_L1_error:;
- #ifdef WITH_THREAD
- __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
- #endif
- __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- #ifdef WITH_THREAD
- __Pyx_PyGILState_Release(__pyx_gilstate_save);
- #endif
- __pyx_L0:;
- return __pyx_r;
-}
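As a side note, the generated block above implements the slice-extent arithmetic described in the View.MemoryView comments: clamp stop into [0, shape] (or pick -1/shape when no stop is given), divide with C truncation, round the element count up toward the step direction, and never let it drop below zero. A minimal Python sketch of that logic follows; the helper names c_div and slice_extent are illustrative only and do not appear in the original file.

def c_div(a, b):
    # C-style division truncating toward zero, mirroring cython.cdivision(True)
    q = a // b
    if q < 0 and q * b != a:
        q += 1
    return q

def slice_extent(start, stop, step, shape, have_stop, negative_step):
    # clamp/choose stop as in View.MemoryView lines 846-857 above
    if have_stop:
        if stop < 0:
            stop += shape
            if stop < 0:
                stop = 0
        elif stop > shape:
            stop = shape
    else:
        stop = -1 if negative_step else shape
    # element count: truncating division, then round up toward the step direction
    new_shape = c_div(stop - start, step)
    if (stop - start) - step * new_shape:
        new_shape += 1
    return max(new_shape, 0)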
-
-/* "View.MemoryView":896
- *
- * @cname('__pyx_pybuffer_index')
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
- * Py_ssize_t dim) except NULL:
- * cdef Py_ssize_t shape, stride, suboffset = -1
- */
-
-static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
- Py_ssize_t __pyx_v_shape;
- Py_ssize_t __pyx_v_stride;
- Py_ssize_t __pyx_v_suboffset;
- Py_ssize_t __pyx_v_itemsize;
- char *__pyx_v_resultp;
- char *__pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t __pyx_t_1;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- Py_UCS4 __pyx_t_4;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("pybuffer_index", 0);
-
- /* "View.MemoryView":898
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
- * Py_ssize_t dim) except NULL:
- * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
- * cdef Py_ssize_t itemsize = view.itemsize
- * cdef char *resultp
- */
- __pyx_v_suboffset = -1L;
-
- /* "View.MemoryView":899
- * Py_ssize_t dim) except NULL:
- * cdef Py_ssize_t shape, stride, suboffset = -1
- * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
- * cdef char *resultp
- *
- */
- __pyx_t_1 = __pyx_v_view->itemsize;
- __pyx_v_itemsize = __pyx_t_1;
-
- /* "View.MemoryView":902
- * cdef char *resultp
- *
- * if view.ndim == 0: # <<<<<<<<<<<<<<
- * shape = view.len // itemsize
- * stride = itemsize
- */
- __pyx_t_2 = (__pyx_v_view->ndim == 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":903
- *
- * if view.ndim == 0:
- * shape = view.len // itemsize # <<<<<<<<<<<<<<
- * stride = itemsize
- * else:
- */
- if (unlikely(__pyx_v_itemsize == 0)) {
- PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
- __PYX_ERR(1, 903, __pyx_L1_error)
- }
- else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
- PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
- __PYX_ERR(1, 903, __pyx_L1_error)
- }
- __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
-
- /* "View.MemoryView":904
- * if view.ndim == 0:
- * shape = view.len // itemsize
- * stride = itemsize # <<<<<<<<<<<<<<
- * else:
- * shape = view.shape[dim]
- */
- __pyx_v_stride = __pyx_v_itemsize;
-
- /* "View.MemoryView":902
- * cdef char *resultp
- *
- * if view.ndim == 0: # <<<<<<<<<<<<<<
- * shape = view.len // itemsize
- * stride = itemsize
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":906
- * stride = itemsize
- * else:
- * shape = view.shape[dim] # <<<<<<<<<<<<<<
- * stride = view.strides[dim]
- * if view.suboffsets != NULL:
- */
- /*else*/ {
- __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
-
- /* "View.MemoryView":907
- * else:
- * shape = view.shape[dim]
- * stride = view.strides[dim] # <<<<<<<<<<<<<<
- * if view.suboffsets != NULL:
- * suboffset = view.suboffsets[dim]
- */
- __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
-
- /* "View.MemoryView":908
- * shape = view.shape[dim]
- * stride = view.strides[dim]
- * if view.suboffsets != NULL: # <<<<<<<<<<<<<<
- * suboffset = view.suboffsets[dim]
- *
- */
- __pyx_t_2 = (__pyx_v_view->suboffsets != NULL);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":909
- * stride = view.strides[dim]
- * if view.suboffsets != NULL:
- * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
- *
- * if index < 0:
- */
- __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
-
- /* "View.MemoryView":908
- * shape = view.shape[dim]
- * stride = view.strides[dim]
- * if view.suboffsets != NULL: # <<<<<<<<<<<<<<
- * suboffset = view.suboffsets[dim]
- *
- */
- }
- }
- __pyx_L3:;
-
- /* "View.MemoryView":911
- * suboffset = view.suboffsets[dim]
- *
- * if index < 0: # <<<<<<<<<<<<<<
- * index += view.shape[dim]
- * if index < 0:
- */
- __pyx_t_2 = (__pyx_v_index < 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":912
- *
- * if index < 0:
- * index += view.shape[dim] # <<<<<<<<<<<<<<
- * if index < 0:
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- */
- __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
-
- /* "View.MemoryView":913
- * if index < 0:
- * index += view.shape[dim]
- * if index < 0: # <<<<<<<<<<<<<<
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- */
- __pyx_t_2 = (__pyx_v_index < 0);
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":914
- * index += view.shape[dim]
- * if index < 0:
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<<
- *
- * if index >= shape:
- */
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = 0;
- __pyx_t_4 = 127;
- __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- __pyx_t_1 += 37;
- __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- __pyx_t_5 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5);
- __Pyx_GIVEREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
- __pyx_t_5 = 0;
- __Pyx_INCREF(__pyx_kp_u__7);
- __pyx_t_1 += 1;
- __Pyx_GIVEREF(__pyx_kp_u__7);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__7);
- __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_Raise(__pyx_builtin_IndexError, __pyx_t_5, 0, 0);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __PYX_ERR(1, 914, __pyx_L1_error)
-
- /* "View.MemoryView":913
- * if index < 0:
- * index += view.shape[dim]
- * if index < 0: # <<<<<<<<<<<<<<
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- */
- }
-
- /* "View.MemoryView":911
- * suboffset = view.suboffsets[dim]
- *
- * if index < 0: # <<<<<<<<<<<<<<
- * index += view.shape[dim]
- * if index < 0:
- */
- }
-
- /* "View.MemoryView":916
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- * if index >= shape: # <<<<<<<<<<<<<<
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- */
- __pyx_t_2 = (__pyx_v_index >= __pyx_v_shape);
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":917
- *
- * if index >= shape:
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<<
- *
- * resultp = bufp + index * stride
- */
- __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 917, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_1 = 0;
- __pyx_t_4 = 127;
- __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- __pyx_t_1 += 37;
- __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- __pyx_t_3 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
- __pyx_t_3 = 0;
- __Pyx_INCREF(__pyx_kp_u__7);
- __pyx_t_1 += 1;
- __Pyx_GIVEREF(__pyx_kp_u__7);
- PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u__7);
- __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_5, 3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_Raise(__pyx_builtin_IndexError, __pyx_t_3, 0, 0);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __PYX_ERR(1, 917, __pyx_L1_error)
-
- /* "View.MemoryView":916
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- * if index >= shape: # <<<<<<<<<<<<<<
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- */
- }
-
- /* "View.MemoryView":919
- * raise IndexError, f"Out of bounds on buffer access (axis {dim})"
- *
- * resultp = bufp + index * stride # <<<<<<<<<<<<<<
- * if suboffset >= 0:
- * resultp = (<char **> resultp)[0] + suboffset
- */
- __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
-
- /* "View.MemoryView":920
- *
- * resultp = bufp + index * stride
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * resultp = (<char **> resultp)[0] + suboffset
- *
- */
- __pyx_t_2 = (__pyx_v_suboffset >= 0);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":921
- * resultp = bufp + index * stride
- * if suboffset >= 0:
- * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
- *
- * return resultp
- */
- __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
-
- /* "View.MemoryView":920
- *
- * resultp = bufp + index * stride
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * resultp = (<char **> resultp)[0] + suboffset
- *
- */
- }
-
- /* "View.MemoryView":923
- * resultp = (<char **> resultp)[0] + suboffset
- *
- * return resultp # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = __pyx_v_resultp;
- goto __pyx_L0;
-
- /* "View.MemoryView":896
- *
- * @cname('__pyx_pybuffer_index')
- * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
- * Py_ssize_t dim) except NULL:
- * cdef Py_ssize_t shape, stride, suboffset = -1
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
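pybuffer_index, generated above, normalises a possibly negative index against the dimension's shape, raises IndexError when it is still out of bounds, and then offsets the buffer pointer by index * stride (following the stored pointer when the dimension is indirect). A minimal Python sketch of the same bounds logic; the function name pybuffer_offset is illustrative only.

def pybuffer_offset(index, shape, stride, dim):
    # normalise a negative index, then bounds-check, as pybuffer_index does above
    if index < 0:
        index += shape
    if index < 0 or index >= shape:
        raise IndexError(f"Out of bounds on buffer access (axis {dim})")
    # byte offset of the element; an indirect dimension (suboffset >= 0) would
    # additionally dereference the pointer stored at this offset
    return index * stride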
-
-/* "View.MemoryView":929
- *
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<<
- * cdef int ndim = memslice.memview.view.ndim
- *
- */
-
-static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
- int __pyx_v_ndim;
- Py_ssize_t *__pyx_v_shape;
- Py_ssize_t *__pyx_v_strides;
- int __pyx_v_i;
- int __pyx_v_j;
- int __pyx_r;
- int __pyx_t_1;
- Py_ssize_t *__pyx_t_2;
- long __pyx_t_3;
- long __pyx_t_4;
- Py_ssize_t __pyx_t_5;
- Py_ssize_t __pyx_t_6;
- int __pyx_t_7;
- int __pyx_t_8;
- int __pyx_t_9;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- #ifdef WITH_THREAD
- PyGILState_STATE __pyx_gilstate_save;
- #endif
-
- /* "View.MemoryView":930
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil:
- * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
- *
- * cdef Py_ssize_t *shape = memslice.shape
- */
- __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
- __pyx_v_ndim = __pyx_t_1;
-
- /* "View.MemoryView":932
- * cdef int ndim = memslice.memview.view.ndim
- *
- * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
- * cdef Py_ssize_t *strides = memslice.strides
- *
- */
- __pyx_t_2 = __pyx_v_memslice->shape;
- __pyx_v_shape = __pyx_t_2;
-
- /* "View.MemoryView":933
- *
- * cdef Py_ssize_t *shape = memslice.shape
- * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_t_2 = __pyx_v_memslice->strides;
- __pyx_v_strides = __pyx_t_2;
-
- /* "View.MemoryView":937
- *
- * cdef int i, j
- * for i in range(ndim // 2): # <<<<<<<<<<<<<<
- * j = ndim - 1 - i
- * strides[i], strides[j] = strides[j], strides[i]
- */
- __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
- __pyx_t_4 = __pyx_t_3;
- for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
- __pyx_v_i = __pyx_t_1;
-
- /* "View.MemoryView":938
- * cdef int i, j
- * for i in range(ndim // 2):
- * j = ndim - 1 - i # <<<<<<<<<<<<<<
- * strides[i], strides[j] = strides[j], strides[i]
- * shape[i], shape[j] = shape[j], shape[i]
- */
- __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
-
- /* "View.MemoryView":939
- * for i in range(ndim // 2):
- * j = ndim - 1 - i
- * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
- * shape[i], shape[j] = shape[j], shape[i]
- *
- */
- __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
- __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
- (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
- (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
-
- /* "View.MemoryView":940
- * j = ndim - 1 - i
- * strides[i], strides[j] = strides[j], strides[i]
- * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
- *
- * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
- */
- __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
- __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
- (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
- (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
-
- /* "View.MemoryView":942
- * shape[i], shape[j] = shape[j], shape[i]
- *
- * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
- * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
- *
- */
- __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0);
- if (!__pyx_t_8) {
- } else {
- __pyx_t_7 = __pyx_t_8;
- goto __pyx_L6_bool_binop_done;
- }
- __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0);
- __pyx_t_7 = __pyx_t_8;
- __pyx_L6_bool_binop_done:;
- if (__pyx_t_7) {
-
- /* "View.MemoryView":943
- *
- * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
- * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
- *
- * return 0
- */
- __pyx_t_9 = __pyx_memoryview_err(PyExc_ValueError, __pyx_kp_s_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 943, __pyx_L1_error)
-
- /* "View.MemoryView":942
- * shape[i], shape[j] = shape[j], shape[i]
- *
- * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
- * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
- *
- */
- }
- }
-
- /* "View.MemoryView":945
- * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions")
- *
- * return 0 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":929
- *
- * @cname('__pyx_memslice_transpose')
- * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<<
- * cdef int ndim = memslice.memview.view.ndim
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- #ifdef WITH_THREAD
- __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
- #endif
- __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- #ifdef WITH_THREAD
- __Pyx_PyGILState_Release(__pyx_gilstate_save);
- #endif
- __pyx_L0:;
- return __pyx_r;
-}
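transpose_memslice, generated above, reverses the shape and strides arrays in place by swapping opposite ends, and rejects memoryviews with indirect dimensions. A small Python sketch of that swap loop; transpose_inplace is an illustrative name.

def transpose_inplace(shape, strides, suboffsets):
    # reverse shape/strides by swapping ends, erroring on indirect dimensions
    ndim = len(shape)
    for i in range(ndim // 2):
        j = ndim - 1 - i
        strides[i], strides[j] = strides[j], strides[i]
        shape[i], shape[j] = shape[j], shape[i]
        if suboffsets[i] >= 0 or suboffsets[j] >= 0:
            raise ValueError("Cannot transpose memoryview with indirect dimensions")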
-
-/* "View.MemoryView":963
- * cdef int (*to_dtype_func)(char *, object) except 0
- *
- * def __dealloc__(self): # <<<<<<<<<<<<<<
- * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
- *
- */
-
-/* Python wrapper */
-static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
- __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__", 0);
-
- /* "View.MemoryView":964
- *
- * def __dealloc__(self):
- * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
- *
- * cdef convert_item_to_object(self, char *itemp):
- */
- __PYX_XCLEAR_MEMVIEW((&__pyx_v_self->from_slice), 1);
-
- /* "View.MemoryView":963
- * cdef int (*to_dtype_func)(char *, object) except 0
- *
- * def __dealloc__(self): # <<<<<<<<<<<<<<
- * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
- *
- */
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":966
- * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
- *
- * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
- * if self.to_object_func != NULL:
- * return self.to_object_func(itemp)
- */
-
-static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("convert_item_to_object", 0);
-
- /* "View.MemoryView":967
- *
- * cdef convert_item_to_object(self, char *itemp):
- * if self.to_object_func != NULL: # <<<<<<<<<<<<<<
- * return self.to_object_func(itemp)
- * else:
- */
- __pyx_t_1 = (__pyx_v_self->to_object_func != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":968
- * cdef convert_item_to_object(self, char *itemp):
- * if self.to_object_func != NULL:
- * return self.to_object_func(itemp) # <<<<<<<<<<<<<<
- * else:
- * return memoryview.convert_item_to_object(self, itemp)
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 968, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":967
- *
- * cdef convert_item_to_object(self, char *itemp):
- * if self.to_object_func != NULL: # <<<<<<<<<<<<<<
- * return self.to_object_func(itemp)
- * else:
- */
- }
-
- /* "View.MemoryView":970
- * return self.to_object_func(itemp)
- * else:
- * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
- *
- * cdef assign_item_from_object(self, char *itemp, object value):
- */
- /*else*/ {
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 970, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
- }
-
- /* "View.MemoryView":966
- * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1)
- *
- * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
- * if self.to_object_func != NULL:
- * return self.to_object_func(itemp)
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":972
- * return memoryview.convert_item_to_object(self, itemp)
- *
- * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
- * if self.to_dtype_func != NULL:
- * self.to_dtype_func(itemp, value)
- */
-
-static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("assign_item_from_object", 0);
-
- /* "View.MemoryView":973
- *
- * cdef assign_item_from_object(self, char *itemp, object value):
- * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
- * self.to_dtype_func(itemp, value)
- * else:
- */
- __pyx_t_1 = (__pyx_v_self->to_dtype_func != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":974
- * cdef assign_item_from_object(self, char *itemp, object value):
- * if self.to_dtype_func != NULL:
- * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
- * else:
- * memoryview.assign_item_from_object(self, itemp, value)
- */
- __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 974, __pyx_L1_error)
-
- /* "View.MemoryView":973
- *
- * cdef assign_item_from_object(self, char *itemp, object value):
- * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
- * self.to_dtype_func(itemp, value)
- * else:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":976
- * self.to_dtype_func(itemp, value)
- * else:
- * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
- *
- * cdef _get_base(self):
- */
- /*else*/ {
- __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 976, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- }
- __pyx_L3:;
-
- /* "View.MemoryView":972
- * return memoryview.convert_item_to_object(self, itemp)
- *
- * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
- * if self.to_dtype_func != NULL:
- * self.to_dtype_func(itemp, value)
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":978
- * memoryview.assign_item_from_object(self, itemp, value)
- *
- * cdef _get_base(self): # <<<<<<<<<<<<<<
- * return self.from_object
- *
- */
-
-static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("_get_base", 0);
-
- /* "View.MemoryView":979
- *
- * cdef _get_base(self):
- * return self.from_object # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_self->from_object);
- __pyx_r = __pyx_v_self->from_object;
- goto __pyx_L0;
-
- /* "View.MemoryView":978
- * memoryview.assign_item_from_object(self, itemp, value)
- *
- * cdef _get_base(self): # <<<<<<<<<<<<<<
- * return self.from_object
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":2
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 2, __pyx_L1_error)
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":4
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 4, __pyx_L1_error)
-
- /* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":999
- *
- * @cname('__pyx_memoryview_fromslice')
- * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
- * int ndim,
- * object (*to_object_func)(char *),
- */
-
-static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
- struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
- Py_ssize_t __pyx_v_suboffset;
- PyObject *__pyx_v_length = NULL;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- __Pyx_TypeInfo *__pyx_t_4;
- Py_buffer __pyx_t_5;
- Py_ssize_t *__pyx_t_6;
- Py_ssize_t *__pyx_t_7;
- Py_ssize_t *__pyx_t_8;
- Py_ssize_t __pyx_t_9;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("memoryview_fromslice", 0);
-
- /* "View.MemoryView":1007
- * cdef _memoryviewslice result
- *
- * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
- * return None
- *
- */
- __pyx_t_1 = (((PyObject *)__pyx_v_memviewslice.memview) == Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":1008
- *
- * if memviewslice.memview == Py_None:
- * return None # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
-
- /* "View.MemoryView":1007
- * cdef _memoryviewslice result
- *
- * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
- * return None
- *
- */
- }
-
- /* "View.MemoryView":1013
- *
- *
- * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) # <<<<<<<<<<<<<<
- *
- * result.from_slice = memviewslice
- */
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
- __Pyx_INCREF(__pyx_int_0);
- __Pyx_GIVEREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
- __Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = ((PyObject *)__pyx_tp_new__memoryviewslice(((PyTypeObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
- __Pyx_GOTREF((PyObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
- __pyx_t_2 = 0;
-
- /* "View.MemoryView":1015
- * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object)
- *
- * result.from_slice = memviewslice # <<<<<<<<<<<<<<
- * __PYX_INC_MEMVIEW(&memviewslice, 1)
- *
- */
- __pyx_v_result->from_slice = __pyx_v_memviewslice;
-
- /* "View.MemoryView":1016
- *
- * result.from_slice = memviewslice
- * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
- *
- * result.from_object = (<memoryview> memviewslice.memview)._get_base()
- */
- __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
-
- /* "View.MemoryView":1018
- * __PYX_INC_MEMVIEW(&memviewslice, 1)
- *
- * result.from_object = (<memoryview> memviewslice.memview)._get_base() # <<<<<<<<<<<<<<
- * result.typeinfo = memviewslice.memview.typeinfo
- *
- */
- __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->__pyx_vtab)->_get_base(((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __Pyx_GOTREF(__pyx_v_result->from_object);
- __Pyx_DECREF(__pyx_v_result->from_object);
- __pyx_v_result->from_object = __pyx_t_2;
- __pyx_t_2 = 0;
-
- /* "View.MemoryView":1019
- *
- * result.from_object = (<memoryview> memviewslice.memview)._get_base()
- * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
- *
- * result.view = memviewslice.memview.view
- */
- __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
- __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
-
- /* "View.MemoryView":1021
- * result.typeinfo = memviewslice.memview.typeinfo
- *
- * result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
- * result.view.buf = memviewslice.data
- * result.view.ndim = ndim
- */
- __pyx_t_5 = __pyx_v_memviewslice.memview->view;
- __pyx_v_result->__pyx_base.view = __pyx_t_5;
-
- /* "View.MemoryView":1022
- *
- * result.view = memviewslice.memview.view
- * result.view.buf = memviewslice.data # <<<<<<<<<<<<<<
- * result.view.ndim = ndim
- * (<__pyx_buffer *> &result.view).obj = Py_None
- */
- __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
-
- /* "View.MemoryView":1023
- * result.view = memviewslice.memview.view
- * result.view.buf = memviewslice.data
- * result.view.ndim = ndim # <<<<<<<<<<<<<<
- * (<__pyx_buffer *> &result.view).obj = Py_None
- * Py_INCREF(Py_None)
- */
- __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
-
- /* "View.MemoryView":1024
- * result.view.buf = memviewslice.data
- * result.view.ndim = ndim
- * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
- * Py_INCREF(Py_None)
- *
- */
- ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
-
- /* "View.MemoryView":1025
- * result.view.ndim = ndim
- * (<__pyx_buffer *> &result.view).obj = Py_None
- * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
- *
- * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
- */
- Py_INCREF(Py_None);
-
- /* "View.MemoryView":1027
- * Py_INCREF(Py_None)
- *
- * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
- * result.flags = PyBUF_RECORDS
- * else:
- */
- __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":1028
- *
- * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
- * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
- * else:
- * result.flags = PyBUF_RECORDS_RO
- */
- __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
-
- /* "View.MemoryView":1027
- * Py_INCREF(Py_None)
- *
- * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
- * result.flags = PyBUF_RECORDS
- * else:
- */
- goto __pyx_L4;
- }
-
- /* "View.MemoryView":1030
- * result.flags = PyBUF_RECORDS
- * else:
- * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
- *
- * result.view.shape = result.from_slice.shape
- */
- /*else*/ {
- __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
- }
- __pyx_L4:;
-
- /* "View.MemoryView":1032
- * result.flags = PyBUF_RECORDS_RO
- *
- * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<<
- * result.view.strides = result.from_slice.strides
- *
- */
- __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
-
- /* "View.MemoryView":1033
- *
- * result.view.shape = result.from_slice.shape
- * result.view.strides = result.from_slice.strides # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
-
- /* "View.MemoryView":1036
- *
- *
- * result.view.suboffsets = NULL # <<<<<<<<<<<<<<
- * for suboffset in result.from_slice.suboffsets[:ndim]:
- * if suboffset >= 0:
- */
- __pyx_v_result->__pyx_base.view.suboffsets = NULL;
-
- /* "View.MemoryView":1037
- *
- * result.view.suboffsets = NULL
- * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
- * if suboffset >= 0:
- * result.view.suboffsets = result.from_slice.suboffsets
- */
- __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
- for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
- __pyx_t_6 = __pyx_t_8;
- __pyx_v_suboffset = (__pyx_t_6[0]);
-
- /* "View.MemoryView":1038
- * result.view.suboffsets = NULL
- * for suboffset in result.from_slice.suboffsets[:ndim]:
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * result.view.suboffsets = result.from_slice.suboffsets
- * break
- */
- __pyx_t_1 = (__pyx_v_suboffset >= 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":1039
- * for suboffset in result.from_slice.suboffsets[:ndim]:
- * if suboffset >= 0:
- * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<<
- * break
- *
- */
- __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
-
- /* "View.MemoryView":1040
- * if suboffset >= 0:
- * result.view.suboffsets = result.from_slice.suboffsets
- * break # <<<<<<<<<<<<<<
- *
- * result.view.len = result.view.itemsize
- */
- goto __pyx_L6_break;
-
- /* "View.MemoryView":1038
- * result.view.suboffsets = NULL
- * for suboffset in result.from_slice.suboffsets[:ndim]:
- * if suboffset >= 0: # <<<<<<<<<<<<<<
- * result.view.suboffsets =